/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/lse.h>
#include <asm/spinlock_types.h>
#include <asm/processor.h>

/*
 * Spinlock implementation.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */
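/*
 * This is a ticket lock: the 32-bit lock word holds a 16-bit "owner" half and
 * a 16-bit "next" half (arch_spinlock_t and TICKET_SHIFT are defined in
 * asm/spinlock_types.h). A locker atomically increments "next" to take a
 * ticket and then waits until "owner" reaches that ticket; unlocking simply
 * increments "owner". Roughly, and only as an illustrative sketch of the
 * algorithm implemented by the assembly below (fetch_and_add() and the bare
 * wfe() are pseudo-code, not real helpers):
 *
 *	ticket = fetch_and_add(&lock->next, 1);
 *	while (READ_ONCE(lock->owner) != ticket)
 *		wfe();
 */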
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;
	u32 owner;

	/*
	 * Ensure prior spin_lock operations to other locks have completed
	 * on this CPU before we test whether "lock" is locked.
	 */
	smp_mb();
	owner = READ_ONCE(lock->owner) << 16;

	asm volatile(
"	sevl\n"
"1:	wfe\n"
"2:	ldaxr	%w0, %2\n"
	/* Is the lock free? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/* Lock taken -- has there been a subsequent unlock->lock transition? */
"	eor	%w1, %w3, %w0, lsl #16\n"
"	cbz	%w1, 1b\n"
	/*
	 * The owner has been updated, so there was an unlock->lock
	 * transition that we missed. That means we can rely on the
	 * store-release of the unlock operation paired with the
	 * load-acquire of the lock operation to publish any of our
	 * previous stores to the new lock owner and therefore don't
	 * need to bother with the writeback below.
	 */
"	b	4f\n"
"3:\n"
	/*
	 * Serialise against any concurrent lockers by writing back the
	 * unlocked lock value
	 */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	stxr	%w1, %w0, %2\n"
	__nops(2),
	/* LSE atomics */
"	mov	%w1, %w0\n"
"	cas	%w0, %w0, %2\n"
"	eor	%w1, %w1, %w0\n")
	/* Somebody else wrote to the lock; loop back to 2 and reload the value */
"	cbnz	%w1, 2b\n"
"4:"
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "r" (owner)
	: "memory");
}

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, %w5\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n",
	/* LSE atomics */
"	mov	%w2, %w5\n"
"	ldadda	%w2, %w0, %3\n"
	__nops(3)
	)

	/* Did we get the lock? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
	: "memory");
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %2\n"
"1:	ldaxr	%w0, %2\n"
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbnz	%w1, 2f\n"
"	add	%w0, %w0, %3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b\n"
"2:",
	/* LSE atomics */
"	ldr	%w0, %2\n"
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbnz	%w1, 1f\n"
"	add	%w1, %w0, %3\n"
"	casa	%w0, %w1, %2\n"
"	and	%w1, %w1, #0xffff\n"
"	eor	%w1, %w1, %w0, lsr #16\n"
"1:")
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "I" (1 << TICKET_SHIFT)
	: "memory");

	return !tmp;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	ldrh	%w1, %0\n"
"	add	%w1, %w1, #1\n"
"	stlrh	%w1, %0",
	/* LSE atomics */
"	mov	%w1, #1\n"
"	staddlh	%w1, %0\n"
	__nops(1))
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.owner == lock.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb(); /* See arch_spin_unlock_wait */
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval = READ_ONCE(*lock);
	return (lockval.next - lockval.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended
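/*
 * Illustrative example (made-up values, not generated code): with owner == 5
 * and next == 7, one CPU holds ticket 5 and another is waiting on ticket 6,
 * so arch_spin_is_locked() and arch_spin_is_contended() both return true.
 * With owner == 5 and next == 6 the lock is merely held, not contended,
 * which is why the test above is "> 1" rather than "> 0".
 */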

/*
 * Write lock implementation.
 *
 * Write locks set bit 31. Unlocking is done by writing 0, since the lock is
 * exclusively held.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */

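/*
 * A sketch of the lock-word encoding, as implied by the code below rather
 * than by any separate specification: bit 31 is the writer bit and the
 * remaining low bits count active readers, so 0 means unlocked, 0x80000000
 * means write-locked, and any small positive value is the number of readers
 * currently holding the lock.
 */
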
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	sevl\n"
"1:	wfe\n"
"2:	ldaxr	%w0, %1\n"
"	cbnz	%w0, 1b\n"
"	stxr	%w0, %w2, %1\n"
"	cbnz	%w0, 2b\n"
	__nops(1),
	/* LSE atomics */
"1:	mov	%w0, wzr\n"
"2:	casa	%w0, %w2, %1\n"
"	cbz	%w0, 3f\n"
"	ldxr	%w0, %1\n"
"	cbz	%w0, 2b\n"
"	wfe\n"
"	b	1b\n"
"3:")
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"1:	ldaxr	%w0, %1\n"
"	cbnz	%w0, 2f\n"
"	stxr	%w0, %w2, %1\n"
"	cbnz	%w0, 1b\n"
"2:",
	/* LSE atomics */
"	mov	%w0, wzr\n"
"	casa	%w0, %w2, %1\n"
	__nops(2))
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");

	return !tmp;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(ARM64_LSE_ATOMIC_INSN(
"	stlr	wzr, %0",
"	swpl	wzr, wzr, %0")
	: "=Q" (rw->lock) :: "memory");
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read lock implementation.
 *
 * It exclusively loads the lock value, increments it and stores the new
 * value back if the result is positive and the CPU still exclusively owns
 * the location. If the value is negative, a writer already holds the lock.
 *
 * During unlocking there may be multiple active read locks but no write lock.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 *
 * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
 * and LSE implementations may exhibit different behaviour (although this
 * will have no effect on lockdep).
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
"	sevl\n"
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"1:	wfe\n"
"2:	ldaxr	%w0, %2\n"
"	add	%w0, %w0, #1\n"
"	tbnz	%w0, #31, 1b\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 2b\n"
	__nops(1),
	/* LSE atomics */
"1:	wfe\n"
"2:	ldxr	%w0, %2\n"
"	adds	%w1, %w0, #1\n"
"	tbnz	%w1, #31, 1b\n"
"	casa	%w0, %w1, %2\n"
"	sbc	%w0, %w1, %w0\n"
"	cbnz	%w0, 2b")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"1:	ldxr	%w0, %2\n"
"	sub	%w0, %w0, #1\n"
"	stlxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b",
	/* LSE atomics */
"	movn	%w0, #0\n"
"	staddl	%w0, %2\n"
	__nops(2))
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	mov	%w1, #1\n"
"1:	ldaxr	%w0, %2\n"
"	add	%w0, %w0, #1\n"
"	tbnz	%w0, #31, 2f\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b\n"
"2:",
	/* LSE atomics */
"	ldr	%w0, %2\n"
"	adds	%w1, %w0, #1\n"
"	tbnz	%w1, #31, 1f\n"
"	casa	%w0, %w1, %2\n"
"	sbc	%w1, %w1, %w0\n"
	__nops(1)
"1:")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");

	return !tmp2;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/*
 * Accesses appearing in program order before a spin_lock() operation
 * can be reordered with accesses inside the critical section, by virtue
 * of arch_spin_lock being constructed using acquire semantics.
 *
 * In cases where this is problematic (e.g. try_to_wake_up), an
 * smp_mb__before_spinlock() can restore the required ordering.
 */
#define smp_mb__before_spinlock()	smp_mb()
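/*
 * Illustrative sketch (the variable names are made up, not taken from this
 * file). Without the barrier, the store to "flag" below could be reordered
 * with the load of "other" inside the critical section; with it, the store
 * is ordered before anything in the critical section:
 *
 *	WRITE_ONCE(flag, 1);
 *	smp_mb__before_spinlock();
 *	spin_lock(&lock);
 *	v = READ_ONCE(other);
 *	spin_unlock(&lock);
 */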

#endif	/* __ASM_SPINLOCK_H */