/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/lse.h>
#include <asm/spinlock_types.h>
#include <asm/processor.h>

/*
 * Spinlock implementation.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */

#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)

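/*
 * Ticket lock: the lock word holds two halfwords, "owner" in the low
 * 16 bits and "next" in the high 16 bits (see spinlock_types.h, which
 * defines TICKET_SHIFT). A locker takes a ticket by atomically adding
 * 1 << TICKET_SHIFT to "next", then waits in WFE until "owner" reaches
 * its ticket. SEVL makes the first WFE fall straight through, so an
 * unlock arriving before the exclusive load of "owner" cannot be
 * missed; after that, stores to the monitored location wake the WFE.
 */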
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	prfm	pstl1strm, %3\n"
	"1:	ldaxr	%w0, %3\n"
	"	add	%w1, %w0, %w5\n"
	"	stxr	%w2, %w1, %3\n"
	"	cbnz	%w2, 1b\n",
	/* LSE atomics */
	"	mov	%w2, %w5\n"
	"	ldadda	%w2, %w0, %3\n"
	__nops(3)
	)

	/* Did we get the lock? */
	"	eor	%w1, %w0, %w0, ror #16\n"
	"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
	"	sevl\n"
	"2:	wfe\n"
	"	ldaxrh	%w2, %4\n"
	"	eor	%w1, %w2, %w0, lsr #16\n"
	"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
	"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
	: "memory");
}

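/*
 * Trylock can only succeed when the lock is currently free, i.e. the
 * owner and next tickets match (the lock word equals itself rotated by
 * 16 bits). On success the next ticket is bumped, which both takes the
 * lock and queues later lockers behind us; "tmp" ends up zero only in
 * that case, hence the !tmp return value.
 */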
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	prfm	pstl1strm, %2\n"
	"1:	ldaxr	%w0, %2\n"
	"	eor	%w1, %w0, %w0, ror #16\n"
	"	cbnz	%w1, 2f\n"
	"	add	%w0, %w0, %3\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	"2:",
	/* LSE atomics */
	"	ldr	%w0, %2\n"
	"	eor	%w1, %w0, %w0, ror #16\n"
	"	cbnz	%w1, 1f\n"
	"	add	%w1, %w0, %3\n"
	"	casa	%w0, %w1, %2\n"
	"	and	%w1, %w1, #0xffff\n"
	"	eor	%w1, %w1, %w0, lsr #16\n"
	"1:")
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "I" (1 << TICKET_SHIFT)
	: "memory");

	return !tmp;
}

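/*
 * Unlock hands the lock to the next waiter by incrementing the owner
 * ticket with release semantics (STLRH, or STADDLH with LSE atomics).
 * Other CPUs only ever advance the "next" half, so a non-exclusive
 * LDRH of the current owner is sufficient here.
 */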
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	ldrh	%w1, %0\n"
	"	add	%w1, %w1, #1\n"
	"	stlrh	%w1, %0",
	/* LSE atomics */
	"	mov	%w1, #1\n"
	"	staddlh	%w1, %0\n"
	__nops(1))
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}

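/* The ticket lock is free when no tickets are outstanding: owner == next. */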
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.owner == lock.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	/*
	 * Ensure prior spin_lock operations to other locks have completed
	 * on this CPU before we test whether "lock" is locked.
	 */
	smp_mb(); /* ^^^ */
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

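/*
 * The lock is contended when at least one CPU is queued behind the
 * current holder, i.e. "next" is more than one ticket ahead of "owner".
 */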
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval = READ_ONCE(*lock);
	return (lockval.next - lockval.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

/*
 * Write lock implementation.
 *
 * Write locks set bit 31. Unlocking is done by writing 0 since the lock is
 * exclusively held.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */

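/*
 * The LSE write-lock fast path is a CASA from 0 to the write bit
 * (0x80000000). If the lock is observed busy, it waits with LDXR + WFE
 * (the exclusive load arms the monitor so the unlocking store generates
 * a wake-up event) before retrying the compare-and-swap.
 */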
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1b\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 2b\n"
	__nops(1),
	/* LSE atomics */
	"1:	mov	%w0, wzr\n"
	"2:	casa	%w0, %w2, %1\n"
	"	cbz	%w0, 3f\n"
	"	ldxr	%w0, %1\n"
	"	cbz	%w0, 2b\n"
	"	wfe\n"
	"	b	1b\n"
	"3:")
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 2f\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 1b\n"
	"2:",
	/* LSE atomics */
	"	mov	%w0, wzr\n"
	"	casa	%w0, %w2, %1\n"
	__nops(2))
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");

	return !tmp;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(ARM64_LSE_ATOMIC_INSN(
	"	stlr	wzr, %0",
	"	swpl	wzr, wzr, %0")
	: "=Q" (rw->lock) :: "memory");
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read lock implementation.
 *
 * It exclusively loads the lock value, increments it and stores the new value
 * back if positive and the CPU still exclusively owns the location. If the
 * value is negative, the lock is already held.
 *
 * During unlocking there may be multiple active read locks but no write lock.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 *
 * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
 * and LSE implementations may exhibit different behaviour (although this
 * will have no effect on lockdep).
 */
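/*
 * In the LSE variant below, the incremented reader count is computed
 * with ADDS and, if bit 31 is clear (no writer), installed with CASA.
 * The SBC/CBNZ pair then checks that CASA observed the value we
 * expected and retries if another CPU updated the lock first.
 */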
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	"	sevl\n"
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	wfe\n"
	"2:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 1b\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 2b\n"
	__nops(1),
	/* LSE atomics */
	"1:	wfe\n"
	"2:	ldxr	%w0, %2\n"
	"	adds	%w1, %w0, #1\n"
	"	tbnz	%w1, #31, 1b\n"
	"	casa	%w0, %w1, %2\n"
	"	sbc	%w0, %w1, %w0\n"
	"	cbnz	%w0, 2b")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	ldxr	%w0, %2\n"
	"	sub	%w0, %w0, #1\n"
	"	stlxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b",
	/* LSE atomics */
	"	movn	%w0, #0\n"
	"	staddl	%w0, %2\n"
	__nops(2))
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	mov	%w1, #1\n"
	"1:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 2f\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	"2:",
	/* LSE atomics */
	"	ldr	%w0, %2\n"
	"	adds	%w1, %w0, #1\n"
	"	tbnz	%w1, #31, 1f\n"
	"	casa	%w0, %w1, %2\n"
	"	sbc	%w1, %w1, %w0\n"
	__nops(1)
	"1:")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");

	return !tmp2;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/*
 * Accesses appearing in program order before a spin_lock() operation
 * can be reordered with accesses inside the critical section, by virtue
 * of arch_spin_lock being constructed using acquire semantics.
 *
 * In cases where this is problematic (e.g. try_to_wake_up), an
 * smp_mb__before_spinlock() can restore the required ordering.
 */
#define smp_mb__before_spinlock()	smp_mb()

#endif /* __ASM_SPINLOCK_H */