/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/lse.h>
#include <asm/spinlock_types.h>
#include <asm/processor.h>

/*
 * Spinlock implementation.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */
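
/*
 * A rough C sketch of the ticket algorithm implemented below (pseudocode,
 * assuming the 16-bit owner/next fields from asm/spinlock_types.h;
 * fetch_and_add() stands in for an atomic post-increment):
 *
 *	void ticket_lock(arch_spinlock_t *lock)
 *	{
 *		u16 ticket = fetch_and_add(&lock->next, 1);	// take a ticket
 *		while (READ_ONCE(lock->owner) != ticket)	// wait our turn
 *			cpu_relax();
 *	}
 *
 * The asm versions below fold the "is it already our turn?" test into the
 * ticket grab and use sevl/wfe to wait for the unlock event instead of
 * spinning hard on the owner field.
 */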

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, %w5\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n",
	/* LSE atomics */
"	mov	%w2, %w5\n"
"	ldadda	%w2, %w0, %3\n"
"	nop\n"
"	nop\n"
"	nop\n"
	)

	/* Did we get the lock? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
	: "memory");
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	prfm	pstl1strm, %2\n"
	"1:	ldaxr	%w0, %2\n"
	"	eor	%w1, %w0, %w0, ror #16\n"
	"	cbnz	%w1, 2f\n"
	"	add	%w0, %w0, %3\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	"2:",
	/* LSE atomics */
	"	ldr	%w0, %2\n"
	"	eor	%w1, %w0, %w0, ror #16\n"
	"	cbnz	%w1, 1f\n"
	"	add	%w1, %w0, %3\n"
	"	casa	%w0, %w1, %2\n"
	"	and	%w1, %w1, #0xffff\n"
	"	eor	%w1, %w1, %w0, lsr #16\n"
	"1:")
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "I" (1 << TICKET_SHIFT)
	: "memory");

	return !tmp;
}
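
/*
 * A note on the LSE trylock above: casa loads the value it observed in
 * memory back into %w0, so the trailing and/eor recompute the owner == next
 * test against what the compare-and-swap actually saw, leaving tmp zero
 * only when the lock was really taken.
 */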

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	ldrh	%w1, %0\n"
	"	add	%w1, %w1, #1\n"
	"	stlrh	%w1, %0",
	/* LSE atomics */
	"	mov	%w1, #1\n"
	"	nop\n"
	"	staddlh	%w1, %0")
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}
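
/*
 * Unlock just bumps the owner ticket. Only the lock holder updates owner,
 * so a plain (non-exclusive) halfword load is sufficient; the store-release
 * (stlrh, or staddlh in the LSE version) publishes the critical section's
 * writes before the next waiter can observe its ticket come up.
 */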

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.owner == lock.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval = READ_ONCE(*lock);
	return (lockval.next - lockval.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended
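
/*
 * For example, owner == 5 and next == 6 means the lock is merely held (one
 * ticket out), while owner == 5 and next == 7 means at least one other CPU
 * is queued behind the holder, i.e. the lock is contended.
 */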

/*
 * Write lock implementation.
 *
 * Write locks set bit 31. Unlocking is done by writing 0, since the lock is
 * exclusively held.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */
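
/*
 * A sketch of how the rw->lock word is used below (our summary of the
 * constants in this file, not a documented ABI):
 *
 *	0x00000000	unlocked
 *	0x80000000	write-locked (bit 31 set)
 *	0x00000001..	read-locked, value == number of readers
 *
 * A writer therefore waits for the whole word to reach zero, while readers
 * increment it and back off if the result would have bit 31 set.
 */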

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1b\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 2b\n"
	"	nop",
	/* LSE atomics */
	"1:	mov	%w0, wzr\n"
	"2:	casa	%w0, %w2, %1\n"
	"	cbz	%w0, 3f\n"
	"	ldxr	%w0, %1\n"
	"	cbz	%w0, 2b\n"
	"	wfe\n"
	"	b	1b\n"
	"3:")
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");
}
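
/*
 * In the LSE path above, a failed casa leaves the observed value in %w0.
 * The ldxr then re-checks: if the lock has just been freed it jumps straight
 * back to the casa (with %w0 already zero), otherwise it has armed the
 * exclusive monitor so that wfe wakes when the lock word is next written,
 * rather than spinning on repeated compare-and-swaps.
 */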

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 2f\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 1b\n"
	"2:",
	/* LSE atomics */
	"	mov	%w0, wzr\n"
	"	casa	%w0, %w2, %1\n"
	"	nop\n"
	"	nop")
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");

	return !tmp;
}
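
/*
 * In both variants above, tmp ends up zero exactly on success: the LL/SC
 * path exits with either the nonzero lock value it observed or the stxr
 * status, while the LSE casa returns the value it saw, which is zero only
 * when the lock was free and has just been claimed.
 */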

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(ARM64_LSE_ATOMIC_INSN(
	"	stlr	wzr, %0",
	"	swpl	wzr, wzr, %0")
	: "=Q" (rw->lock) :: "memory");
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read lock implementation.
 *
 * It exclusively loads the lock value, increments it and stores the new value
 * back if positive and the CPU still exclusively owns the location. If the
 * value is negative, the lock is already held.
 *
 * During unlocking there may be multiple active read locks but no write lock.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 *
 * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
 * and LSE implementations may exhibit different behaviour (although this
 * will have no effect on lockdep).
 */
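
/*
 * A rough C sketch of a single attempt in the reader paths below
 * (pseudocode; cmpxchg() stands in for an atomic compare-and-swap):
 *
 *	int read_trylock_once(arch_rwlock_t *rw)
 *	{
 *		u32 old = READ_ONCE(rw->lock);
 *		if ((int)(old + 1) < 0)		// bit 31: writer present
 *			return 0;
 *		return cmpxchg(&rw->lock, old, old + 1) == old;
 *	}
 *
 * arch_read_lock() keeps retrying (sleeping in wfe) until this succeeds;
 * arch_read_trylock() reports the failure to its caller instead.
 */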
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	"	sevl\n"
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	wfe\n"
	"2:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 1b\n"
	"	stxr	%w1, %w0, %2\n"
	"	nop\n"
	"	cbnz	%w1, 2b",
	/* LSE atomics */
	"1:	wfe\n"
	"2:	ldxr	%w0, %2\n"
	"	adds	%w1, %w0, #1\n"
	"	tbnz	%w1, #31, 1b\n"
	"	casa	%w0, %w1, %2\n"
	"	sbc	%w0, %w1, %w0\n"
	"	cbnz	%w0, 2b")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");
}
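
/*
 * The sbc in the LSE path above is a compact success check: the adds leaves
 * the carry flag clear for any lock value that can legitimately reach the
 * casa (only an increment of 0xffffffff would carry), so sbc computes
 * %w1 - %w0 - 1, which is zero exactly when casa observed the value the
 * increment was based on.
 */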

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	ldxr	%w0, %2\n"
	"	sub	%w0, %w0, #1\n"
	"	stlxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b",
	/* LSE atomics */
	"	movn	%w0, #0\n"
	"	nop\n"
	"	nop\n"
	"	staddl	%w0, %2")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	mov	%w1, #1\n"
	"1:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 2f\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	"2:",
	/* LSE atomics */
	"	ldr	%w0, %2\n"
	"	adds	%w1, %w0, #1\n"
	"	tbnz	%w1, #31, 1f\n"
	"	casa	%w0, %w1, %2\n"
	"	sbc	%w1, %w1, %w0\n"
	"	nop\n"
	"1:")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");

	return !tmp2;
}
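
/*
 * As in the write paths, tmp2 holds a status that is zero on success: the
 * stxr result (or the initial #1 on a writer collision) in the LL/SC
 * version, and the sbc-computed difference in the LSE version.
 */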

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif	/* __ASM_SPINLOCK_H */