/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <linux/prefetch.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#ifdef CONFIG_THUMB2_KERNEL
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself, "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	__ALT_SMP_ASM(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define WFE(cond)	__ALT_SMP_ASM("wfe" cond, "nop")
#endif

#define SEV		__ALT_SMP_ASM(WASM(sev), WASM(nop))

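/*
 * dsb_sev() is the wake-up half of the wfe()-based waiting used below:
 * dsb(ishst) makes the preceding unlock store visible to the other CPUs
 * before sev wakes any of them from wfe in the lock slow paths.
 */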
static inline void dsb_sev(void)
{
	dsb(ishst);
	__asm__(SEV);
}

/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */
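/*
 * For reference, a sketch of the lock word layout this code relies on
 * (the real definition lives in <asm/spinlock_types.h>; the field order
 * shown is the little-endian case):
 *
 *	typedef struct {
 *		union {
 *			u32 slock;
 *			struct __raw_tickets {
 *				u16 owner;
 *				u16 next;
 *			} tickets;
 *		};
 *	} arch_spinlock_t;
 *
 * With TICKET_SHIFT == 16, adding (1 << TICKET_SHIFT) to slock takes
 * the next ticket without disturbing the owner field.
 */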

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 newval;
	arch_spinlock_t lockval;

	prefetchw(&lock->slock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"
"	add	%1, %0, %4\n"
"	strex	%2, %1, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
		lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
	}

	smp_mb();
}

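/*
 * Trylock: "subs %1, %0, %0, ror #16" compares the two ticket halves by
 * subtracting the lock word from itself rotated by 16 bits; the result
 * is zero only when next == owner, i.e. when the lock is free, in which
 * case we claim the next ticket with the conditional strexeq.
 */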
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long contended, res;
	u32 slock;

	prefetchw(&lock->slock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%3]\n"
		"	mov	%2, #0\n"
		"	subs	%1, %0, %0, ror #16\n"
		"	addeq	%0, %0, %4\n"
		"	strexeq	%2, %0, [%3]"
		: "=&r" (slock), "=&r" (contended), "=&r" (res)
		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

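/*
 * Unlock is a plain increment of the owner ticket: the holder has
 * exclusive write access, so no exclusive monitor is needed.  smp_mb()
 * keeps the critical section from leaking past the release, and
 * dsb_sev() wakes any waiters spinning in wfe.
 */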
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->tickets.owner++;
	dsb_sev();
}

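/*
 * The lock is free exactly when the next ticket to be handed out
 * matches the ticket currently being served.
 */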
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.owner == lock.tickets.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = READ_ONCE(lock->tickets);
	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

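/*
 * The whole rwlock state is a single u32 (see arch_rwlock_t in
 * <asm/spinlock_types.h>): 0 means unlocked, 0x80000000 means
 * write-locked, and a positive value counts the current readers.
 */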
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	teq	%0, #0\n"
		"	strexeq	%1, %3, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock), "r" (0x80000000)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
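/*
 * Concretely, the reader count just walks 0 -> 1 -> 2 -> ... as CPUs
 * take the lock.  If a writer holds it, the word is 0x80000000, the
 * increment below yields a negative value, the conditional strexpl is
 * skipped, and we wait in wfe until the writer's sev.
 */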
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	/* Only the last reader out needs to wake a waiting writer. */
	if (tmp == 0)
		dsb_sev();
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	adds	%0, %0, #1\n"
		"	strexpl	%1, %0, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock)
		: "cc");
	} while (res);

	/* If the lock is negative, then it is already held for write. */
	if (contended < 0x80000000) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

#endif /* __ASM_SPINLOCK_H */