#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <linux/prefetch.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#ifdef CONFIG_THUMB2_KERNEL
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	__ALT_SMP_ASM(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define WFE(cond)	__ALT_SMP_ASM("wfe" cond, "nop")
#endif

#define SEV		__ALT_SMP_ASM(WASM(sev), WASM(nop))

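/*
 * Signal other CPUs that may be sleeping in WFE on a lock word.  The
 * DSB ensures our store to the lock is observable before the SEV event
 * is broadcast.
 */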
static inline void dsb_sev(void)
{
	dsb(ishst);
	__asm__(SEV);
}

/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

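/*
 * Take a ticket: the ldrex/strex loop atomically increments the "next"
 * field and keeps a snapshot of the old lock value.  We then spin,
 * sleeping in WFE, until the "owner" field catches up with our ticket.
 */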
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 newval;
	arch_spinlock_t lockval;

	prefetchw(&lock->slock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"
"	add	%1, %0, %4\n"
"	strex	%2, %1, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
	}

	smp_mb();
}

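/*
 * A single, non-blocking attempt.  The lock is free only when the two
 * halves of the word are equal (next == owner), which is what the
 * "subs %1, %0, %0, ror #16" test checks.  The outer loop only retries
 * spurious strexeq failures; genuine contention returns 0 straight away.
 */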
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long contended, res;
	u32 slock;

	prefetchw(&lock->slock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%3]\n"
		"	mov	%2, #0\n"
		"	subs	%1, %0, %0, ror #16\n"
		"	addeq	%0, %0, %4\n"
		"	strexeq	%2, %0, [%3]"
		: "=&r" (slock), "=&r" (contended), "=&r" (res)
		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

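/*
 * Only the lock holder ever writes the "owner" field, so a plain
 * increment is race-free here.  smp_mb() orders the critical section
 * before the release; dsb_sev() wakes up any waiters parked in WFE.
 */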
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->tickets.owner++;
	dsb_sev();
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.owner == lock.tickets.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

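/*
 * Spin until the lock word reads as zero (no readers, no writer), then
 * try to claim it by storing bit 31.  WFE("ne") puts the CPU to sleep
 * while the lock is held; the unlocker's dsb_sev() wakes it up to
 * retry.
 */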
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

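/*
 * One-shot version of the above: succeed only if the lock word is
 * currently zero.  The loop merely retries spurious strexeq failures;
 * genuine contention returns 0 immediately.
 */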
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	teq	%0, #0\n"
		"	strexeq	%1, %3, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock), "r" (0x80000000)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

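/*
 * Releasing is a plain store of zero, since the writer holds the lock
 * exclusively: no read-modify-write cycle is needed.  smp_mb() orders
 * the critical section before the release; dsb_sev() wakes up waiters.
 */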
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)	(ACCESS_ONCE((x)->lock) == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
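/*
 * In the loop below, strexpl stores only while the incremented count is
 * still non-negative (no writer present); WFE("mi") sleeps while a
 * writer holds the lock, and rsbpls turns a failed strex into a
 * negative value so that bmi retries.
 */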
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

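/*
 * Drop one reader: atomically decrement the count and, if we were the
 * last reader (the count reached zero), issue SEV so a waiting writer
 * can proceed.
 */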
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

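/*
 * One-shot read lock: a single increment attempt.  "contended" holds
 * the incremented lock value; bit 31 set means a writer already owns
 * the lock, in which case strexpl is skipped and we fail.
 */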
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	adds	%0, %0, #1\n"
		"	strexpl	%1, %0, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock)
		: "cc");
	} while (res);

	/* If the lock is negative, then it is already held for write. */
	if (contended < 0x80000000) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)	(ACCESS_ONCE((x)->lock) < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */