#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

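/*
 * Make prior writes visible with a full data synchronisation barrier,
 * then execute SEV to wake any cores sleeping in WFE on this lock.
 * On ARMv6K the barrier is the CP15 c7, c10, 4 encoding (the "drain
 * write buffer" / DSB operation); plain ARMv6 has no SEV, so this
 * compiles to nothing there.
 */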
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb\n"
		"sev"
	);
#elif defined(CONFIG_CPU_32v6K)
	__asm__ __volatile__ (
		"mcr	p15, 0, %0, c7, c10, 4\n"
		"sev"
		: : "r" (0)
	);
#endif
}

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */

#define arch_spin_is_locked(x)		((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

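/*
 * Spin until we exclusively load a zero and our STREXEQ succeeds.  On
 * ARMv6K a contended CPU parks in WFE (wfene) instead of hammering the
 * bus; the unlocker's SEV wakes it.  The barrier comes after the lock
 * is taken, so the critical section cannot be reordered ahead of it.
 */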
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
#ifdef CONFIG_CPU_32v6K
"	wfene\n"
#endif
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}

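/*
 * Single LDREX/STREX attempt: returns 1 on success, 0 if the lock was
 * already held or another CPU beat our exclusive store.  The barrier
 * is only needed on the success path.
 */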
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

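/*
 * Release is a plain store of zero: the barrier ahead of it keeps the
 * critical section from leaking past the unlock, and dsb_sev() then
 * wakes any CPU parked in WFE waiting for the lock.
 */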
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]\n"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

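/*
 * Same acquire loop as arch_spin_lock(), except the whole word must be
 * zero (no readers, no writer) before we may plant bit 31.
 */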
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
#ifdef CONFIG_CPU_32v6K
"	wfene\n"
#endif
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

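/*
 * One-shot version of the above: give up immediately if the lock word
 * is nonzero or if another CPU steals the exclusive reservation.
 */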
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
#ifdef CONFIG_CPU_32v6K
"	wfemi\n"
#endif
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

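	/* Only the last reader out needs to wake a waiting writer. */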
	if (tmp == 0)
		dsb_sev();
}

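/*
 * tmp2 is preset to 1 so that a failed attempt reads as nonzero: the
 * STREXPL is skipped entirely when the incremented count is negative
 * (a writer holds bit 31), and is set to 1 when the exclusive store
 * is lost to another CPU.  Only a successful store clears it to 0.
 */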
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2 = 1;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	: "=&r" (tmp), "+r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
	return tmp2 == 0;
}

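/*
 * The lock word is the reader count; a writer sets bit 31, making the
 * value "negative".  Any value below 0x80000000 therefore means only
 * readers (or nobody) hold the lock.
 */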
/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */