#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */

#define __raw_spin_is_locked(x)		((x)->lock != 0)
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

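/*
 * Roughly, the acquire loop below behaves like this C-style sketch
 * (illustrative only: load_exclusive()/store_exclusive() are
 * hypothetical stand-ins for ldrex/strex, which have no direct C
 * equivalent):
 *
 *	do {
 *		tmp = load_exclusive(&lock->lock);		// ldrex
 *		if (tmp == 0)					// lock was free
 *			tmp = store_exclusive(&lock->lock, 1);	// strex, 0 on success
 *	} while (tmp != 0);
 *	smp_mb();
 */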
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
#ifdef CONFIG_CPU_32v6K
"	wfene\n"				/* lock held: sleep until a SEV */
#endif
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]\n"
#ifdef CONFIG_CPU_32v6K
"	mcr	p15, 0, %1, c7, c10, 4\n"	/* DSB */
"	sev"					/* wake up any CPUs in WFE */
#endif
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");
}

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
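
/*
 * Lock word layout (implied by the code below, not stated explicitly):
 *
 *	bit  31     set (0x80000000) while a writer holds the lock
 *	bits 0..30  count of currently active readers
 *
 * Zero therefore means unlocked, a negative value means write-locked,
 * and a positive value is the number of readers.
 */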
#define rwlock_is_locked(x)	(*((volatile unsigned int *)(x)) != 0)

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
#ifdef CONFIG_CPU_32v6K
"	wfene\n"
#endif
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
#ifdef CONFIG_CPU_32v6K
"	mcr	p15, 0, %1, c7, c10, 4\n"	/* DSB */
"	sev\n"
#endif
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");
}

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 *
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
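
/*
 * The loop below, as a C-style sketch (illustrative only, using the
 * same hypothetical load_exclusive()/store_exclusive() helpers as the
 * spin-lock sketch above):
 *
 *	do {
 *		tmp = load_exclusive(&rw->lock) + 1;	// ldrex / adds
 *		if (tmp >= 0)				// no writer present
 *			tmp2 = store_exclusive(&rw->lock, tmp);	// strexpl
 *	} while (tmp < 0 || tmp2 != 0);	// rsbpls + bmi implement this test
 *	smp_mb();
 */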
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
#ifdef CONFIG_CPU_32v6K
"	wfemi\n"				/* writer holds it: sleep until a SEV */
#endif
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
#ifdef CONFIG_CPU_32v6K
"\n	cmp	%0, #0\n"			/* last reader gone? */
"	mcreq	p15, 0, %0, c7, c10, 4\n"	/* DSB */
"	seveq"					/* wake up any CPUs in WFE */
#endif
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");
}

#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)

#endif /* __ASM_SPINLOCK_H */