#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value. If it is zero, we may have
 * won the lock, so we try exclusively storing it. A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */
typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }

#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)
#define spin_is_locked(x) ((x)->lock != 0)
#define spin_unlock_wait(x) do { barrier(); } while (spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

static inline void _raw_spin_lock(spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}
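
/*
 * For illustration only, not part of this header: a plain-C sketch of
 * what the ldrex/strex loop above does. atomic_cas() is a hypothetical
 * compare-and-swap helper assumed for the sketch; the real code relies
 * on the exclusive monitor instead.
 *
 *	for (;;) {
 *		unsigned int old = lock->lock;		// ldrex
 *		if (old == 0 &&				// teq
 *		    atomic_cas(&lock->lock, 0, 1))	// strexeq succeeded
 *			break;				// teqeq / bne
 *	}
 *	smp_mb();	// order the critical section after lock acquisition
 */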

static inline int _raw_spin_trylock(spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void _raw_spin_unlock(spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");
}
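
/*
 * Note on the plain str above: the unlocker owns the lock exclusively,
 * so no ldrex/strex loop is needed to release it. The smp_mb() before
 * the store is the barrier described at the top of this file: on a
 * weakly ordered V6 CPU it keeps the critical section's accesses from
 * being observed after the lock word reads as 0.
 */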

/*
 * RWLOCKS
 */
typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while (0)
#define rwlock_is_locked(x) (*((volatile unsigned int *)(x)) != 0)

/*
 * Write locks are easy - we just set bit 31. When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
static inline void _raw_write_lock(rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}
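
/*
 * Summary of the rwlock word encoding, pieced together from the
 * comments in this file (shown here only as a reference):
 *
 *	0x00000000	unlocked
 *	0x80000000	write-locked (bit 31 set, so the value is negative)
 *	0x00000001..	read-locked, value = number of active readers
 *
 * A writer may only take the lock when the whole word is zero, i.e.
 * there are no readers and no other writer.
 */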

static inline int _raw_write_trylock(rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void _raw_write_unlock(rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");
}

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy. We may have multiple read locks
 * currently active. However, we know we won't have any write
 * locks.
 */
static inline void _raw_read_lock(rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}
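
/*
 * Instruction-by-instruction view of the loop above, matching the
 * steps listed in the comment (descriptive only):
 *
 *	ldrex	exclusively load the current lock value
 *	adds	increment it, setting the flags (negative means a writer
 *		holds the lock)
 *	strexpl	store the new value only if the result was positive;
 *		%1 becomes 0 on success, 1 if we lost exclusivity
 *	rsbpls	negate the strex result so a failed store also leaves a
 *		negative value
 *	bmi	if anything went negative, try again
 */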

static inline void _raw_read_unlock(rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");
}

#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)

#endif /* __ASM_SPINLOCK_H */