/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000, 06 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/war.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

#define __raw_spin_is_locked(x)	((x)->lock != 0)
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
	do { cpu_relax(); } while ((x)->lock)

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 li	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (lock->lock), "=&r" (tmp)
		: "m" (lock->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 li	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (lock->lock), "=&r" (tmp)
		: "m" (lock->lock)
		: "memory");
	}

	smp_mb();
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
	"	.set	noreorder	# __raw_spin_unlock	\n"
	"	sw	$0, %0					\n"
	"	.set	reorder					\n"
	: "=m" (lock->lock)
	: "m" (lock->lock)
	: "memory");
}

static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned int temp, res;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_trylock	\n"
		"1:	ll	%0, %3					\n"
		"	ori	%2, %0, 1				\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	 nop						\n"
		"	andi	%2, %0, 1				\n"
		"	.set	reorder"
		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
		: "m" (lock->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_trylock	\n"
		"1:	ll	%0, %3					\n"
		"	ori	%2, %0, 1				\n"
		"	sc	%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	 andi	%2, %0, 1				\n"
		"	.set	reorder"
		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
		: "m" (lock->lock)
		: "memory");
	}

	smp_mb();

	return res == 0;
}
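/*
 * Usage sketch (illustrative only, not part of this header): kernel code
 * does not call the __raw_spin_* operations directly but goes through the
 * generic <linux/spinlock.h> wrappers, which on SMP builds without lock
 * debugging end up in the operations above.  The lock and functions below
 * are hypothetical example names.
 *
 *	static DEFINE_SPINLOCK(example_lock);
 *	static unsigned long example_count;
 *
 *	void example_inc(void)
 *	{
 *		spin_lock(&example_lock);	// __raw_spin_lock() on SMP
 *		example_count++;
 *		spin_unlock(&example_lock);	// __raw_spin_unlock()
 *	}
 *
 *	int example_inc_nowait(void)
 *	{
 *		if (!spin_trylock(&example_lock))	// __raw_spin_trylock()
 *			return 0;			// lock was contended
 *		example_count++;
 *		spin_unlock(&example_lock);
 *		return 1;
 *	}
 */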

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

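/*
 * Illustrative sketch of the "mixed" usage described above; the rwlock and
 * functions are hypothetical example names, and callers use the generic
 * <linux/spinlock.h> wrappers rather than the __raw_* operations below.
 * The writer disables local interrupts, so a reader running from interrupt
 * context can safely take the plain read lock without risking a deadlock
 * against a writer on the same CPU.
 *
 *	static DEFINE_RWLOCK(example_rwlock);
 *	static int example_value;
 *
 *	int example_read(void)			// safe from interrupt context
 *	{
 *		int val;
 *
 *		read_lock(&example_rwlock);	// __raw_read_lock() on SMP
 *		val = example_value;
 *		read_unlock(&example_rwlock);
 *		return val;
 *	}
 *
 *	void example_write(int val)		// process context writer
 *	{
 *		unsigned long flags;
 *
 *		write_lock_irqsave(&example_rwlock, flags);
 *		example_value = val;
 *		write_unlock_irqrestore(&example_rwlock, flags);
 *	}
 */
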
/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_read_can_lock(rw)	((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_write_can_lock(rw)	(!(rw)->lock)

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	smp_mb();
}

/* Note the use of sub, not subu, which will make the kernel die with an
   overflow exception if we ever try to unlock an rwlock that is already
   unlocked or is being held by a writer. */
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	smp_mb();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# __raw_read_unlock	\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_unlock	\n"
		"1:	ll	%1, %2					\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	smp_mb();
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"				# __raw_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_ORDERING_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		__WEAK_ORDERING_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_ORDERING_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_ORDERING_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}
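/*
 * Illustrative trylock sketch (hypothetical example names): the *_trylock
 * operations above return non-zero on success and never spin, so a caller
 * must be prepared to fall back when the lock is contended.  As above,
 * callers normally use the generic wrappers rather than the __raw_* ops.
 *
 *	static DEFINE_RWLOCK(example_stats_lock);
 *	static unsigned long example_stats;
 *
 *	int example_stats_try_bump(void)
 *	{
 *		if (!write_trylock(&example_stats_lock))	// __raw_write_trylock()
 *			return 0;	// contended, caller may retry later
 *		example_stats++;
 *		write_unlock(&example_stats_lock);
 *		return 1;
 *	}
 */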

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* _ASM_SPINLOCK_H */