/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/war.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

#define __raw_spin_is_locked(x)	((x)->lock != 0)
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
	do { cpu_relax(); } while ((x)->lock)

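/*
 * The lock word is 0 when the lock is free and non-zero (the acquire
 * loops below store 1) when it is held, which is all that
 * __raw_spin_is_locked() and __raw_spin_unlock_wait() above rely on.
 */
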
/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */

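/*
 * A rough C-level sketch of the acquire loop below (illustrative only:
 * the real code must use ll/sc so the test and the update form one
 * atomic step, and store_conditional is just pseudocode for sc):
 *
 *	do {
 *		tmp = lock->lock;				     (ll)
 *	} while (tmp != 0 || !store_conditional(&lock->lock, 1));   (sc)
 *
 * R10000_LLSC_WAR selects the variant built around the branch-likely
 * instruction beqzl, working around an ll/sc erratum on early R10000
 * CPUs; the plain variant instead moves the contended retry loop into
 * .subsection 2 so the uncontended path stays straight-line code.
 */
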
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 li	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (lock->lock), "=&r" (tmp)
		: "m" (lock->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 2f					\n"
		"	 li	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 2f					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	ll	%1, %2					\n"
		"	bnez	%1, 2b					\n"
		"	 li	%1, 1					\n"
		"	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (lock->lock), "=&r" (tmp)
		: "m" (lock->lock)
		: "memory");
	}

	smp_llsc_mb();
}

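/*
 * Release path: the smp_mb() below orders everything done inside the
 * critical section before the plain store of 0 that drops the lock.
 */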
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
	"	.set	noreorder	# __raw_spin_unlock	\n"
	"	sw	$0, %0					\n"
	"	.set\treorder					\n"
	: "=m" (lock->lock)
	: "m" (lock->lock)
	: "memory");
}

static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned int temp, res;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_trylock	\n"
		"1:	ll	%0, %3					\n"
		"	ori	%2, %0, 1				\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	 nop						\n"
		"	andi	%2, %0, 1				\n"
		"	.set	reorder"
		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
		: "m" (lock->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_trylock	\n"
		"1:	ll	%0, %3					\n"
		"	ori	%2, %0, 1				\n"
		"	sc	%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	 andi	%2, %0, 1				\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder"
		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
		: "m" (lock->lock)
		: "memory");
	}

	smp_llsc_mb();

	return res == 0;
}

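/*
 * Example (sketch; some_lock stands in for whatever spinlock_t the
 * caller owns): this raw op is normally reached through the generic
 * spin_trylock() wrapper, which returns non-zero on success:
 *
 *	if (spin_trylock(&some_lock)) {
 *		... critical section ...
 *		spin_unlock(&some_lock);
 *	}
 */
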
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

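/*
 * An illustrative sketch of that mixing, via the generic rwlock API:
 *
 *	read_lock(&rw);				(safe even in irq context)
 *	...
 *	read_unlock(&rw);
 *
 *	write_lock_irqsave(&rw, flags);		(writers disable irqs, so an
 *	...					 interrupt reader cannot come
 *	write_unlock_irqrestore(&rw, flags);	 in and deadlock on this CPU)
 */
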
/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_read_can_lock(rw)	((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_write_can_lock(rw)	(!(rw)->lock)

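/*
 * The rwlock word is a single counter: 0 means unlocked, a positive
 * value is the number of active readers, and the sign bit (0x80000000,
 * set by the "lui %1, 0x8000" in the writer paths below) marks a
 * writer.  That is why the reader paths test with bltz and the
 * can-lock checks above are simple sign/zero tests.
 */
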
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	ll	%1, %2					\n"
		"	bltz	%1, 2b					\n"
		"	 addu	%1, 1					\n"
		"	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	smp_llsc_mb();
}

/*
 * Note the use of sub, not subu, which will make the kernel die with an
 * overflow exception if we ever try to unlock an rwlock that is already
 * unlocked or is being held by a writer.
 */
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	smp_llsc_mb();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# __raw_read_unlock	\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_unlock	\n"
		"1:	ll	%1, %2					\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 2f					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 2f					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	ll	%1, %2					\n"
		"	bnez	%1, 2b					\n"
		"	 lui	%1, 0x8000				\n"
		"	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	smp_llsc_mb();
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"				# __raw_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 3f					\n"
		"	 li	%2, 1					\n"
		"2:							\n"
		__WEAK_LLSC_MB
		"	.subsection 2					\n"
		"3:	b	1b					\n"
		"	 li	%2, 0					\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* _ASM_SPINLOCK_H */