/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/war.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */


/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail.  The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 */

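/*
 * For orientation only, a minimal C sketch of the acquire path described
 * above (never compiled; the real implementations below are ll/sc
 * assembly).  It assumes the encoding this file uses: the now-serving
 * (head) counter lives in bits 0-12 of lock->lock, the next-ticket
 * (tail) counter in bits 14-26, so drawing a ticket means adding
 * 1 << 14.  atomic_fetch_add_word() is a hypothetical stand-in for the
 * atomic ll/sc update loops below, not a real kernel API.
 */
#if 0
static inline void ticket_lock_sketch(raw_spinlock_t *lock)
{
	unsigned int counters, my_ticket;

	/* Atomically bump the tail and remember the ticket we drew. */
	counters = atomic_fetch_add_word(&lock->lock, 1 << 14);	/* hypothetical */
	my_ticket = (counters >> 14) & 0x1fff;

	/* Spin until the head (now-serving) counter reaches our ticket. */
	while ((ACCESS_ONCE(lock->lock) & 0x1fff) != my_ticket)
		cpu_relax();
}
#endif
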
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	unsigned int counters = ACCESS_ONCE(lock->lock);

	return ((counters >> 14) ^ counters) & 0x1fff;
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
	while (__raw_spin_is_locked(x)) { cpu_relax(); }

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	unsigned int counters = ACCESS_ONCE(lock->lock);

	return (((counters >> 14) - counters) & 0x1fff) > 1;
}

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	int my_ticket;
	int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# __raw_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addiu	%[my_ticket], %[ticket], 0x4000		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqzl	%[my_ticket], 1b			\n"
		"	nop						\n"
		"	srl	%[my_ticket], %[ticket], 14		\n"
		"	andi	%[my_ticket], %[my_ticket], 0x1fff	\n"
		"	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0x1fff		\n"
		"5:	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	subu	%[ticket], 1				\n"
		"							\n"
		"	lw	%[ticket], %[ticket_ptr]		\n"
		"	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	5b					\n"
		"	subu	%[ticket], %[ticket], 1			\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket));
	} else {
		__asm__ __volatile__ (
		"	.set push		# __raw_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"	ll	%[ticket], %[ticket_ptr]		\n"
		"1:	addiu	%[my_ticket], %[ticket], 0x4000		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqz	%[my_ticket], 3f			\n"
		"	nop						\n"
		"	srl	%[my_ticket], %[ticket], 14		\n"
		"	andi	%[my_ticket], %[my_ticket], 0x1fff	\n"
		"	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	1b					\n"
		"	ll	%[ticket], %[ticket_ptr]		\n"
		"							\n"
		"4:	andi	%[ticket], %[ticket], 0x1fff		\n"
		"5:	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	subu	%[ticket], 1				\n"
		"							\n"
		"	lw	%[ticket], %[ticket_ptr]		\n"
		"	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	5b					\n"
		"	subu	%[ticket], %[ticket], 1			\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket));
	}

	smp_llsc_mb();
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	int tmp;

	smp_llsc_mb();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"				# __raw_spin_unlock	\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addiu	%[ticket], %[ticket], 1		\n"
		"	ori	%[ticket], %[ticket], 0x2000		\n"
		"	xori	%[ticket], %[ticket], 0x2000		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp));
	} else {
		__asm__ __volatile__ (
		"	.set push		# __raw_spin_unlock	\n"
		"	.set noreorder					\n"
		"							\n"
		"	ll	%[ticket], %[ticket_ptr]		\n"
		"1:	addiu	%[ticket], %[ticket], 1		\n"
		"	ori	%[ticket], %[ticket], 0x2000		\n"
		"	xori	%[ticket], %[ticket], 0x2000		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 2f				\n"
		"	nop						\n"
		"							\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	ll	%[ticket], %[ticket_ptr]		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp));
	}
}

static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int tmp, tmp2, tmp3;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# __raw_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 14		\n"
		"	andi	%[my_ticket], %[my_ticket], 0x1fff	\n"
		"	andi	%[now_serving], %[ticket], 0x1fff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	addiu	%[ticket], %[ticket], 0x4000		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		"	li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3));
	} else {
		__asm__ __volatile__ (
		"	.set push		# __raw_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"	ll	%[ticket], %[ticket_ptr]		\n"
		"1:	srl	%[my_ticket], %[ticket], 14		\n"
		"	andi	%[my_ticket], %[my_ticket], 0x1fff	\n"
		"	andi	%[now_serving], %[ticket], 0x1fff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	addiu	%[ticket], %[ticket], 0x4000		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 4f				\n"
		"	li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	li	%[ticket], 0				\n"
		"4:	b	1b					\n"
		"	ll	%[ticket], %[ticket_ptr]		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3));
	}

	smp_llsc_mb();

	return tmp;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

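/*
 * A usage sketch of the pattern described above (hypothetical driver
 * code, never compiled, not part of this header): readers running in
 * interrupt context may take the plain read lock, while any writer has
 * to use the irq-safe variant so it cannot be preempted by such a
 * reader on the same CPU.  foo_lock, foo_list and foo_add_entry() are
 * made-up names for illustration.
 */
#if 0
static DEFINE_RWLOCK(foo_lock);		/* hypothetical lock protecting foo_list */

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	read_lock(&foo_lock);			/* non-irqsafe read lock is fine here */
	/* ... walk foo_list ... */
	read_unlock(&foo_lock);
	return IRQ_HANDLED;
}

static void foo_add_entry(void *entry)
{
	unsigned long flags;

	write_lock_irqsave(&foo_lock, flags);	/* writer must be irq-safe */
	/* ... modify foo_list ... */
	write_unlock_irqrestore(&foo_lock, flags);
}
#endif
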
/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_read_can_lock(rw)	((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_write_can_lock(rw)	(!(rw)->lock)

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 2f					\n"
		"	addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	nop						\n"
		"	.subsection 2					\n"
		"2:	ll	%1, %2					\n"
		"	bltz	%1, 2b					\n"
		"	addu	%1, 1					\n"
		"	b	1b					\n"
		"	nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	smp_llsc_mb();
}

/*
 * Note the use of sub, not subu, which will make the kernel die with an
 * overflow exception if we ever try to unlock an rwlock that is already
 * unlocked or is being held by a writer.
 */
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	smp_llsc_mb();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# __raw_read_unlock	\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_unlock	\n"
		"1:	ll	%1, %2					\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 2f					\n"
		"	nop						\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 2f					\n"
		"	lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 2f					\n"
		"	nop						\n"
		"	.subsection 2					\n"
		"2:	ll	%1, %2					\n"
		"	bnez	%1, 2b					\n"
		"	lui	%1, 0x8000				\n"
		"	b	1b					\n"
		"	nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	smp_llsc_mb();
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"				# __raw_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	nop						\n"
		"	.set	reorder					\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 3f					\n"
		"	li	%2, 1					\n"
		"2:							\n"
		__WEAK_LLSC_MB
		"	.subsection 2					\n"
		"3:	b	1b					\n"
		"	li	%2, 0					\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}


#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* _ASM_SPINLOCK_H */