/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <linux/config.h>
#include <asm/war.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

#define __raw_spin_is_locked(x)	((x)->lock != 0)
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
	do { cpu_relax(); } while ((x)->lock)

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	li	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	nop						\n"
		"	sync						\n"
		"	.set	reorder					\n"
		: "=m" (lock->lock), "=&r" (tmp)
		: "m" (lock->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	li	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	sync						\n"
		"	.set	reorder					\n"
		: "=m" (lock->lock), "=&r" (tmp)
		: "m" (lock->lock)
		: "memory");
	}
}

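/*
 * Illustrative only (kept out of the build with #if 0): a rough C-level
 * sketch of what the ll/sc loop in __raw_spin_lock() above implements.
 * load_linked() and store_conditional() are hypothetical stand-ins for the
 * ll/sc instruction pair, not real kernel APIs; the trailing "sync" in the
 * assembler is modelled here only by the comment.
 */
#if 0
extern unsigned int load_linked(volatile unsigned int *p);
extern int store_conditional(volatile unsigned int *p, unsigned int val);

static inline void __raw_spin_lock_sketch(raw_spinlock_t *lock)
{
	unsigned int old;

	do {
		old = load_linked(&lock->lock);		/* ll   %1, %2 */
	} while (old != 0 ||				/* bnez %1, 1b */
		 !store_conditional(&lock->lock, 1));	/* li/sc, beqz(l) retry */
	/* sync: the critical section is ordered after the store above */
}
#endif
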
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
	"	.set	noreorder	# __raw_spin_unlock	\n"
	"	sync						\n"
	"	sw	$0, %0					\n"
	"	.set\treorder					\n"
	: "=m" (lock->lock)
	: "m" (lock->lock)
	: "memory");
}

static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned int temp, res;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_trylock	\n"
		"1:	ll	%0, %3					\n"
		"	ori	%2, %0, 1				\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	nop						\n"
		"	andi	%2, %0, 1				\n"
		"	sync						\n"
		"	.set	reorder"
		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
		: "m" (lock->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_trylock	\n"
		"1:	ll	%0, %3					\n"
		"	ori	%2, %0, 1				\n"
		"	sc	%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	andi	%2, %0, 1				\n"
		"	sync						\n"
		"	.set	reorder"
		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
		: "m" (lock->lock)
		: "memory");
	}

	return res == 0;
}

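/*
 * Illustrative only: a C-level sketch of __raw_spin_trylock() above.  It
 * makes a single logical attempt: set bit 0 of the lock word and report
 * success only if the word was previously free (that is what the final
 * "andi %2, %0, 1" and "return res == 0" express).  load_linked() and
 * store_conditional() are the same hypothetical ll/sc stand-ins declared
 * in the sketch above.
 */
#if 0
static inline unsigned int __raw_spin_trylock_sketch(raw_spinlock_t *lock)
{
	unsigned int old;

	do {
		old = load_linked(&lock->lock);			/* ll  %0, %3 */
	} while (!store_conditional(&lock->lock, old | 1));	/* ori/sc, retry on sc failure */

	/* bit 0 of the old value tells us whether we actually took the lock */
	return (old & 1) == 0;
}
#endif
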
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_read_can_lock(rw)	((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_write_can_lock(rw)	(!(rw)->lock)

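/*
 * Lock-word encoding assumed by the assembler below: zero means unlocked,
 * a positive value is the number of active readers, and a writer installs
 * bit 31 ("lui %1, 0x8000" loads 0x80000000), which the read-side "bltz"
 * test sees as a negative value.  The trapping "sub" in __raw_read_unlock()
 * is what turns a read_unlock of a writer-held word (0x80000000 - 1, a
 * signed overflow) into an overflow exception, per the comment further
 * down.  The helper names below are hypothetical, used only to spell the
 * encoding out in C; they are not part of this header's API.
 */
#if 0
#define RW_LOCK_WRITER_BIT	0x80000000u		/* installed by __raw_write_lock() */
#define rw_is_write_locked(v)	(((v) & RW_LOCK_WRITER_BIT) != 0)
#define rw_reader_count(v)	((v) & ~RW_LOCK_WRITER_BIT)	/* meaningful when no writer */
#endif
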
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	nop						\n"
		"	sync						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	sync						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}

/* Note the use of sub, not subu, which will make the kernel die with an
   overflow exception if we ever try to unlock an rwlock that is already
   unlocked or is being held by a writer. */
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# __raw_read_unlock	\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	sync						\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_unlock	\n"
		"1:	ll	%1, %2					\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	sync						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	sync						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	sync						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	__asm__ __volatile__(
	"	sync			# __raw_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}

#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	sync						\n"
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	sync						\n"
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}

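/*
 * Illustrative only: a C-level sketch of __raw_write_trylock() above, using
 * the same hypothetical load_linked()/store_conditional() helpers as the
 * earlier sketches.  It gives up as soon as it observes any holder (readers
 * or a writer) and otherwise installs the writer bit; only an sc failure is
 * retried.
 */
#if 0
static inline int __raw_write_trylock_sketch(raw_rwlock_t *rw)
{
	unsigned int old;

	do {
		old = load_linked(&rw->lock);		/* ll   %1, %3 */
		if (old != 0)				/* bnez %1, 2f */
			return 0;			/* held: fail immediately */
	} while (!store_conditional(&rw->lock, 0x80000000)); /* lui/sc, retry on sc failure */

	return 1;					/* li %2, 1 after the sync */
}
#endif
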
#endif /* _ASM_SPINLOCK_H */