blob: 9cb93a5c2b4feac61ab2245c9ca17cb74997ded0 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/* spinlock.h: 64-bit Sparc spinlock support.
2 *
3 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#ifndef __SPARC64_SPINLOCK_H
7#define __SPARC64_SPINLOCK_H
8
9#include <linux/config.h>
10#include <linux/threads.h> /* For NR_CPUS */
11
12#ifndef __ASSEMBLY__
13
14/* To get debugging spinlocks which detect and catch
15 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
16 * and rebuild your kernel.
17 */
18
19/* All of these locking primitives are expected to work properly
20 * even in an RMO memory model, which currently is what the kernel
21 * runs in.
22 *
23 * There is another issue. Because we play games to save cycles
24 * in the non-contention case, we need to be extra careful about
25 * branch targets into the "spinning" code. They live in their
26 * own section, but the newer V9 branches have a shorter range
27 * than the traditional 32-bit sparc branch variants. The rule
28 * is that the branches that go into and out of the spinner sections
29 * must be pre-V9 branches.
30 */
31
32#ifndef CONFIG_DEBUG_SPINLOCK
33
/* Non-debug spinlock: a single byte, 0 == unlocked, non-zero == held.
 * The byte is acquired with ldstub in the lock primitives below.
 */
typedef struct {
	volatile unsigned char lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;
#define SPIN_LOCK_UNLOCKED	(spinlock_t) {0,}

#define spin_lock_init(lp)	do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(lp)  ((lp)->lock != 0)

/* Spin (reads only, no stores) until the lock byte reads as zero. */
#define spin_unlock_wait(lp)	\
do {	membar("#LoadLoad");	\
} while((lp)->lock)
49static inline void _raw_spin_lock(spinlock_t *lock)
50{
51 unsigned long tmp;
52
53 __asm__ __volatile__(
54"1: ldstub [%1], %0\n"
David S. Millerb445e262005-06-27 15:42:04 -070055" membar #StoreLoad | #StoreStore\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -070056" brnz,pn %0, 2f\n"
David S. Millerb445e262005-06-27 15:42:04 -070057" nop\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -070058" .subsection 2\n"
59"2: ldub [%1], %0\n"
David S. Millerb445e262005-06-27 15:42:04 -070060" membar #LoadLoad\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -070061" brnz,pt %0, 2b\n"
David S. Millerb445e262005-06-27 15:42:04 -070062" nop\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -070063" ba,a,pt %%xcc, 1b\n"
64" .previous"
65 : "=&r" (tmp)
66 : "r" (lock)
67 : "memory");
68}
69
70static inline int _raw_spin_trylock(spinlock_t *lock)
71{
72 unsigned long result;
73
74 __asm__ __volatile__(
75" ldstub [%1], %0\n"
76" membar #StoreLoad | #StoreStore"
77 : "=r" (result)
78 : "r" (lock)
79 : "memory");
80
81 return (result == 0UL);
82}
83
84static inline void _raw_spin_unlock(spinlock_t *lock)
85{
86 __asm__ __volatile__(
87" membar #StoreStore | #LoadStore\n"
88" stb %%g0, [%0]"
89 : /* No outputs */
90 : "r" (lock)
91 : "memory");
92}
93
94static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
95{
96 unsigned long tmp1, tmp2;
97
98 __asm__ __volatile__(
99"1: ldstub [%2], %0\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700100" membar #StoreLoad | #StoreStore\n"
David S. Millerb445e262005-06-27 15:42:04 -0700101" brnz,pn %0, 2f\n"
102" nop\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103" .subsection 2\n"
104"2: rdpr %%pil, %1\n"
105" wrpr %3, %%pil\n"
106"3: ldub [%2], %0\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107" membar #LoadLoad\n"
David S. Millerb445e262005-06-27 15:42:04 -0700108" brnz,pt %0, 3b\n"
109" nop\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110" ba,pt %%xcc, 1b\n"
David S. Millerb445e262005-06-27 15:42:04 -0700111" wrpr %1, %%pil\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112" .previous"
113 : "=&r" (tmp1), "=&r" (tmp2)
114 : "r"(lock), "r"(flags)
115 : "memory");
116}
117
118#else /* !(CONFIG_DEBUG_SPINLOCK) */
119
/* Debug spinlock: records owner PC and CPU so _do_spin_lock() can
 * diagnose deadlocks.  owner_cpu is initialized to 0xff, i.e. "no
 * owner".  The real work lives out of line in the _do_* helpers.
 */
typedef struct {
	volatile unsigned char lock;
	unsigned int owner_pc, owner_cpu;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0, 0xff }
#define spin_lock_init(lp)	do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(__lock)	((__lock)->lock != 0)
#define spin_unlock_wait(__lock)	\
do { \
	membar("#LoadLoad"); \
} while((__lock)->lock)

extern void _do_spin_lock (spinlock_t *lock, char *str);
extern void _do_spin_unlock (spinlock_t *lock);
extern int _do_spin_trylock (spinlock_t *lock);

#define _raw_spin_trylock(lp)	_do_spin_trylock(lp)
#define _raw_spin_lock(lock)	_do_spin_lock(lock, "spin_lock")
#define _raw_spin_unlock(lock)	_do_spin_unlock(lock)
/* Debug build ignores @flags and takes the lock with interrupts as-is. */
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
143
144#endif /* CONFIG_DEBUG_SPINLOCK */
145
146/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
147
148#ifndef CONFIG_DEBUG_SPINLOCK
149
/* Non-debug rwlock: a 32-bit word holding the reader count, with the
 * sign bit (0x80000000) used as the writer flag by the primitives below.
 */
typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;
#define RW_LOCK_UNLOCKED	(rwlock_t) {0,}
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
158
159static void inline __read_lock(rwlock_t *lock)
160{
161 unsigned long tmp1, tmp2;
162
163 __asm__ __volatile__ (
164"1: ldsw [%2], %0\n"
165" brlz,pn %0, 2f\n"
166"4: add %0, 1, %1\n"
167" cas [%2], %0, %1\n"
168" cmp %0, %1\n"
David S. Millerb445e262005-06-27 15:42:04 -0700169" membar #StoreLoad | #StoreStore\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700170" bne,pn %%icc, 1b\n"
David S. Millerb445e262005-06-27 15:42:04 -0700171" nop\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700172" .subsection 2\n"
173"2: ldsw [%2], %0\n"
David S. Millerb445e262005-06-27 15:42:04 -0700174" membar #LoadLoad\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700175" brlz,pt %0, 2b\n"
David S. Millerb445e262005-06-27 15:42:04 -0700176" nop\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177" ba,a,pt %%xcc, 4b\n"
178" .previous"
179 : "=&r" (tmp1), "=&r" (tmp2)
180 : "r" (lock)
181 : "memory");
182}
183
184static void inline __read_unlock(rwlock_t *lock)
185{
186 unsigned long tmp1, tmp2;
187
188 __asm__ __volatile__(
189" membar #StoreLoad | #LoadLoad\n"
190"1: lduw [%2], %0\n"
191" sub %0, 1, %1\n"
192" cas [%2], %0, %1\n"
193" cmp %0, %1\n"
194" bne,pn %%xcc, 1b\n"
195" nop"
196 : "=&r" (tmp1), "=&r" (tmp2)
197 : "r" (lock)
198 : "memory");
199}
200
201static void inline __write_lock(rwlock_t *lock)
202{
203 unsigned long mask, tmp1, tmp2;
204
205 mask = 0x80000000UL;
206
207 __asm__ __volatile__(
208"1: lduw [%2], %0\n"
209" brnz,pn %0, 2f\n"
210"4: or %0, %3, %1\n"
211" cas [%2], %0, %1\n"
212" cmp %0, %1\n"
David S. Millerb445e262005-06-27 15:42:04 -0700213" membar #StoreLoad | #StoreStore\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214" bne,pn %%icc, 1b\n"
David S. Millerb445e262005-06-27 15:42:04 -0700215" nop\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216" .subsection 2\n"
217"2: lduw [%2], %0\n"
David S. Millerb445e262005-06-27 15:42:04 -0700218" membar #LoadLoad\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700219" brnz,pt %0, 2b\n"
David S. Millerb445e262005-06-27 15:42:04 -0700220" nop\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221" ba,a,pt %%xcc, 4b\n"
222" .previous"
223 : "=&r" (tmp1), "=&r" (tmp2)
224 : "r" (lock), "r" (mask)
225 : "memory");
226}
227
228static void inline __write_unlock(rwlock_t *lock)
229{
230 __asm__ __volatile__(
231" membar #LoadStore | #StoreStore\n"
232" stw %%g0, [%0]"
233 : /* no outputs */
234 : "r" (lock)
235 : "memory");
236}
237
238static int inline __write_trylock(rwlock_t *lock)
239{
240 unsigned long mask, tmp1, tmp2, result;
241
242 mask = 0x80000000UL;
243
244 __asm__ __volatile__(
245" mov 0, %2\n"
246"1: lduw [%3], %0\n"
247" brnz,pn %0, 2f\n"
248" or %0, %4, %1\n"
249" cas [%3], %0, %1\n"
250" cmp %0, %1\n"
David S. Millerb445e262005-06-27 15:42:04 -0700251" membar #StoreLoad | #StoreStore\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700252" bne,pn %%icc, 1b\n"
David S. Millerb445e262005-06-27 15:42:04 -0700253" nop\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700254" mov 1, %2\n"
255"2:"
256 : "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
257 : "r" (lock), "r" (mask)
258 : "memory");
259
260 return result;
261}
262
/* Map the generic _raw_* rwlock entry points onto the inline
 * implementations above.
 */
#define _raw_read_lock(p)	__read_lock(p)
#define _raw_read_unlock(p)	__read_unlock(p)
#define _raw_write_lock(p)	__write_lock(p)
#define _raw_write_unlock(p)	__write_unlock(p)
#define _raw_write_trylock(p)	__write_trylock(p)
268
269#else /* !(CONFIG_DEBUG_SPINLOCK) */
270
271typedef struct {
Al Viro489ec5f2005-04-20 17:12:41 -0700272 volatile unsigned long lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700273 unsigned int writer_pc, writer_cpu;
274 unsigned int reader_pc[NR_CPUS];
Al Viro489ec5f2005-04-20 17:12:41 -0700275#ifdef CONFIG_PREEMPT
276 unsigned int break_lock;
277#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700278} rwlock_t;
279#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, 0xff, { } }
280#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
281
282extern void _do_read_lock(rwlock_t *rw, char *str);
283extern void _do_read_unlock(rwlock_t *rw, char *str);
284extern void _do_write_lock(rwlock_t *rw, char *str);
285extern void _do_write_unlock(rwlock_t *rw);
286extern int _do_write_trylock(rwlock_t *rw, char *str);
287
288#define _raw_read_lock(lock) \
289do { unsigned long flags; \
290 local_irq_save(flags); \
291 _do_read_lock(lock, "read_lock"); \
292 local_irq_restore(flags); \
293} while(0)
294
295#define _raw_read_unlock(lock) \
296do { unsigned long flags; \
297 local_irq_save(flags); \
298 _do_read_unlock(lock, "read_unlock"); \
299 local_irq_restore(flags); \
300} while(0)
301
302#define _raw_write_lock(lock) \
303do { unsigned long flags; \
304 local_irq_save(flags); \
305 _do_write_lock(lock, "write_lock"); \
306 local_irq_restore(flags); \
307} while(0)
308
309#define _raw_write_unlock(lock) \
310do { unsigned long flags; \
311 local_irq_save(flags); \
312 _do_write_unlock(lock); \
313 local_irq_restore(flags); \
314} while(0)
315
316#define _raw_write_trylock(lock) \
317({ unsigned long flags; \
318 int val; \
319 local_irq_save(flags); \
320 val = _do_write_trylock(lock, "write_trylock"); \
321 local_irq_restore(flags); \
322 val; \
323})
324
325#endif /* CONFIG_DEBUG_SPINLOCK */
326
#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
/* Readers may enter unless the writer bit is set; a writer needs the
 * whole word (readers and writer bit) to be zero.
 */
#define read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
#define write_can_lock(rw)	(!(rw)->lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700330
331#endif /* !(__ASSEMBLY__) */
332
333#endif /* !(__SPARC64_SPINLOCK_H) */