/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#include <linux/threads.h>	/* For NR_CPUS */

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */
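/* For example (illustrative only), enable the option in your .config and
 * rebuild:
 *
 *	CONFIG_DEBUG_SPINLOCK=y
 *	$ make && make modules_install && make install
 */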

/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants.  The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */

#define __raw_spin_is_locked(lp)	((lp)->lock != 0)

#define __raw_spin_unlock_wait(lp)	\
	do {	rmb();			\
	} while((lp)->lock)

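/* A minimal C-level sketch of the locked asm below (illustrative only;
 * the real atomicity and memory ordering come from ldstub and the
 * membars):
 *
 *	while (ldstub(&lock->lock) != 0)	// try to grab the byte
 *		while (lock->lock != 0)		// busy: spin on plain
 *			;			// loads, then retry
 */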
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldub		[%1], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 1b\n"
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}

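/* Single ldstub attempt: grab the lock byte once and report success,
 * never spinning.  Roughly (illustrative only):
 *
 *	return ldstub(&lock->lock) == 0;
 */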
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long result;

	__asm__ __volatile__(
"	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore"
	: "=r" (result)
	: "r" (lock)
	: "memory");

	return (result == 0UL);
}

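/* Release is a plain byte store of zero; the leading membar keeps the
 * loads and stores of the critical section from drifting past the
 * unlocking store.
 */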
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#StoreStore | #LoadStore\n"
"	stb		%%g0, [%0]"
	: /* No outputs */
	: "r" (lock)
	: "memory");
}

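/* Like __raw_spin_lock(), but while spinning we temporarily write the
 * %pil the caller saved in 'flags' (so interrupts can be serviced while
 * we wait), then re-raise %pil to its on-entry value before retrying
 * the ldstub.
 */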
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	ldstub		[%2], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	rdpr		%%pil, %1\n"
"	wrpr		%3, %%pil\n"
"3:	ldub		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 3b\n"
"	 nop\n"
"	ba,pt		%%xcc, 1b\n"
"	 wrpr		%1, %%pil\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r"(lock), "r"(flags)
	: "memory");
}

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */

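/* The rwlock word is a signed 32-bit counter: each reader adds one and a
 * writer sets bit 31, making the value negative.  A rough C sketch of the
 * cas loop in __read_lock() below (illustrative only):
 *
 *	for (;;) {
 *		old = lock->lock;
 *		if (old < 0)			// writer active
 *			continue;		// (spin on plain loads)
 *		if (cas(&lock->lock, old, old + 1) == old)
 *			break;
 *	}
 */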
static inline void __read_lock(raw_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,pn		%0, 2f\n"
"4:	 add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldsw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brlz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

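/* One shot read-lock attempt: returns 1 once the reader count has been
 * bumped, 0 as soon as a writer is seen holding the lock.  The cas is
 * retried on contention with other readers; only a negative (writer)
 * value bails out.
 */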
static inline int __read_trylock(raw_rwlock_t *lock)
{
	int tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,a,pn	%0, 2f\n"
"	 mov		0, %0\n"
"	add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 mov		1, %0\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");

	return tmp1;
}

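/* Drop one reader: atomically decrement the counter with a cas loop.
 * The leading membar orders the critical section's accesses before the
 * releasing update.
 */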
static inline void __read_unlock(raw_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar		#StoreLoad | #LoadLoad\n"
"1:	lduw		[%2], %0\n"
"	sub		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%xcc, 1b\n"
"	 nop"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

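/* Writer side: wait until the word is zero (no readers, no writer), then
 * cas in the writer bit.  Rough sketch (illustrative only):
 *
 *	do {
 *		while (lock->lock != 0)
 *			;			// spin on plain loads
 *	} while (cas(&lock->lock, 0, 0x80000000) != 0);
 */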
static inline void __write_lock(raw_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"1:	lduw		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"4:	 or		%0, %3, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	lduw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock), "r" (mask)
	: "memory");
}

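/* Writer release: a single 32-bit store of zero clears the writer bit
 * (there can be no readers while we hold it); the membar keeps the
 * critical section's loads and stores before the releasing store.
 */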
static inline void __write_unlock(raw_rwlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#LoadStore | #StoreStore\n"
"	stw		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

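/* One shot writer attempt: returns 1 if the word was seen as zero and
 * the cas installed the writer bit, 0 as soon as any reader or writer
 * is observed holding the lock; it never spins waiting for the lock to
 * clear.
 */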
static inline int __write_trylock(raw_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"	mov		0, %2\n"
"1:	lduw		[%3], %0\n"
"	brnz,pn		%0, 2f\n"
"	 or		%0, %4, %1\n"
"	cas		[%3], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	mov		1, %2\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
	: "r" (lock), "r" (mask)
	: "memory");

	return result;
}

#define __raw_read_lock(p)	__read_lock(p)
#define __raw_read_trylock(p)	__read_trylock(p)
#define __raw_read_unlock(p)	__read_unlock(p)
#define __raw_write_lock(p)	__write_lock(p)
#define __raw_write_unlock(p)	__write_unlock(p)
#define __raw_write_trylock(p)	__write_trylock(p)

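/* A read can be granted whenever the writer bit (bit 31) is clear; a
 * write only when the whole word is zero, i.e. no readers and no writer.
 */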
#define __raw_read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
#define __raw_write_can_lock(rw)	(!(rw)->lock)

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */