/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

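/* For example (illustrative only), in the kernel configuration:
 *
 *	CONFIG_DEBUG_SPINLOCK=y
 */
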
/* Because we play games to save cycles in the non-contention case, we
 * need to be extra careful about branch targets into the "spinning"
 * code. They live in their own section, but the newer V9 branches
 * have a shorter range than the traditional 32-bit sparc branch
 * variants. The rule is that the branches that go into and out of
 * the spinner sections must be pre-V9 branches.
 */
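
/* Roughly, the pattern used throughout this file looks like the
 * following sketch (not code that is assembled here): the fast path
 * stays in the main text section, while the spin loop is emitted out
 * of line via .subsection:
 *
 *	1:	<atomic attempt to take the lock>
 *		<branch to 2f if it was busy>
 *	 .subsection 2
 *	2:	<poll with plain loads until it looks free>
 *		<branch back to 1b and retry the atomic attempt>
 *	 .previous
 */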

#define __raw_spin_is_locked(lp)	((lp)->lock != 0)

#define __raw_spin_unlock_wait(lp)	\
	do {	rmb();			\
	} while((lp)->lock)

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldub		[%1], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 1b\n"
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}

/* Returns 1 if the lock was acquired, 0 if it was already held. */
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long result;

	__asm__ __volatile__(
"	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore"
	: "=r" (result)
	: "r" (lock)
	: "memory");

	return (result == 0UL);
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#StoreStore | #LoadStore\n"
"	stb		%%g0, [%0]"
	: /* No outputs */
	: "r" (lock)
	: "memory");
}
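
/* Illustrative usage (a sketch, not part of this header): these __raw_*
 * primitives are not called directly; they back the generic spinlock
 * API from <linux/spinlock.h>, e.g.:
 *
 *	static DEFINE_SPINLOCK(my_lock);	(hypothetical lock)
 *
 *	spin_lock(&my_lock);
 *	... critical section ...
 *	spin_unlock(&my_lock);
 *
 *	if (spin_trylock(&my_lock)) {
 *		... critical section ...
 *		spin_unlock(&my_lock);
 *	}
 */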

/* As __raw_spin_lock(), but while the lock is busy we drop %pil back to
 * the caller's pre-lock interrupt level (passed in via flags) so that
 * interrupts need not stay disabled during the spin, then raise %pil
 * again before retrying the atomic grab.
 */
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	ldstub		[%2], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	rdpr		%%pil, %1\n"
"	wrpr		%3, %%pil\n"
"3:	ldub		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 3b\n"
"	 nop\n"
"	ba,pt		%%xcc, 1b\n"
"	 wrpr		%1, %%pil\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r"(lock), "r"(flags)
	: "memory");
}
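
/* Illustrative usage (a sketch, not part of this header): the _flags
 * variant backs spin_lock_irqsave(), which saves the current interrupt
 * state so the arch code can re-enable interrupts while the lock is
 * contended and restore the state at unlock time, e.g.:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	... critical section, local interrupts disabled ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 */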

/* Multi-reader locks; these are much saner than the 32-bit Sparc ones... */

static inline void __read_lock(raw_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,pn		%0, 2f\n"
"4:	 add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldsw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brlz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

/* Returns 1 if a reader reference was taken, 0 if a writer holds the lock. */
static inline int __read_trylock(raw_rwlock_t *lock)
{
	int tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,a,pn	%0, 2f\n"
"	 mov		0, %0\n"
"	add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 mov		1, %0\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");

	return tmp1;
}

static inline void __read_unlock(raw_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar		#StoreLoad | #LoadLoad\n"
"1:	lduw		[%2], %0\n"
"	sub		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%xcc, 1b\n"
"	 nop"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

static inline void __write_lock(raw_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"1:	lduw		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"4:	 or		%0, %3, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	lduw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock), "r" (mask)
	: "memory");
}

static inline void __write_unlock(raw_rwlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#LoadStore | #StoreStore\n"
"	stw		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

/* Returns 1 if the write lock was acquired, 0 if the lock was busy. */
static inline int __write_trylock(raw_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"	mov		0, %2\n"
"1:	lduw		[%3], %0\n"
"	brnz,pn		%0, 2f\n"
"	 or		%0, %4, %1\n"
"	cas		[%3], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	mov		1, %2\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
	: "r" (lock), "r" (mask)
	: "memory");

	return result;
}
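
/* Illustrative usage (a sketch, not part of this header): as with the
 * spinlock primitives above, these are normally reached through the
 * generic rwlock wrappers rather than called directly, e.g.:
 *
 *	static DEFINE_RWLOCK(my_rwlock);	(hypothetical lock)
 *
 *	read_lock(&my_rwlock);
 *	... readers may run concurrently ...
 *	read_unlock(&my_rwlock);
 *
 *	write_lock(&my_rwlock);
 *	... exclusive access ...
 *	write_unlock(&my_rwlock);
 */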

#define __raw_read_lock(p)	__read_lock(p)
#define __raw_read_trylock(p)	__read_trylock(p)
#define __raw_read_unlock(p)	__read_unlock(p)
#define __raw_write_lock(p)	__write_lock(p)
#define __raw_write_unlock(p)	__write_unlock(p)
#define __raw_write_trylock(p)	__write_trylock(p)

#define __raw_read_can_lock(rw)		(!((rw)->lock & 0x80000000UL))
#define __raw_write_can_lock(rw)	(!(rw)->lock)

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */