#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/bitops.h>

#include <asm/atomic.h>
#include <asm/intrinsics.h>
#include <asm/system.h>

#define __raw_spin_lock_init(x)			((x)->lock = 0)

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail.  The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 *   63                     32  31                      0
 *  +----------------------------------------------------+
 *  |  next_ticket_number      |  now_serving            |
 *  +----------------------------------------------------+
 */
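
/*
 * Illustrative sketch only, not part of this header: the same ticket
 * discipline written with C11 <stdatomic.h> atomics.  All names below
 * are hypothetical.
 *
 *	#include <stdatomic.h>
 *
 *	typedef struct {
 *		_Atomic unsigned int now_serving;	// head of the queue
 *		_Atomic unsigned int next_ticket;	// tail of the queue
 *	} example_ticket_lock_t;
 *
 *	static void example_lock(example_ticket_lock_t *t)
 *	{
 *		// take a ticket: atomically note and bump the tail
 *		unsigned int me = atomic_fetch_add(&t->next_ticket, 1);
 *		while (atomic_load(&t->now_serving) != me)
 *			;	// spin until our ticket is served
 *	}
 *
 *	static void example_unlock(example_ticket_lock_t *t)
 *	{
 *		atomic_fetch_add(&t->now_serving, 1);	// serve next waiter
 *	}
 */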

#define TICKET_SHIFT	32

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	int	*p = (int *)&lock->lock, turn, now_serving;

	/* little-endian: *p is now_serving, *(p+1) is next_ticket_number */
	now_serving = *p;
	turn = ia64_fetchadd(1, p+1, acq);	/* take the next ticket */

	if (turn == now_serving)
		return;

	do {
		cpu_relax();
	} while (ACCESS_ONCE(*p) != turn);	/* wait until we are served */
}

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	long tmp = ACCESS_ONCE(lock->lock), try;

	/* the lock is free iff next_ticket_number == now_serving */
	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) {
		try = tmp + (1L << TICKET_SHIFT);

		return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp;
	}
	return 0;
}

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	int	*p = (int *)&lock->lock;

	(void)ia64_fetchadd(1, p, rel);	/* advance now_serving */
}

static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
	long tmp = ACCESS_ONCE(lock->lock);

	/* locked iff now_serving != next_ticket_number */
	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1));
}

static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
	long tmp = ACCESS_ONCE(lock->lock);

	/* more than one ticket outstanding means someone is waiting */
	return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1;
}

static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define __raw_spin_is_contended	__raw_spin_is_contended

static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
						  unsigned long flags)
{
	__raw_spin_lock(lock);
}

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

#define __raw_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
#define __raw_write_can_lock(rw)	(*(volatile int *)(rw) == 0)

#ifdef ASM_SUPPORTED

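/*
 * Readers inc/dec a 32-bit counter; a writer owns the sign bit.  The
 * fast path below takes a reader reference with fetchadd4.acq; if the
 * result is negative a writer holds the lock, so we drop the reference
 * and spin (re-enabling interrupts while waiting, if @flags says they
 * were enabled) until the word goes non-negative, then retry.
 */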
static __always_inline void
__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
{
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1,%2\n"
		"br.few 3f\n"
		"1:\n"
		"fetchadd4.rel r2 = [%0], -1;;\n"
		"(p6) ssm psr.i\n"
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.lt p7,p0 = r2, r0\n"
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"
		";;\n"
		"3:\n"
		"fetchadd4.acq r2 = [%0], 1;;\n"
		"cmp4.lt p7,p0 = r2, r0\n"
		"(p7) br.cond.spnt.few 1b\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "p6", "p7", "r2", "memory");
}

#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)

#else /* !ASM_SUPPORTED */

#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)

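/*
 * C fallback: optimistically bump the reader count; if a writer held
 * the lock (the count went negative), back the reference out and spin
 * until the word is non-negative, then try again.
 */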
#define __raw_read_lock(rw)							\
do {										\
	raw_rwlock_t *__read_lock_ptr = (rw);					\
										\
	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {	\
		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);		\
		while (*(volatile int *)__read_lock_ptr < 0)			\
			cpu_relax();						\
	}									\
} while (0)

#endif /* !ASM_SUPPORTED */

#define __raw_read_unlock(rw)					\
do {								\
	raw_rwlock_t *__read_lock_ptr = (rw);			\
	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
} while (0)

#ifdef ASM_SUPPORTED

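/*
 * A writer claims the lock by cmpxchg'ing bit 31 into a word that reads
 * zero (no readers, no writer).  On failure it spins reading the word
 * (with interrupts re-enabled while waiting, if @flags says they were
 * on) until it sees zero, then retries the cmpxchg.
 */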
static __always_inline void
__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
{
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1, %2\n"
		"mov ar.ccv = r0\n"
		"dep r29 = -1, r0, 31, 1\n"
		"br.few 3f;;\n"
		"1:\n"
		"(p6) ssm psr.i\n"
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.eq p0,p7 = r0, r2\n"
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"
		";;\n"
		"3:\n"
		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
		"cmp4.eq p0,p7 = r0, r2\n"
		"(p7) br.cond.spnt.few 1b;;\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}

#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)

#define __raw_write_trylock(rw)						\
({									\
	register long result;						\
									\
	__asm__ __volatile__ (						\
		"mov ar.ccv = r0\n"					\
		"dep r29 = -1, r0, 31, 1;;\n"				\
		"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"			\
		: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");	\
	(result == 0);							\
})

static inline void __raw_write_unlock(raw_rwlock_t *x)
{
	u8 *y = (u8 *)x;
	barrier();
	/* release-store zero to the byte holding the write-lock bit (bit 31) */
	asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
}

#else /* !ASM_SUPPORTED */

#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)

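/*
 * C fallback: wait for the lock word to read zero, then try to install
 * the write bit (bit 31) with a 4-byte acquire cmpxchg; loop on failure.
 */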
#define __raw_write_lock(l)								\
({											\
	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	__u32 *ia64_write_lock_ptr = (__u32 *) (l);					\
	do {										\
		while (*ia64_write_lock_ptr)						\
			ia64_barrier();							\
		ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0);	\
	} while (ia64_val);								\
})

#define __raw_write_trylock(rw)						\
({									\
	__u64 ia64_val;							\
	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);	\
	(ia64_val == 0);						\
})

static inline void __raw_write_unlock(raw_rwlock_t *x)
{
	barrier();
	x->write_lock = 0;
}

#endif /* !ASM_SUPPORTED */

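/*
 * Build "expected" and "new" images of the lock word in a union: both
 * assume no writer is present, and "new" has the reader count bumped by
 * one.  A single 4-byte acquire cmpxchg then succeeds only if the lock
 * really was in the expected state.
 */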
static inline int __raw_read_trylock(raw_rwlock_t *x)
{
	union {
		raw_rwlock_t lock;
		__u32 word;
	} old, new;
	old.lock = new.lock = *x;
	old.lock.write_lock = new.lock.write_lock = 0;
	++new.lock.read_counter;
	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /*  _ASM_IA64_SPINLOCK_H */