#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/bitops.h>

#include <linux/atomic.h>
#include <asm/intrinsics.h>

#define arch_spin_lock_init(x)		((x)->lock = 0)

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail.  The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 * The pad bits in the middle are used to prevent the next_ticket number
 * overflowing into the now_serving number.
 *
 *  31             17  16    15  14                    0
 *  +----------------------------------------------------+
 *  |  now_serving     | padding |   next_ticket         |
 *  +----------------------------------------------------+
 */

#define TICKET_SHIFT	17
#define TICKET_BITS	15
#define TICKET_MASK	((1 << TICKET_BITS) - 1)
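
/*
 * A worked example of the encoding above (illustrative values, not
 * taken from real use): tmp = 0x00020003 decodes as
 *
 *	now_serving = (tmp >> TICKET_SHIFT) & TICKET_MASK;	== 1
 *	next_ticket = tmp & TICKET_MASK;			== 3
 *
 * i.e. ticket 1 holds the lock and ticket 2 is queued behind it.  The
 * lock is free exactly when now_serving == next_ticket, which is what
 * the ((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK tests below check.
 */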

static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	int	*p = (int *)&lock->lock, ticket, serve;

	/* atomically take the next ticket and snapshot now_serving */
	ticket = ia64_fetchadd(1, p, acq);

	if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
		return;

	ia64_invala();

	for (;;) {
		/*
		 * ld4.c.nc is a check load against the ALAT: after the
		 * ia64_invala() above it effectively refetches the word
		 * only when another CPU has written the cacheline,
		 * keeping the spin loop quiet.
		 */
		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");

		if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
			return;
		cpu_relax();
	}
}
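
/*
 * For reference, a minimal userspace sketch of the same ticket-lock
 * algorithm using C11 <stdatomic.h> (an illustration only, not kernel
 * code; the names are made up):
 *
 *	typedef struct {
 *		atomic_uint next;	// next ticket to hand out
 *		atomic_uint serving;	// ticket currently being served
 *	} tlock_t;
 *
 *	void tlock_lock(tlock_t *l)
 *	{
 *		unsigned int t = atomic_fetch_add(&l->next, 1);
 *		while (atomic_load(&l->serving) != t)
 *			;			// spin
 *	}
 *
 *	void tlock_unlock(tlock_t *l)
 *	{
 *		atomic_fetch_add(&l->serving, 1);
 *	}
 *
 * The ia64 implementation packs both counters into a single word so
 * that one fetchadd can take a ticket and observe now_serving
 * atomically.
 */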

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->lock);

	/* only attempt the cmpxchg if the lock looks free */
	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
		return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
	return 0;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	/* point at the upper halfword: pad bit 16 plus now_serving */
	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;

	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
	/* +2 advances now_serving by one; & ~1 clears the pad bit */
	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
}

static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
{
	int	*p = (int *)&lock->lock, ticket;

	ia64_invala();

	/* spin until now_serving catches up with next_ticket */
	for (;;) {
		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
		if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
			return;
		cpu_relax();
	}
}

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	long tmp = ACCESS_ONCE(lock->lock);

	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	long tmp = ACCESS_ONCE(lock->lock);

	/* contended if more than one ticket (holder plus a waiter) is out */
	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	__ticket_spin_unlock_wait(lock);
}

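/*
 * Read/write locks use a single 32-bit word: bit 31 is the write lock
 * and bits 0..30 count the readers (see the write_lock/read_counter
 * fields used in arch_read_trylock() below).  A negative value
 * therefore means a writer holds, or is acquiring, the lock.
 */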
#define arch_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
#define arch_write_can_lock(rw)	(*(volatile int *)(rw) == 0)

#ifdef ASM_SUPPORTED

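/*
 * Take a reader ticket with fetchadd4.acq.  If that raced with a
 * writer (the count went negative), back the increment out again,
 * spin with interrupts re-enabled when they were enabled in @flags
 * (tested via IA64_PSR_I_BIT), and retry once the word reads as
 * non-negative.
 */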
static __always_inline void
arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1,%2\n"
		"br.few 3f\n"
		"1:\n"
		"fetchadd4.rel r2 = [%0], -1;;\n"
		"(p6) ssm psr.i\n"
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.lt p7,p0 = r2, r0\n"
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"
		";;\n"
		"3:\n"
		"fetchadd4.acq r2 = [%0], 1;;\n"
		"cmp4.lt p7,p0 = r2, r0\n"
		"(p7) br.cond.spnt.few 1b\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "p6", "p7", "r2", "memory");
}

#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)

#else /* !ASM_SUPPORTED */

#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)

#define arch_read_lock(rw)							\
do {										\
	arch_rwlock_t *__read_lock_ptr = (rw);					\
										\
	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {	\
		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);		\
		while (*(volatile int *)__read_lock_ptr < 0)			\
			cpu_relax();						\
	}									\
} while (0)

#endif /* !ASM_SUPPORTED */

#define arch_read_unlock(rw)					\
do {								\
	arch_rwlock_t *__read_lock_ptr = (rw);			\
	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
} while (0)

#ifdef ASM_SUPPORTED

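/*
 * Try to cmpxchg the whole word from 0 to 0x80000000 (just the
 * write_lock bit, built with "dep r29 = -1, r0, 31, 1").  On failure,
 * spin with interrupts re-enabled when they were enabled in @flags
 * until the word reads 0, then retry the cmpxchg.
 */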
static __always_inline void
arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1, %2\n"
		"mov ar.ccv = r0\n"
		"dep r29 = -1, r0, 31, 1\n"
		"br.few 3f;;\n"
		"1:\n"
		"(p6) ssm psr.i\n"
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.eq p0,p7 = r0, r2\n"
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"
		";;\n"
		"3:\n"
		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
		"cmp4.eq p0,p7 = r0, r2\n"
		"(p7) br.cond.spnt.few 1b;;\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}

#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)

#define arch_write_trylock(rw)						\
({									\
	register long result;						\
									\
	__asm__ __volatile__ (						\
		"mov ar.ccv = r0\n"					\
		"dep r29 = -1, r0, 31, 1;;\n"				\
		"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"			\
		: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");	\
	(result == 0);							\
})

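/*
 * Drop the write lock by clearing the byte that holds the write_lock
 * bit (byte 3 on little-endian ia64, i.e. bits 24..31) with release
 * semantics.
 */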
static inline void arch_write_unlock(arch_rwlock_t *x)
{
	u8 *y = (u8 *)x;
	barrier();
	asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
}

#else /* !ASM_SUPPORTED */

#define arch_write_lock_flags(l, flags) arch_write_lock(l)

#define arch_write_lock(l)							\
({										\
	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);		\
	__u32 *ia64_write_lock_ptr = (__u32 *) (l);				\
	do {									\
		while (*ia64_write_lock_ptr)					\
			ia64_barrier();						\
		ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0); \
	} while (ia64_val);							\
})

#define arch_write_trylock(rw)						\
({									\
	__u64 ia64_val;							\
	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);	\
	(ia64_val == 0);						\
})

static inline void arch_write_unlock(arch_rwlock_t *x)
{
	barrier();
	x->write_lock = 0;
}

#endif /* !ASM_SUPPORTED */

static inline int arch_read_trylock(arch_rwlock_t *x)
{
	union {
		arch_rwlock_t lock;
		__u32 word;
	} old, new;
	old.lock = new.lock = *x;
	old.lock.write_lock = new.lock.write_lock = 0;
	++new.lock.read_counter;
	/* succeeds only if no writer appeared since the snapshot */
	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ASM_IA64_SPINLOCK_H */