#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU to hold the lock
 * at any time.
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
# define REG_PTR_MODE "k"
#else
# define LOCK_PTR_REG "D"
# define REG_PTR_MODE "q"
#endif
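
/*
 * Editor's note, assuming GCC's documented x86 inline-asm semantics:
 * LOCK_PTR_REG pins the rwlock pointer into the register the out-of-line
 * lock-failed helpers below expect it in ("a" = eax on 32-bit, "D" = rdi
 * on 64-bit), and REG_PTR_MODE is the operand-size modifier ("k" = 32-bit
 * register name, "q" = 64-bit) used so the leal address arithmetic in the
 * trylocks gets a pointer-width base register.
 */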

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
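
/*
 * An illustrative sketch only (not part of this header, and not how the
 * kernel builds it): the same head/tail protocol in plain C using GCC's
 * __sync builtins, assuming the 8-bit-ticket layout below on a
 * little-endian x86, where the low byte of the 16-bit lock word is the
 * head:
 *
 *	typedef struct { volatile unsigned short slock; } demo_ticket_t;
 *
 *	static void demo_ticket_lock(demo_ticket_t *lock)
 *	{
 *		// one atomic xadd takes a ticket (the old tail, high
 *		// byte) and reads the head (low byte) at the same time
 *		unsigned short old =
 *			__sync_fetch_and_add(&lock->slock, 0x0100);
 *		unsigned char ticket = old >> 8;
 *
 *		while (*(volatile unsigned char *)&lock->slock != ticket)
 *			;	// spin until the head reaches our ticket
 *	}
 *
 *	static void demo_ticket_unlock(demo_ticket_t *lock)
 *	{
 *		// only the holder writes the head byte, so a plain
 *		// increment hands the lock to the next ticket
 *		++*(volatile unsigned char *)&lock->slock;
 *	}
 */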
#if (NR_CPUS < 256)
#define TICKET_SHIFT 8

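/*
 * The xaddw below adds 0x0100 to the lock word: it takes a ticket from
 * the tail (high byte) and fetches the old word in one atomic operation,
 * so afterwards %h0 holds our ticket and %b0 the head at that moment.
 * The loop then re-reads the head byte until it equals our ticket.
 */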
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	short inc = 0x0100;

	asm volatile (
		LOCK_PREFIX "xaddw %w0, %1\n"
		"1:\t"
		"cmpb %h0, %b0\n\t"
		"je 2f\n\t"
		"rep ; nop\n\t"
		"movb %1, %b0\n\t"
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		: "+Q" (inc), "+m" (lock->slock)
		:
		: "memory", "cc");
}

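/*
 * Trylock loads the whole lock word and, only when head == tail (the
 * lock is free), attempts a cmpxchg with the tail incremented.  The sete
 * turns the outcome into 0/1: ZF is set by a successful cmpxchg and is
 * still clear on the jne path that skipped it.
 */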
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	int tmp, new;

	asm volatile("movzwl %2, %0\n\t"
		     "cmpb %h0,%b0\n\t"
		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

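/*
 * Unlock just advances the head byte.  Only the lock holder writes it,
 * so no atomic operation is needed except on the PPro/OOSTORE
 * configurations covered by UNLOCK_LOCK_PREFIX above.
 */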
static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#else
#define TICKET_SHIFT 16

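/*
 * With NR_CPUS >= 256 each ticket needs 16 bits, so the lock word is 32
 * bits wide and the partial-register trick above no longer applies; the
 * ticket is extracted with an explicit shift (shrl/roll) instead.
 */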
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	int inc = 0x00010000;
	int tmp;

	asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
		     "movzwl %w0, %2\n\t"
		     "shrl $16, %0\n\t"
		     "1:\t"
		     "cmpl %0, %2\n\t"
		     "je 2f\n\t"
		     "rep ; nop\n\t"
		     "movzwl %1, %2\n\t"
		     /* don't need lfence here, because loads are in-order */
		     "jmp 1b\n"
		     "2:"
		     : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
		     :
		     : "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	int tmp;
	int new;

	asm volatile("movl %2,%0\n\t"
		     "movl %0,%1\n\t"
		     "roll $16, %0\n\t"
		     "cmpl %0,%1\n\t"
		     "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#endif

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
}
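
/*
 * Worked example with TICKET_SHIFT == 8: a lock word of 0x0503 means
 * tail == 5 and head == 3, so is_locked sees (5 ^ 3) & 0xff != 0 (held)
 * and is_contended sees (5 - 3) & 0xff == 2 > 1 (a holder plus at least
 * one waiter).  A free lock has head == tail and both tests come out 0.
 */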

#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}

#endif /* CONFIG_PARAVIRT_SPINLOCKS */

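/*
 * Wait until the lock is released, without acquiring it ourselves: just
 * spin while somebody else holds it.
 */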
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
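
/*
 * Worked example: the counter starts at RW_LOCK_BIAS (0x01000000).  Each
 * reader subtracts 1, so three readers leave 0x00fffffd, still positive,
 * and further readers may enter.  A writer subtracts the whole bias and
 * owns the lock only if the result is exactly zero, i.e. there were no
 * readers and no writer.
 */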

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

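/*
 * The fast path of both lock operations below is a single locked subtract
 * whose flags are tested directly (jns for readers, jz for writers); on
 * contention the out-of-line __read_lock_failed/__write_lock_failed
 * helpers back the count out, spin until the lock looks free, and retry.
 */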
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_dec_return(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK

#endif /* _ASM_X86_SPINLOCK_H */