#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants: one that clears
 * IRQs on the local processor, and one that does not.
 *
 * These are fair FIFO ticket locks.  An 8-bit ticket variant is used when
 * NR_CPUS < 256; a 16-bit variant covers larger configurations.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
# define REG_PTR_MODE "k"
#else
# define LOCK_PTR_REG "D"
# define REG_PTR_MODE "q"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
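/*
 * For illustration only: a minimal, stand-alone C11 sketch of the same
 * algorithm (hypothetical names, not kernel code).  The kernel versions
 * below fuse both counters into one word so that a single xadd can take
 * a ticket and read the current head at the same time:
 *
 *	#include <stdatomic.h>
 *
 *	struct ticket_lock {
 *		_Atomic unsigned short next;	// tail: next ticket to hand out
 *		_Atomic unsigned short owner;	// head: ticket now being served
 *	};
 *
 *	static void ticket_lock(struct ticket_lock *t)
 *	{
 *		unsigned short me = atomic_fetch_add(&t->next, 1);
 *		while (atomic_load(&t->owner) != me)
 *			;	// spin; the asm below uses "rep ; nop" (PAUSE)
 *	}
 *
 *	static void ticket_unlock(struct ticket_lock *t)
 *	{
 *		atomic_fetch_add(&t->owner, 1);
 *	}
 */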
#if (NR_CPUS < 256)
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	unsigned short inc = 1 << TICKET_SHIFT;

	asm volatile (LOCK_PREFIX "xaddw %w0, %1\n"
		      "1:\t"
		      "cmpb %h0, %b0\n\t"
		      "je 2f\n\t"
		      "rep ; nop\n\t"
		      "movb %1, %b0\n\t"
		      /* don't need lfence here, because loads are in-order */
		      "jmp 1b\n"
		      "2:"
		      : "+Q" (inc), "+m" (lock->slock)
		      :
		      : "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp, new;

	asm volatile("movzwl %2, %0\n\t"
		     "cmpb %h0,%b0\n\t"
		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#else
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	unsigned inc = 1 << TICKET_SHIFT;
	unsigned tmp;

	asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
		     "movzwl %w0, %2\n\t"
		     "shrl $16, %0\n\t"
		     "1:\t"
		     "cmpl %0, %2\n\t"
		     "je 2f\n\t"
		     "rep ; nop\n\t"
		     "movzwl %1, %2\n\t"
		     /* don't need lfence here, because loads are in-order */
		     "jmp 1b\n"
		     "2:"
		     : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
		     :
		     : "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	unsigned tmp;
	unsigned new;

	asm volatile("movl %2,%0\n\t"
		     "movl %0,%1\n\t"
		     "roll $16, %0\n\t"
		     "cmpl %0,%1\n\t"
		     "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#endif
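
/*
 * Note that unlock is a plain increment of the head (incb/incw): only the
 * lock holder ever advances it, so no atomicity against other CPUs is
 * required.  UNLOCK_LOCK_PREFIX expands to a locked op only for the PPro
 * errata workaround above.
 */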

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return !!(tmp.tail ^ tmp.head);
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return ((tmp.tail - tmp.head) & TICKET_MASK) > 1;
}
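
/*
 * Worked example (hypothetical values): with head == 5 and tail == 7 there
 * are two tickets outstanding, so (7 - 5) & TICKET_MASK == 2 > 1 and the
 * lock counts as contended; with tail == 6 only the holder is queued.  The
 * mask keeps the subtraction correct across counter wrap-around.
 */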

#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

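/*
 * Wait until the lock is released; this does not acquire the lock, so a
 * caller that needs ordering against the critical section it waited for
 * must supply its own barriers.
 */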
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a biased counter
 * (the width and bias come from asm/rwlock.h), with the sign
 * bit being the "contended" bit.
 */
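
/*
 * For illustration only, the biased-counter idea in stand-alone C11
 * (a sketch under assumed values, not the kernel implementation; the
 * real bias, widths and helpers come from asm/rwlock.h):
 *
 *	#include <stdatomic.h>
 *
 *	#define BIAS 0x00100000			// assumed example bias
 *	static atomic_int count = ATOMIC_VAR_INIT(BIAS); // BIAS == unlocked
 *
 *	static int my_read_trylock(void)
 *	{
 *		if (atomic_fetch_sub(&count, 1) - 1 >= 0)
 *			return 1;		// no writer present
 *		atomic_fetch_add(&count, 1);	// undo and fail
 *		return 0;
 *	}
 *
 *	static int my_write_trylock(void)
 *	{
 *		if (atomic_fetch_sub(&count, BIAS) == BIAS)
 *			return 1;		// no readers, no writer
 *		atomic_fetch_add(&count, BIAS);	// undo and fail
 *		return 0;
 *	}
 */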

/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return lock->lock > 0;
}

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->write == WRITE_LOCK_CMP;
}
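
/*
 * Both helpers are unsynchronized snapshots: the answer can be stale by
 * the time the caller acts on it, so they are only useful as hints.
 */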

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
		     : "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

	if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
		return 1;
	READ_LOCK_ATOMIC(inc)(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)&lock->write;

	if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
		return 1;
	atomic_add(WRITE_LOCK_CMP, count);
	return 0;
}
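
/*
 * Example, assuming WRITE_LOCK_CMP equals the full bias: an uncontended
 * lock holds exactly RW_LOCK_BIAS, so subtracting WRITE_LOCK_CMP lands on
 * zero and the trylock succeeds; any active reader or writer leaves a
 * non-zero result, and the add undoes the failed attempt.
 */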

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
		     :"+m" (rw->lock) : : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/* On x86, the {read|write|spin}_lock() operations are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK

#endif /* _ASM_X86_SPINLOCK_H */