#ifndef _X86_SPINLOCK_H_
#define _X86_SPINLOCK_H_

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
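
/*
 * Worked example (added for illustration, not part of the original
 * comment): with NR_CPUS < 256 the lock word is 16 bits, tail in the
 * high byte and head in the low byte.  Starting from slock == 0x0000:
 *
 *   CPU A locks:    xaddw returns 0x0000, slock -> 0x0100; A's ticket
 *                   (0) equals the head (0), so A owns the lock.
 *   CPU B locks:    xaddw returns 0x0100, slock -> 0x0200; B's ticket
 *                   is 1, so B spins until the head byte becomes 1.
 *   CPU A unlocks:  incb bumps the head, slock -> 0x0201; B sees
 *                   head == 1 and takes the lock.
 */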
#if (NR_CPUS < 256)
static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 8) & 0xff) != (tmp & 0xff));
}

static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
}

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	short inc = 0x0100;

	asm volatile (
		LOCK_PREFIX "xaddw %w0, %1\n"
		"1:\t"
		"cmpb %h0, %b0\n\t"
		"je 2f\n\t"
		"rep ; nop\n\t"
		"movb %1, %b0\n\t"
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		: "+Q" (inc), "+m" (lock->slock)
		:
		: "memory", "cc");
}
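
/*
 * Rough C equivalent of the asm above (illustrative sketch only; the
 * real code must do the ticket grab with a single atomic xadd):
 *
 *	unsigned short old = xadd(&lock->slock, 0x0100);
 *	unsigned char ticket = old >> 8;	// our place in the queue
 *	while ((lock->slock & 0xff) != ticket)	// head != our ticket
 *		cpu_relax();			// "rep ; nop"
 */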

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	short new;

	asm volatile("movw %2,%w0\n\t"
		     "cmpb %h0,%b0\n\t"
		     "jne 1f\n\t"
		     "movw %w0,%w1\n\t"
		     "incb %h1\n\t"
		     "lock ; cmpxchgw %w1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}
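
/*
 * Rough C equivalent of the trylock above (illustrative sketch only):
 *
 *	unsigned short old = lock->slock;
 *	if ((old & 0xff) != (old >> 8))
 *		return 0;			// head != tail: locked
 *	// lock looks free: take a ticket and bump the tail in one shot
 *	return cmpxchg(&lock->slock, old, old + 0x0100) == old;
 */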

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
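
/*
 * Note (added commentary): the release is a plain "incb" of the head
 * byte.  x86 does not reorder stores against older stores, so no
 * locked operation or fence is needed here; the exception is PPro SMP
 * or OOSTORE, where UNLOCK_LOCK_PREFIX expands to LOCK_PREFIX (see the
 * errata note above).
 */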
#else
static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
}

static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
}

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	int inc = 0x00010000;
	int tmp;

	asm volatile("lock ; xaddl %0, %1\n"
		     "movzwl %w0, %2\n\t"
		     "shrl $16, %0\n\t"
		     "1:\t"
		     "cmpl %0, %2\n\t"
		     "je 2f\n\t"
		     "rep ; nop\n\t"
		     "movzwl %1, %2\n\t"
		     /* don't need lfence here, because loads are in-order */
		     "jmp 1b\n"
		     "2:"
		     : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
		     :
		     : "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	int new;

	asm volatile("movl %2,%0\n\t"
		     "movl %0,%1\n\t"
		     "roll $16, %0\n\t"
		     "cmpl %0,%1\n\t"
		     "jne 1f\n\t"
		     "addl $0x00010000, %1\n\t"
		     "lock ; cmpxchgl %1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#endif

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

#ifndef CONFIG_PARAVIRT
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}

static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}
#endif	/* CONFIG_PARAVIRT */

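/*
 * Note (added commentary): when CONFIG_PARAVIRT is enabled, the
 * __raw_spin_*() entry points above are supplied by asm/paravirt.h
 * instead, so the ticket-lock implementation can be replaced (e.g. by
 * a hypervisor-aware variant).
 */
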
static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */

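/*
 * Worked example (added for illustration): the counter starts at
 * RW_LOCK_BIAS (0x01000000 in asm/rwlock.h).  Readers subtract 1,
 * a writer subtracts the whole bias:
 *
 *   unlocked:            lock == RW_LOCK_BIAS
 *   two readers:         lock == RW_LOCK_BIAS - 2  (still positive)
 *   one writer:          lock == 0
 *   writer + contention: lock <  0  (sign bit set)
 */
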
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}
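
/*
 * Note (added commentary): the atomic_dec() and atomic_read() in
 * __raw_read_trylock() above are two separate operations, so the value
 * tested may already be stale.  The worst case is a spurious failure
 * (another CPU dipped the count below zero in between), which a
 * trylock is allowed to return; a genuine failure is undone with
 * atomic_inc().
 */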

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
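
/*
 * Note (added commentary): atomic_sub_and_test() in
 * __raw_write_trylock() above subtracts RW_LOCK_BIAS and tests for
 * zero in one locked operation; it reaches zero only if there were no
 * readers and no writer.  On failure the bias is added back, leaving
 * the lock untouched for the current owners.
 */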

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif