#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks.  The ticket fields are a byte wide
 * when NR_CPUS < 256 and a word wide otherwise, so the old 256-CPU
 * limit no longer applies.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
# define REG_PTR_MODE "k"
#else
# define LOCK_PTR_REG "D"
# define REG_PTR_MODE "q"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
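/*
 * Worked example (an illustrative sketch, not part of the original
 * file): assume byte-wide tickets (NR_CPUS < 256) and a free lock.
 *
 *	{ head = 0, tail = 0 }	lock free
 *	CPU A: xadd returns { 0, 0 }, lock becomes { 0, 1 };
 *	       A's snapshot has head == tail, so A owns the lock.
 *	CPU B: xadd returns { 0, 1 }, lock becomes { 0, 2 };
 *	       B spins until it reloads head == 1.
 *	CPU A: unlock bumps head, lock becomes { 1, 2 };
 *	       B's next reload sees head == 1 and B takes the lock.
 */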
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	register struct __raw_tickets inc = { .tail = 1 };

	inc = xadd(&lock->tickets, inc);

	for (;;) {
		if (inc.head == inc.tail)
			break;
		cpu_relax();
		inc.head = ACCESS_ONCE(lock->tickets.head);
	}
	barrier();	/* make sure nothing creeps before the lock is taken */
}

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	arch_spinlock_t old, new;

	old.tickets = ACCESS_ONCE(lock->tickets);
	if (old.tickets.head != old.tickets.tail)
		return 0;

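	/*
	 * Adding 1 << TICKET_SHIFT bumps only the tail field: with
	 * byte-wide tickets TICKET_SHIFT is 8 (see asm/spinlock_types.h),
	 * so this adds 0x100 and leaves the head byte untouched.
	 */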
	new.head_tail = old.head_tail + (1 << TICKET_SHIFT);

	/* cmpxchg is a full barrier, so nothing can move before it */
	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}

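/*
 * Unlock only needs to bump the head field.  Because x86 is
 * little-endian, head sits in the low byte(s) of head_tail, so a
 * partial-width inc on head_tail's address is enough: incb when the
 * ticket fields are one byte each (NR_CPUS < 256), incw when two.
 */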
#if (NR_CPUS < 256)
static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
		     : "+m" (lock->head_tail)
		     :
		     : "memory", "cc");
}
#else
static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
		     : "+m" (lock->head_tail)
		     :
		     : "memory", "cc");
}
#endif

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return !!(tmp.tail ^ tmp.head);
}

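/*
 * tail - head is the number of outstanding tickets: 0 means unlocked,
 * 1 means held with no waiters, and anything larger means at least one
 * other CPU is queued behind the holder.
 */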
static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return ((tmp.tail - tmp.head) & TICKET_MASK) > 1;
}

#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

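/*
 * Minimal usage sketch (illustrative only; kernel code should normally
 * use the generic spin_lock()/spin_unlock() wrappers from
 * <linux/spinlock.h> rather than this arch_ interface directly):
 *
 *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&lock);
 *	... critical section ...
 *	arch_spin_unlock(&lock);
 */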
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a counter biased by
 * RW_LOCK_BIAS: readers decrement it by one, a writer subtracts the
 * whole bias, and a negative count (the sign bit) tells a reader that
 * a writer holds or is waiting for the lock.
 */

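/*
 * Worked example of the biased counter (a sketch; RW_LOCK_BIAS and the
 * READ_LOCK_*()/WRITE_LOCK_*() helpers are defined outside this file,
 * in <asm/rwlock.h>, and WRITE_LOCK_CMP equals the bias in the common
 * configuration):
 *
 *	unlocked:	lock == RW_LOCK_BIAS
 *	N readers:	lock == RW_LOCK_BIAS - N	(still > 0)
 *	write-locked:	lock == 0	(the writer subtracted the bias)
 *	reader vs. writer: the dec drives the count negative, the sign
 *	flag is set, jns falls through, and __read_lock_failed backs
 *	the decrement out and retries.
 */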
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return lock->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->write == WRITE_LOCK_CMP;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
		     : "memory");
}

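/*
 * The trylock variants below speculatively apply the lock operation
 * and undo it again if the result shows the lock was already taken.
 */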
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

	if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
		return 1;
	READ_LOCK_ATOMIC(inc)(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)&lock->write;

	if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
		return 1;
	atomic_add(WRITE_LOCK_CMP, count);
	return 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
		     :"+m" (rw->lock) : : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK

#endif /* _ASM_X86_SPINLOCK_H */