#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
#include <asm/bitops.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants: one that clears
 * IRQs on the local processor, and one that does not.
 *
 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
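
/*
 * Illustrative sketch (not part of this header): callers normally reach
 * these operations through the generic <linux/spinlock.h> wrappers
 * rather than calling arch_spin_*() directly.  The two variants
 * mentioned above map to, e.g. (my_lock/my_func are hypothetical):
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	void my_func(void)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock(&my_lock);			leaves IRQs alone
 *		spin_unlock(&my_lock);
 *
 *		spin_lock_irqsave(&my_lock, flags);	disables local IRQs
 *		spin_unlock_irqrestore(&my_lock, flags);
 *	}
 */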

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PPRO_FENCE)
/*
 * On PPro SMP, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

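/*
 * Sketch of the effect: the ticket unlock in arch_spin_unlock() below
 * is __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX).
 * On the PPro configurations above this expands to a LOCK-prefixed add
 * on head; everywhere else the prefix is empty and the unlock is a
 * plain add.
 */
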
/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD	(1 << 15)

extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
{
	set_bit(0, (volatile unsigned long *)&lock->tickets.head);
}

#else  /* !CONFIG_PARAVIRT_SPINLOCKS */

static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
						   __ticket_t ticket)
{
}

static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
					__ticket_t ticket)
{
}

#endif /* CONFIG_PARAVIRT_SPINLOCKS */

static inline int __tickets_equal(__ticket_t one, __ticket_t two)
{
	return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
}
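
/*
 * Worked example (assuming TICKET_SLOWPATH_FLAG == 1, i.e. paravirt):
 * for head == 0x9 (ticket 0x8 with the slowpath bit set) and
 * tail == 0x8, (0x9 ^ 0x8) == 0x1, which ~TICKET_SLOWPATH_FLAG masks
 * off, so the tickets compare equal.  Only the ticket value is ever
 * compared, never the flag bit.
 */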

static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock,
						     __ticket_t head)
{
	if (head & TICKET_SLOWPATH_FLAG) {
		arch_spinlock_t old, new;

		old.tickets.head = head;
		new.tickets.head = head & ~TICKET_SLOWPATH_FLAG;
		old.tickets.tail = new.tickets.head + TICKET_LOCK_INC;
		new.tickets.tail = old.tickets.tail;

		/* try to clear slowpath flag when there are no contenders */
		cmpxchg(&lock->head_tail, old.head_tail, new.head_tail);
	}
}
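
/*
 * Sketch of the case above (assuming TICKET_LOCK_INC == 2, flag == 1):
 * if we hold the lock with head == 0x5 (ticket 0x4 plus the slowpath
 * flag) and tail == 0x6, we are the only holder and nobody is waiting,
 * so the cmpxchg replaces ((0x6 << TICKET_SHIFT) | 0x5) with
 * ((0x6 << TICKET_SHIFT) | 0x4), clearing the flag.  If another CPU
 * has taken a ticket meanwhile (tail != head + TICKET_LOCK_INC), the
 * cmpxchg simply fails and the flag is left for the unlocker to handle.
 */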

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return __tickets_equal(lock.tickets.head, lock.tickets.tail);
}

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the
 * head becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 */
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };

	inc = xadd(&lock->tickets, inc);
	if (likely(inc.head == inc.tail))
		goto out;

	for (;;) {
		unsigned count = SPIN_THRESHOLD;

		do {
			inc.head = READ_ONCE(lock->tickets.head);
			if (__tickets_equal(inc.head, inc.tail))
				goto clear_slowpath;
			cpu_relax();
		} while (--count);
		__ticket_lock_spinning(lock, inc.tail);
	}
clear_slowpath:
	__ticket_check_and_clear_slowpath(lock, inc.head);
out:
	barrier();	/* make sure nothing creeps before the lock is taken */
}
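
/*
 * Worked trace of the xadd fast path (assuming TICKET_LOCK_INC == 1,
 * no paravirt): with the lock at { head = 0, tail = 0 }, CPU A's xadd
 * returns { 0, 0 } and leaves { 0, 1 }; head == tail, so A owns the
 * lock.  CPU B's xadd then returns { 0, 1 } and leaves { 0, 2 }; B
 * spins until unlock advances head to 1, i.e. to B's ticket.
 */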

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	arch_spinlock_t old, new;

	old.tickets = READ_ONCE(lock->tickets);
	if (!__tickets_equal(old.tickets.head, old.tickets.tail))
		return 0;

	new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
	new.head_tail &= ~TICKET_SLOWPATH_FLAG;

	/* cmpxchg is a full barrier, so nothing can move before it */
	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}
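
/*
 * For illustration (TICKET_SHIFT == 8 * sizeof(__ticket_t)): tail lives
 * in the high half of head_tail, so adding (TICKET_LOCK_INC <<
 * TICKET_SHIFT) above takes the next ticket without touching head.
 * E.g. for a 16-bit head_tail with head == 0x04, tail == 0x04 and
 * TICKET_LOCK_INC == 1, the cmpxchg attempts 0x0404 -> 0x0504 and
 * succeeds only if nobody raced in between.
 */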

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	if (TICKET_SLOWPATH_FLAG &&
	    static_key_false(&paravirt_ticketlocks_enabled)) {
		__ticket_t head;

		BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);

		head = xadd(&lock->tickets.head, TICKET_LOCK_INC);

		if (unlikely(head & TICKET_SLOWPATH_FLAG)) {
			head &= ~TICKET_SLOWPATH_FLAG;
			__ticket_unlock_kick(lock, (head + TICKET_LOCK_INC));
		}
	} else
		__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
}
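
/*
 * Note on the fast path above: x86 stores are not reordered with older
 * stores, so the plain (non-LOCK-prefixed) add of TICKET_LOCK_INC to
 * head is enough to release the lock, except on the PPro
 * configurations where UNLOCK_LOCK_PREFIX supplies a LOCK prefix (see
 * the top of this file).
 */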

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = READ_ONCE(lock->tickets);

	return !__tickets_equal(tmp.tail, tmp.head);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = READ_ONCE(lock->tickets);

	tmp.head &= ~TICKET_SLOWPATH_FLAG;
	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
}
#define arch_spin_is_contended	arch_spin_is_contended

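/*
 * Worked example for arch_spin_is_contended() above (assuming
 * TICKET_LOCK_INC == 1): tail - head == 0 means unlocked, 1 means
 * locked with no waiters, and anything larger means at least one CPU
 * is spinning -- only then do we report contention.
 */
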
static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	__ticket_t head = READ_ONCE(lock->tickets.head);

	for (;;) {
		struct __raw_tickets tmp = READ_ONCE(lock->tickets);
		/*
		 * We need to check "unlocked" in a loop, because
		 * tmp.head == head can be a false positive due to overflow.
		 */
		if (__tickets_equal(tmp.head, tmp.tail) ||
		    !__tickets_equal(tmp.head, head))
			break;

		cpu_relax();
	}
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, read-write locks are implemented using the generic qrwlock
 * with x86-specific optimizations.
 */

#include <asm/qrwlock.h>
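
/*
 * Illustrative sketch of the irq-mixing described above (again via the
 * generic <linux/spinlock.h> wrappers; my_rwlock is hypothetical):
 *
 *	static DEFINE_RWLOCK(my_rwlock);
 *	unsigned long flags;
 *
 *	read_lock(&my_rwlock);			readers need not disable IRQs
 *	read_unlock(&my_rwlock);
 *
 *	write_lock_irqsave(&my_rwlock, flags);	writers must be irq-safe
 *	write_unlock_irqrestore(&my_rwlock, flags);
 */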

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ASM_X86_SPINLOCK_H */