/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
        return __sync_bool_compare_and_swap(lock, old, new);
}
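
/*
 * A minimal sketch of the CAS semantics, assuming GCC's
 * __sync_bool_compare_and_swap() builtin: the new value is stored only
 * if *lock still equals old, and the return value reports success.
 *
 *	unsigned int lock = 0;
 *
 *	_raw_compare_and_swap(&lock, 0, 42);	succeeds, lock == 42
 *	_raw_compare_and_swap(&lock, 0, 7);	fails, lock stays 42
 *
 * On s390 the builtin compiles to a COMPARE AND SWAP instruction and
 * acts as a full memory barrier.
 */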

#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted
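
/*
 * Illustrative sketch only (try_acquire(), owner_cpu and yield() are
 * hypothetical names, not kernel API): spin loops can poll
 * vcpu_is_preempted() so a guest does not burn cycles waiting for a
 * lock holder whose virtual CPU the hypervisor has scheduled away.
 *
 *	while (!try_acquire(lock)) {
 *		if (vcpu_is_preempted(owner_cpu))
 *			yield();	give up the CPU instead of spinning
 *	}
 */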

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

void arch_lock_relax(unsigned int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
        arch_lock_relax(lock->lock);
}

static inline u32 arch_spin_lockval(int cpu)
{
        return ~cpu;
}
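
/*
 * The lock value is the bitwise complement of the CPU number, so it is
 * non-zero for every CPU and never collides with the unlocked state 0.
 * Sketch, assuming 32-bit unsigned arithmetic:
 *
 *	arch_spin_lockval(0) == 0xffffffff
 *	arch_spin_lockval(1) == 0xfffffffe
 *
 * Using ~cpu rather than cpu lets CPU 0 hold a lock without the lock
 * word looking unlocked.
 */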

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
        return ACCESS_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
        barrier();
        return likely(arch_spin_value_unlocked(*lp) &&
                      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
}
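
/*
 * The one-shot trylock above is the shared fast path: a single inline
 * CAS from 0 (unlocked) to SPINLOCK_LOCKVAL. The entry points below
 * all follow the same pattern and drop to the out-of-line wait/retry
 * helpers only when that CAS loses the race:
 *
 *	if (!arch_spin_trylock_once(lp))	fast path, inline
 *		arch_spin_lock_wait(lp);	slow path, out of line
 */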

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
                                        unsigned long flags)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                return arch_spin_trylock_retry(lp);
        return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
        typecheck(unsigned int, lp->lock);
        asm volatile(
                "st	%1,%0\n"
                : "+Q" (lp->lock)
                : "d" (0)
                : "cc", "memory");
}
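
/*
 * Unlock is a plain store of 0. A sketch of why that is enough,
 * assuming s390's strong store ordering: earlier stores in the
 * critical section become visible before the lock word is cleared,
 * and the "memory" clobber keeps the compiler from moving accesses
 * across the asm, so no explicit barrier instruction is required.
 */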

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        while (arch_spin_is_locked(lock))
                arch_spin_relax(lock);
        smp_acquire__after_ctrl_dep();
}
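
/*
 * arch_spin_unlock_wait() spins until the lock is observed free;
 * smp_acquire__after_ctrl_dep() then upgrades the control dependency
 * of the loop exit to acquire semantics, so loads issued after the
 * wait cannot read state from before the unlock that ended it.
 */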

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
        unsigned int old = ACCESS_ONCE(rw->lock);
        return likely((int) old >= 0 &&
                      _raw_compare_and_swap(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
        unsigned int old = ACCESS_ONCE(rw->lock);
        return likely(old == 0 &&
                      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
}
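
/*
 * Sketch of the rwlock word layout implied by the two helpers above:
 * bit 31 is the writer bit, the low 31 bits count readers.
 *
 *	0x00000000	unlocked
 *	0x00000003	three readers, no writer
 *	0x80000000	write-locked
 *
 * A reader CAS-increments the count only while the word is
 * non-negative (writer bit clear); a writer CASes the whole word from
 * 0 to 0x80000000 in one step.
 */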

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
        unsigned int old_val;				\
							\
        typecheck(unsigned int *, ptr);			\
        asm volatile(					\
                op_string "	%0,%2,%1\n"		\
                "bcr	14,0\n"				\
                : "=d" (old_val), "+Q" (*ptr)		\
                : "d" (op_val)				\
                : "cc", "memory");			\
        old_val;					\
})
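
/*
 * __RAW_LOCK wraps the z196 interlocked-access instructions (load and
 * or "lao", load and and "lan", load and add "laa"), which atomically
 * apply the operation to *ptr and return the previous value. The
 * trailing "bcr 14,0" is a fast serialization point, so taking the
 * lock doubles as a memory barrier; __RAW_UNLOCK below omits it for
 * the cheaper release case.
 */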

#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
        unsigned int old_val;				\
							\
        typecheck(unsigned int *, ptr);			\
        asm volatile(					\
                op_string "	%0,%2,%1\n"		\
                : "=d" (old_val), "+Q" (*ptr)		\
                : "d" (op_val)				\
                : "cc", "memory");			\
        old_val;					\
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned int old;

        old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
        if ((int) old < 0)
                _raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        __RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned int old;

        old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
        if (old != 0)
                _raw_write_lock_wait(rw, old);
        rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        rw->owner = 0;
        __RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}
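
/*
 * Sketch of the write-lock word transitions on this (z196) path:
 *
 *	arch_write_lock:    lao	old = lock; lock |= 0x80000000
 *	                    waits if old != 0 (readers or another writer)
 *	arch_write_unlock:  lan	lock &= 0x7fffffff (clears writer bit)
 *
 * The writer bit is set atomically even while readers still hold the
 * lock, which is why _raw_write_lock_wait() is passed the previous
 * value: it only has to wait for the reader count to drain.
 */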

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        if (!arch_read_trylock_once(rw))
                _raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int old;

        do {
                old = ACCESS_ONCE(rw->lock);
        } while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
}
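
/*
 * The loop above is the classic CAS retry pattern: another reader may
 * change the count between the read and the swap, so the decrement is
 * retried with the freshly observed value until it wins. The same
 * sketch works for any atomic update on this pre-z196 path:
 *
 *	do {
 *		old = ACCESS_ONCE(*word);
 *	} while (!_raw_compare_and_swap(word, old, old - 1));
 */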

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        if (!arch_write_trylock_once(rw))
                _raw_write_lock_wait(rw);
        rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        typecheck(unsigned int, rw->lock);

        rw->owner = 0;
        asm volatile(
                "st	%1,%0\n"
                : "+Q" (rw->lock)
                : "d" (0)
                : "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        if (!arch_read_trylock_once(rw))
                return _raw_read_trylock_retry(rw);
        return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
                return 0;
        rw->owner = SPINLOCK_LOCKVAL;
        return 1;
}

static inline void arch_read_relax(arch_rwlock_t *rw)
{
        arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
        arch_lock_relax(rw->owner);
}
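
/*
 * Both relax helpers hand the stored owner value (a SPINLOCK_LOCKVAL,
 * i.e. ~cpu) to arch_lock_relax(), which can direct the hypervisor to
 * run the preempted lock holder instead of spinning uselessly on a
 * virtual CPU.
 */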

#endif /* __ASM_SPINLOCK_H */