/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
	unsigned int old_expected = old;

	asm volatile(
		"	cs	%0,%3,%1"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory");
	return old == old_expected;
}
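
/*
 * A minimal usage sketch (illustrative only, not part of this header):
 * COMPARE AND SWAP stores "new" into *lock only if *lock still holds
 * "old"; on failure the cs instruction loads the current value of *lock
 * into %0, so comparing against the saved expectation tells the caller
 * whether the swap took place.
 *
 *	unsigned int word = 0;
 *
 *	if (_raw_compare_and_swap(&word, 0, 1))
 *		... word was 0 and has been set to 1 atomically ...
 */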

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions: fairness guarantees would have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_relax(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}
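
/*
 * The value a CPU stores in the lock word while holding the lock is the
 * bitwise complement of its CPU number (cpu 0 holds 0xffffffff, cpu 1
 * holds 0xfffffffe, ...).  This keeps 0 free to mean "unlocked" and
 * lets the out-of-line wait path identify the current owner, e.g. to
 * yield the time slice to it when running virtualized.
 */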

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return ACCESS_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
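	/*
	 * Compiler barrier (assumed intent): force the unlocked test
	 * below to read a fresh lock value instead of one the compiler
	 * may have cached in a register.
	 */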
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
{
	return _raw_compare_and_swap(&lp->lock, SPINLOCK_LOCKVAL, 0);
}
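
/*
 * The release compare-and-swap above only succeeds if the lock word
 * still holds this CPU's lockval, i.e. only the current owner can
 * actually clear the lock; arch_spin_unlock() below ignores the return
 * value.
 */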

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	arch_spin_tryrelease_once(lp);
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		arch_spin_relax(lock);
}
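
/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * header).  The fast path is a single inlined compare-and-swap; only a
 * contended lock enters the out-of-line arch_spin_lock_wait() loop:
 *
 *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&lock);
 *	... critical section ...
 *	arch_spin_unlock(&lock);
 */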

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no
 * interrupt writers. For those circumstances we can "mix" irq-safe
 * locks - any writer needs to take an irq-safe write-lock, but readers
 * can take non-irqsafe read-locks.
 */

/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
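
/*
 * These slow-path helpers are implemented out of line (in
 * arch/s390/lib/spinlock.c in the kernel tree); only the uncontended
 * fast paths below are inlined.
 */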

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely((int) old >= 0 &&
		      _raw_compare_and_swap(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely(old == 0 &&
		      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait_flags(rw, flags);
}

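/*
 * Readers drop the lock by retrying a compare-and-swap that decrements
 * the reader count; the loop is needed because concurrent readers may
 * change the count between the load and the swap.
 */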
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old;

	do {
		old = ACCESS_ONCE(rw->lock);
	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
}

static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait_flags(rw, flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		return _raw_write_trylock_retry(rw);
	return 1;
}
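
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * multiple readers may hold the lock at once, while a writer excludes
 * readers and other writers alike.
 *
 *	arch_rwlock_t rw = __ARCH_RW_LOCK_UNLOCKED;
 *
 *	arch_read_lock(&rw);
 *	... readers may run concurrently here ...
 *	arch_read_unlock(&rw);
 *
 *	arch_write_lock(&rw);
 *	... exclusive access ...
 *	arch_write_unlock(&rw);
 */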

#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */