#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * Type of int is used, as a full 64-bit word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-405.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif
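
/*
 * Editorial note (an assumption based on the field layout in
 * asm/paca.h, where lock_token is the constant 0x8000 adjacent to the
 * u16 paca_index, with the pair swapped on little-endian): both
 * variants above read the same 32-bit token.  For example, while
 * CPU 5 holds a lock:
 *
 *	lock->slock == 0x80000005;	// 0x8000 << 16 | paca_index
 *
 * and a lock word of 0 always means "unlocked".
 */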

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif

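/*
 * Editorial sketch (an assumption: the MMIO accessors in asm/io.h set
 * paca->io_sync on writes): SYNC_IO makes the unlock path order MMIO
 * done inside the critical section before the lock release, e.g.:
 *
 *	spin_lock(&dev_lock);			// hypothetical lock
 *	writel(val, regs + CTRL_REG);		// sets paca->io_sync
 *	spin_unlock(&dev_lock);			// SYNC_IO: mb(), then release
 *
 * so the device write cannot appear to happen outside the lock.
 */
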
#ifdef CONFIG_PPC_PSERIES
DECLARE_STATIC_KEY_FALSE(shared_processor);

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!static_branch_unlikely(&shared_processor))
		return false;
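	/* The low bit of yield_count is set while the vcpu is preempted. */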
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
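	/*
	 * Full barrier (added by commit 51d7d52) so the lock word is
	 * not sampled before earlier memory accesses.
	 */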
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}
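
/*
 * Editorial sketch (not part of the original header): the lwarx/stwcx.
 * loop above behaves roughly like a compare-and-swap with acquire
 * ordering on success:
 *
 *	u32 old = 0;
 *	if (__atomic_compare_exchange_n(&lock->slock, &old, token, false,
 *					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
 *		return 0;	// got the lock, old value was 0
 *	return old;		// non-zero: the current holder's token
 *
 * The trailing ",1" in PPC_LWARX sets the exclusive-access hint (EH)
 * bit, marking this reservation as part of a lock acquisition.
 */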

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

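/*
 * Editorial sketch, paraphrasing __spin_yield() in
 * arch/powerpc/lib/locks.c (an assumption about that implementation):
 *
 *	holder_cpu = lock->slock & 0xffff;
 *	yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
 *	if ((yield_count & 1) == 0)
 *		return;			// holder is running, keep spinning
 *	plpar_hcall_norets(H_CONFER,
 *			   get_hard_smp_processor_id(holder_cpu),
 *			   yield_count);	// confer our timeslice
 */
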
#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

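/*
 * Editorial note: while the lock is contended, the variant below
 * restores the caller's saved interrupt state (flags) so pending
 * interrupts can be serviced during the spin, then disables interrupts
 * again (flags_dis) before retrying the trylock.
 */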
static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

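/*
 * Illustrative usage (editorial): these arch_* primitives are normally
 * reached through the generic wrappers in <linux/spinlock.h>, e.g.:
 *
 *	static DEFINE_SPINLOCK(my_lock);	// hypothetical lock
 *
 *	spin_lock(&my_lock);
 *	// critical section
 *	spin_unlock(&my_lock);
 */
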
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif
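
/*
 * Editorial note: rw->lock is treated as a signed count: 0 means
 * unlocked, a positive value is the number of readers, and a writer
 * stores WRLOCK_TOKEN, which is negative once sign-extended
 * (0x8000xxxx on ppc64, -1 on ppc32):
 *
 *	rw->lock == 0;		// unlocked
 *	rw->lock == 3;		// three readers
 *	(s32)rw->lock < 0;	// write-locked
 */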

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
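
/*
 * Editorial sketch: the loop above atomically performs, with acquire
 * ordering on success,
 *
 *	old = (long)(s32)rw->lock;	// sign-extended on ppc64
 *	if (old + 1 > 0)		// no writer holds the lock
 *		rw->lock = old + 1;	// take a reader reference
 *	return old + 1;			// <= 0 means a writer won
 */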

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)
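
/*
 * Editorial note (an assumption about the generic callers): the
 * contended slow paths built in kernel/locking/spinlock.c call these
 * arch_*_relax() hooks while spinning, so on shared-processor LPARs
 * they too confer the timeslice to the lock holder via __spin_yield()
 * and __rw_yield().
 */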

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */