#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int is used for the lock word, as a full 64-bit word is not
 * necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif

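/*
 * On 64-bit SMP the MMIO accessors set paca->io_sync after touching
 * I/O; SYNC_IO in unlock then issues a full mb() so those I/O
 * accesses cannot leak past the lock release, and CLEAR_IO_SYNC in
 * lock resets the flag for the new critical section.
 */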
#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif

#ifdef CONFIG_PPC_PSERIES
#define vcpu_is_preempted vcpu_is_preempted
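/*
 * The hypervisor bumps our lppaca yield_count on every dispatch and
 * preemption, so an odd value means the virtual processor is
 * currently preempted rather than running on a physical CPU.
 */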
static inline bool vcpu_is_preempted(int cpu)
{
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
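	/*
	 * Full barrier so that prior memory accesses are ordered
	 * against the load of the lock word (added in commit 51d7d52).
	 */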
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */
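/* For example, while CPU 3 holds a lock, lock->slock == 0x80000003. */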

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}
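
/*
 * These arch hooks sit underneath the generic spinlock API; a minimal
 * usage sketch (with a hypothetical lock), going through the generic
 * wrappers from include/linux/spinlock.h rather than the arch_*
 * entry points directly:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);
 *	... critical section ...
 *	spin_unlock(&my_lock);
 */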

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif
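
/*
 * The rwlock word is interpreted as a signed value: 0 means unlocked,
 * a positive value is the number of active readers, and a negative
 * value (WRLOCK_TOKEN, sign-extended on 64-bit) means write-locked.
 */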

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

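/*
 * Readers must drop the lock with an atomic decrement, since other
 * readers may be updating the count concurrently; the exclusive
 * writer below can simply store 0 after the release barrier.
 */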
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()   smp_mb()

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */