#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * A 32-bit int is used for the lock word, as a full 64-bit word is
 * not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif
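
/*
 * Added example (illustrative; the field layout comes from asm/paca.h):
 * the 32-bit read covers paca->lock_token (the constant 0x8000) and
 * paca->paca_index, so the token stored by CPU 3 is 0x80000003 and a
 * spinning CPU can recover the holder's number from the lock word.
 * On 32-bit kernels there is no paca, so any non-zero token (1) works.
 */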

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif
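
/*
 * Background (added note, not in the original header): the 64-bit SMP
 * I/O accessors set paca->io_sync after MMIO stores, so SYNC_IO in
 * arch_spin_unlock() issues a full mb() only when the critical section
 * actually touched device memory, keeping the common unlock path
 * cheap.  CLEAR_IO_SYNC resets the flag when the lock is taken.
 */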

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

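/*
 * Added note: the smp_mb() below ensures the load of the lock word is
 * not reordered with memory accesses issued before the call, so the
 * answer reflects a consistent view relative to the caller's earlier
 * operations.
 */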
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif
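
/*
 * For reference, a condensed sketch of the out-of-line __spin_yield()
 * (the real implementation lives in arch/powerpc/lib/locks.c; this is
 * paraphrased, not authoritative):
 *
 *	void __spin_yield(arch_spinlock_t *lock)
 *	{
 *		unsigned int lock_value, holder_cpu, yield_count;
 *
 *		lock_value = lock->slock;
 *		if (lock_value == 0)
 *			return;			// released meanwhile
 *		holder_cpu = lock_value & 0xffff;	// from LOCK_TOKEN
 *		yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
 *		if ((yield_count & 1) == 0)
 *			return;			// holder is running
 *		// confer the rest of our timeslice to the lock holder
 *		plpar_hcall_norets(H_CONFER,
 *			get_hard_smp_processor_id(holder_cpu), yield_count);
 *	}
 */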

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

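/*
 * Added note: while spinning, interrupts are restored to the caller's
 * pre-lock state (@flags) and disabled again before each acquisition
 * attempt, so a contended lock does not keep interrupts off for the
 * whole wait.
 */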
static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}

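/*
 * Added note: PPC_RELEASE_BARRIER (lwsync on modern CPUs) orders every
 * access in the critical section before the plain store that clears
 * slock, pairing with PPC_ACQUIRE_BARRIER in __arch_spin_trylock().
 */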
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	arch_spinlock_t lock_val;

	smp_mb();

	/*
	 * Atomically load and store back the lock value (unchanged). This
	 * ensures that our observation of the lock value is ordered with
	 * respect to other lock operations.
	 */
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0, 0, %2, 0) "\n"
"	stwcx. %0, 0, %2\n"
"	bne- 1b\n"
	: "=&r" (lock_val), "+m" (*lock)
	: "r" (lock)
	: "cr0", "xer");

	if (arch_spin_value_unlocked(lock_val))
		goto out;

	while (lock->slock) {
		HMT_low();
		if (SHARED_PROCESSOR)
			__spin_yield(lock);
	}
	HMT_medium();

out:
	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

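/*
 * Hypothetical usage of the mixing described above (the lock name and
 * contexts are illustrative, not part of this header):
 *
 *	// readers, in interrupt handlers or process context: no need
 *	// to disable interrupts, an interrupting reader can nest
 *	read_lock(&stats_lock);
 *	...
 *	read_unlock(&stats_lock);
 *
 *	// any writer must exclude the interrupt readers
 *	write_lock_irqsave(&stats_lock, flags);
 *	...
 *	write_unlock_irqrestore(&stats_lock, flags);
 */
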
#define arch_read_can_lock(rw)		((rw)->lock >= 0)
#define arch_write_can_lock(rw)	(!(rw)->lock)

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif
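
/*
 * Added note: the rwlock word is a signed count: 0 means unlocked,
 * n > 0 means n readers hold the lock, and a negative value marks a
 * writer (WRLOCK_TOKEN is negative when viewed as a signed 32-bit
 * word, which is why __arch_read_trylock() sign-extends on 64-bit
 * before testing the result).
 */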

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
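
/*
 * Added note: after the "addic. 1", a result <= 0 means the old word
 * was negative, i.e. a writer held the lock, so the ble- rejects
 * writer-held locks; it also keeps a reader count that has wrapped
 * negative from being published.
 */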

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */