#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int is used for the lock word, as a full 64-bit word is not
 * necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-405.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO	do {						\
			if (unlikely(get_paca()->io_sync)) {	\
				mb();				\
				get_paca()->io_sync = 0;	\
			}					\
		} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif
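
/*
 * Why the io_sync dance: on PPC64 the MMIO accessors (e.g. writel())
 * set paca->io_sync after touching a device, and SYNC_IO above turns
 * that into a full mb() in arch_spin_unlock(), so MMIO stores made
 * inside a critical section cannot leak past the lock release.  An
 * illustrative sketch (dev_lock and dev_reg are hypothetical):
 *
 *	spin_lock(&dev_lock);
 *	writel(val, dev_reg);		// sets paca->io_sync
 *	spin_unlock(&dev_lock);		// SYNC_IO: mb() before release
 */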

#ifdef CONFIG_PPC_PSERIES
DECLARE_STATIC_KEY_FALSE(shared_processor);

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!static_branch_unlikely(&shared_processor))
		return false;
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif
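
/*
 * The hypervisor bumps yield_count in the lppaca each time it
 * dispatches or preempts a virtual processor, so an odd count means
 * the vCPU is currently preempted.  Generic spin-wait loops can use
 * this hint to stop burning cycles; a minimal sketch (try_acquire()
 * and holder_cpu are hypothetical stand-ins):
 *
 *	while (!try_acquire(&resource)) {
 *		if (vcpu_is_preempted(holder_cpu))
 *			break;		// holder is off-CPU, don't spin
 *		cpu_relax();
 *	}
 */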

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */
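
/*
 * Worked example: if CPU 3 holds the lock on a CONFIG_PPC64 kernel,
 * the lock word contains 0x80000003.  __spin_yield() can then recover
 * the holder from the token and confer the remainder of our timeslice
 * to that virtual processor; roughly (a sketch of the idea, not the
 * exact implementation in lib/locks.c):
 *
 *	lock_value = lock->slock;
 *	if (lock_value == 0)
 *		return;			// lock became free meanwhile
 *	holder_cpu = lock_value & 0xffff;
 *	// if the holder's vCPU is preempted, yield to it via hcall
 */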

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x)		barrier()
#define __rw_yield(x)		barrier()
#define SHARED_PROCESSOR	0
#endif

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
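
/*
 * A sketch of the mixed-irq pattern described above (my_lock and the
 * two contexts are hypothetical): readers running in interrupt
 * context may take the plain read lock, but every writer must
 * disable interrupts, or an interrupt taken while the write lock is
 * held could deadlock trying to get a read lock on the same CPU.
 *
 *	// interrupt handler (reader)
 *	read_lock(&my_lock);
 *	...
 *	read_unlock(&my_lock);
 *
 *	// process context (writer)
 *	write_lock_irqsave(&my_lock, flags);
 *	...
 *	write_unlock_irqrestore(&my_lock, flags);
 */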

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
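
/*
 * The rwlock word is a signed count: 0 means unlocked, a positive
 * value is the number of active readers, and a negative value (the
 * sign-extended write token) means write-locked.  Worked example:
 * with two readers the word holds 2, so a third __arch_read_trylock()
 * returns 3 (> 0, success); while a writer holds it the word is
 * negative, the increment produces a value <= 0, and the "ble- 2f"
 * above bails out without storing, failing the attempt.
 */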

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()
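
/*
 * Taking a spinlock is only an acquire barrier here.  Callers that
 * need the acquisition to act as a full barrier (the scheduler, for
 * example, when ordering stores before the lock against loads after
 * it) issue smp_mb__after_spinlock() right after locking:
 *
 *	spin_lock(&rq->lock);
 *	smp_mb__after_spinlock();	// upgrade acquire to full mb()
 */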

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */