/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

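/*
 * Atomically replace *lock with new if it still contains old; returns
 * nonzero on success.  This wraps the GCC __sync builtin, which is
 * typically compiled to a compare-and-swap (CS) on the 32-bit lock word.
 */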
static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
	return __sync_bool_compare_and_swap(lock, old, new);
}

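/*
 * vcpu_is_preempted() lets generic locking code ask whether the CPU that
 * holds a lock is actually running, so that spinning on a lock owned by a
 * preempted (virtual) CPU can be avoided.  Without CONFIG_SMP there is
 * nothing to ask, hence the stub that always reports "not preempted".
 */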
#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * We make no fairness assumptions; fairness would have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

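/*
 * Illustration only (not part of this header): callers do not use the
 * arch_* entry points below directly; they go through the generic
 * spin_lock() family, which maps onto them.  A minimal sketch with a
 * hypothetical lock, showing the two variants mentioned above:
 *
 *	static DEFINE_SPINLOCK(example_lock);		// hypothetical lock
 *	unsigned long flags;
 *
 *	spin_lock(&example_lock);			// leaves IRQs untouched
 *	// ... critical section ...
 *	spin_unlock(&example_lock);
 *
 *	spin_lock_irqsave(&example_lock, flags);	// disables IRQs on this CPU
 *	// ... critical section also safe against local interrupt handlers ...
 *	spin_unlock_irqrestore(&example_lock, flags);
 */
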
void arch_lock_relax(unsigned int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}

static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return ACCESS_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

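/*
 * Lock word protocol used below: 0 means unlocked, a non-zero value is
 * the SPINLOCK_LOCKVAL of the owning CPU (~cpu, see arch_spin_lockval()).
 * arch_spin_trylock_once() is the inlined fast path; if its single
 * compare-and-swap fails, the out-of-line arch_spin_lock_wait*() slow
 * paths take over and keep retrying.
 */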
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

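/*
 * Unlock is a plain store of 0.  On s390 the memory model is strong
 * enough that accesses inside the critical section are not reordered
 * past this store, so no explicit barrier instruction is needed; the
 * "memory" clobber provides the compiler-level barrier.
 */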
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(unsigned int, lp->lock);
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (lp->lock)
		: "d" (0)
		: "cc", "memory");
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		arch_spin_relax(lock);
	smp_acquire__after_ctrl_dep();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

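/*
 * Lock word layout used by the rwlock code below: bit 31 (0x80000000) is
 * the write-lock bit, the lower 31 bits count the readers.  Seen as a
 * signed int the word is therefore negative exactly when a writer holds
 * or is acquiring the lock, which is what the read paths test for.
 * For example: 0 = free, 3 = three readers, 0x80000000 = write-locked.
 */
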
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely((int) old >= 0 &&
		      _raw_compare_and_swap(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely(old == 0 &&
		      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
}

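/*
 * Two rwlock implementations follow.  With CONFIG_HAVE_MARCH_Z196_FEATURES
 * the fast paths use the interlocked-access instructions lao/lan/laa
 * (load-and-or/and/add) via the __RAW_LOCK/__RAW_UNLOCK helpers; older
 * machine generations fall back to compare-and-swap loops built on
 * _raw_compare_and_swap().
 */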
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

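/*
 * __RAW_LOCK atomically applies op_string to *ptr and returns the old
 * value; the trailing "bcr 14,0" acts as a serialization point (memory
 * barrier), giving the lock operation acquire semantics.  __RAW_UNLOCK
 * does the same atomic update without the barrier, relying on the
 * architecture's store ordering for the release side.
 */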
#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	unsigned int old_val;				\
							\
	typecheck(unsigned int *, ptr);			\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n"				\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	unsigned int old_val;				\
							\
	typecheck(unsigned int *, ptr);			\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if ((int) old < 0)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old;

	do {
		old = ACCESS_ONCE(rw->lock);
	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(unsigned int, rw->lock);

	rw->owner = 0;
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}

static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */