/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

/* Upper bound on busy-wait iterations in the out-of-line lock code. */
extern int spin_retry;

/*
 * Swap *lock from the expected old value to new with the s390 COMPARE
 * AND SWAP (cs) instruction and return 1 on success, i.e. if *lock did
 * contain old.  cs serializes the CPU, hence the "memory" clobber.
 */
static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
	unsigned int old_expected = old;

	asm volatile(
		"	cs	%0,%3,%1"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory" );
	return old == old_expected;
}
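
/*
 * For illustration only: a minimal, hypothetical C11 equivalent of the
 * cs-based helper above (the kernel does not use <stdatomic.h>):
 *
 *	#include <stdatomic.h>
 *
 *	static int cas_sketch(_Atomic unsigned int *lock,
 *			      unsigned int old, unsigned int new)
 *	{
 *		// true iff *lock was 'old' and has been replaced by 'new'
 *		return atomic_compare_exchange_strong(lock, &old, new);
 *	}
 */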

/*
 * Simple spin lock operations.  There are two variants, one that clears
 * IRQs on the local processor and one that does not.
 *
 * We make no fairness assumptions; fairness would have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_relax(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}
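
/*
 * The lock value for CPU n is ~n (0xffffffff for CPU 0, 0xfffffffe for
 * CPU 1, ...).  It is never 0, so a stored lockval both marks the lock
 * as taken and identifies the owning CPU; SPINLOCK_LOCKVAL above is the
 * current CPU's value, kept in the lowcore.
 */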

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return ACCESS_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	return _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL);
}

static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
{
	return _raw_compare_and_swap(&lp->lock, SPINLOCK_LOCKVAL, 0);
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (unlikely(!arch_spin_trylock_once(lp)))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (unlikely(!arch_spin_trylock_once(lp)))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (unlikely(!arch_spin_trylock_once(lp)))
		return arch_spin_trylock_retry(lp);
	return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	arch_spin_tryrelease_once(lp);
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		arch_spin_relax(lock);
}
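
/*
 * Usage sketch (hypothetical caller; real code takes these locks through
 * the generic spin_lock()/spin_unlock() wrappers of <linux/spinlock.h>,
 * not by calling the arch_ functions directly):
 *
 *	arch_spinlock_t lp = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&lp);	// fast path: one cs, 0 -> lockval
 *	// ... critical section ...
 *	arch_spin_unlock(&lp);	// cs lockval -> 0
 */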

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

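/*
 * Lock word layout: bit 31 (0x80000000) set means write-locked, bits
 * 0-30 count the active readers.  So "(int)lock >= 0" below means "no
 * writer", and "lock == 0" means "no writer, no readers".
 */
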
/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (!_raw_compare_and_swap(&rw->lock, old, old + 1))
		_raw_read_lock_wait(rw);
}

static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (!_raw_compare_and_swap(&rw->lock, old, old + 1))
		_raw_read_lock_wait_flags(rw, flags);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old;

	do {	/* retry until the reader count drops by one atomically */
		old = ACCESS_ONCE(rw->lock);
	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (unlikely(!_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
		_raw_write_lock_wait(rw);
}

static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	if (unlikely(!_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
		_raw_write_lock_wait_flags(rw, flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1)))
		return 1;
	return _raw_read_trylock_retry(rw);
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
		return 1;
	return _raw_write_trylock_retry(rw);
}

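/*
 * Usage sketch (hypothetical caller; real code goes through the generic
 * read_lock()/write_lock() wrappers):
 *
 *	arch_rwlock_t rw = __ARCH_RW_LOCK_UNLOCKED;
 *
 *	arch_read_lock(&rw);	// reader count 0 -> 1
 *	arch_read_unlock(&rw);	// reader count 1 -> 0
 *	arch_write_lock(&rw);	// lock word 0 -> 0x80000000
 *	arch_write_unlock(&rw);	// lock word 0x80000000 -> 0
 */
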
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */