blob: 96879f7ad6daaebd896745a1edf08cd134e8546c [file] [log] [blame]
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */
8
9#ifndef __ASM_SPINLOCK_H
10#define __ASM_SPINLOCK_H
11
Martin Schwidefsky3c1fcfe2006-09-30 23:27:45 -070012#include <linux/smp.h>
13
Philipp Hachtmann6c8cd5b2014-04-07 18:25:23 +020014#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
15
Martin Schwidefsky638ad342011-10-30 15:17:13 +010016extern int spin_retry;
17
/*
 * Atomic compare-and-swap on *lock: if *lock still contains @old,
 * replace it with @new.  Returns nonzero on success, zero if *lock
 * held some other value.
 *
 * Uses the s390 COMPARE AND SWAP (cs) instruction; the "memory"
 * clobber stops the compiler from reordering memory accesses across
 * the swap.
 */
static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
	/* cs rewrites %0 (old) with the current lock value when the
	 * compare fails, so keep the expected value to detect success. */
	unsigned int old_expected = old;

	asm volatile(
		"	cs	%0,%3,%1"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory" );
	return old == old_expected;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
39
Philipp Hachtmann5b3f6832014-04-07 18:25:23 +020040void arch_spin_lock_wait(arch_spinlock_t *);
41int arch_spin_trylock_retry(arch_spinlock_t *);
42void arch_spin_relax(arch_spinlock_t *);
43void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070044
Philipp Hachtmann6c8cd5b2014-04-07 18:25:23 +020045static inline u32 arch_spin_lockval(int cpu)
46{
47 return ~cpu;
48}
49
Heiko Carstensefc1d232013-09-05 13:26:17 +020050static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
51{
Philipp Hachtmann5b3f6832014-04-07 18:25:23 +020052 return lock.lock == 0;
53}
54
55static inline int arch_spin_is_locked(arch_spinlock_t *lp)
56{
57 return ACCESS_ONCE(lp->lock) != 0;
58}
59
60static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
61{
Martin Schwidefskybae8f562014-05-15 11:00:44 +020062 barrier();
63 return likely(arch_spin_value_unlocked(*lp) &&
64 _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
Philipp Hachtmann5b3f6832014-04-07 18:25:23 +020065}
66
67static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
68{
Philipp Hachtmann6c8cd5b2014-04-07 18:25:23 +020069 return _raw_compare_and_swap(&lp->lock, SPINLOCK_LOCKVAL, 0);
Heiko Carstensefc1d232013-09-05 13:26:17 +020070}
71
Thomas Gleixner0199c4e2009-12-02 20:01:25 +010072static inline void arch_spin_lock(arch_spinlock_t *lp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070073{
Martin Schwidefskybae8f562014-05-15 11:00:44 +020074 if (!arch_spin_trylock_once(lp))
Philipp Hachtmann5b3f6832014-04-07 18:25:23 +020075 arch_spin_lock_wait(lp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070076}
77
Thomas Gleixner0199c4e2009-12-02 20:01:25 +010078static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
Philipp Hachtmann5b3f6832014-04-07 18:25:23 +020079 unsigned long flags)
Hisashi Hifumi894cdde2008-01-26 14:11:28 +010080{
Martin Schwidefskybae8f562014-05-15 11:00:44 +020081 if (!arch_spin_trylock_once(lp))
Philipp Hachtmann5b3f6832014-04-07 18:25:23 +020082 arch_spin_lock_wait_flags(lp, flags);
Hisashi Hifumi894cdde2008-01-26 14:11:28 +010083}
84
Thomas Gleixner0199c4e2009-12-02 20:01:25 +010085static inline int arch_spin_trylock(arch_spinlock_t *lp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070086{
Martin Schwidefskybae8f562014-05-15 11:00:44 +020087 if (!arch_spin_trylock_once(lp))
Philipp Hachtmann5b3f6832014-04-07 18:25:23 +020088 return arch_spin_trylock_retry(lp);
89 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -070090}
91
Thomas Gleixner0199c4e2009-12-02 20:01:25 +010092static inline void arch_spin_unlock(arch_spinlock_t *lp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070093{
Philipp Hachtmann5b3f6832014-04-07 18:25:23 +020094 arch_spin_tryrelease_once(lp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070095}
Philipp Hachtmann5b3f6832014-04-07 18:25:23 +020096
97static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
98{
99 while (arch_spin_is_locked(lock))
100 arch_spin_relax(lock);
101}
102
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113
/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @x: the rwlock in question.
 */
Thomas Gleixnere5931942009-12-03 20:08:46 +0100118#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700119
/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @x: the rwlock in question.
 */
Thomas Gleixnere5931942009-12-03 20:08:46 +0100124#define arch_write_can_lock(x) ((x)->lock == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700125
Thomas Gleixnerfb3a6bb2009-12-03 20:01:19 +0100126extern void _raw_read_lock_wait(arch_rwlock_t *lp);
127extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
128extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
129extern void _raw_write_lock_wait(arch_rwlock_t *lp);
130extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
131extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700132
Martin Schwidefskybae8f562014-05-15 11:00:44 +0200133static inline int arch_read_trylock_once(arch_rwlock_t *rw)
134{
135 unsigned int old = ACCESS_ONCE(rw->lock);
136 return likely((int) old >= 0 &&
137 _raw_compare_and_swap(&rw->lock, old, old + 1));
138}
139
140static inline int arch_write_trylock_once(arch_rwlock_t *rw)
141{
142 unsigned int old = ACCESS_ONCE(rw->lock);
143 return likely(old == 0 &&
144 _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
145}
146
Thomas Gleixnere5931942009-12-03 20:08:46 +0100147static inline void arch_read_lock(arch_rwlock_t *rw)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700148{
Martin Schwidefskybae8f562014-05-15 11:00:44 +0200149 if (!arch_read_trylock_once(rw))
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700150 _raw_read_lock_wait(rw);
151}
152
Thomas Gleixnere5931942009-12-03 20:08:46 +0100153static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
Heiko Carstensce58ae62009-06-12 10:26:22 +0200154{
Martin Schwidefskybae8f562014-05-15 11:00:44 +0200155 if (!arch_read_trylock_once(rw))
Heiko Carstensce58ae62009-06-12 10:26:22 +0200156 _raw_read_lock_wait_flags(rw, flags);
157}
158
Thomas Gleixnere5931942009-12-03 20:08:46 +0100159static inline void arch_read_unlock(arch_rwlock_t *rw)
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700160{
Philipp Hachtmann5b3f6832014-04-07 18:25:23 +0200161 unsigned int old;
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700162
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700163 do {
Philipp Hachtmann5b3f6832014-04-07 18:25:23 +0200164 old = ACCESS_ONCE(rw->lock);
165 } while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700166}
167
Thomas Gleixnere5931942009-12-03 20:08:46 +0100168static inline void arch_write_lock(arch_rwlock_t *rw)
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700169{
Martin Schwidefskybae8f562014-05-15 11:00:44 +0200170 if (!arch_write_trylock_once(rw))
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700171 _raw_write_lock_wait(rw);
172}
173
Thomas Gleixnere5931942009-12-03 20:08:46 +0100174static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
Heiko Carstensce58ae62009-06-12 10:26:22 +0200175{
Martin Schwidefskybae8f562014-05-15 11:00:44 +0200176 if (!arch_write_trylock_once(rw))
Heiko Carstensce58ae62009-06-12 10:26:22 +0200177 _raw_write_lock_wait_flags(rw, flags);
178}
179
Thomas Gleixnere5931942009-12-03 20:08:46 +0100180static inline void arch_write_unlock(arch_rwlock_t *rw)
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700181{
182 _raw_compare_and_swap(&rw->lock, 0x80000000, 0);
183}
184
Thomas Gleixnere5931942009-12-03 20:08:46 +0100185static inline int arch_read_trylock(arch_rwlock_t *rw)
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700186{
Martin Schwidefskybae8f562014-05-15 11:00:44 +0200187 if (!arch_read_trylock_once(rw))
188 return _raw_read_trylock_retry(rw);
189 return 1;
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700190}
191
Thomas Gleixnere5931942009-12-03 20:08:46 +0100192static inline int arch_write_trylock(arch_rwlock_t *rw)
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700193{
Martin Schwidefskybae8f562014-05-15 11:00:44 +0200194 if (!arch_write_trylock_once(rw))
195 return _raw_write_trylock_retry(rw);
196 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197}
198
Thomas Gleixner0199c4e2009-12-02 20:01:25 +0100199#define arch_read_relax(lock) cpu_relax()
200#define arch_write_relax(lock) cpu_relax()
Martin Schwidefskyef6edc92006-09-30 23:27:43 -0700201
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202#endif /* __ASM_SPINLOCK_H */