/*
 *  include/asm-s390/spinlock.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

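/*
 * _raw_compare_and_swap() wraps the s390 COMPARE AND SWAP (CS)
 * instruction: *lock is compared with "old" and, if they match, is
 * atomically replaced by "new".  The value found in *lock is returned,
 * so the operation succeeded iff the return value equals "old".  Two
 * variants follow because the "Q" operand constraint used by the first
 * one requires a gcc release newer than 3.2.
 */
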
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)

static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
		      unsigned int old, unsigned int new)
{
	asm volatile(
		"	cs	%0,%3,%1"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory" );
	return old;
}

#else /* __GNUC__ */

static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
		      unsigned int old, unsigned int new)
{
	asm volatile(
		"	cs	%0,%3,0(%4)"
		: "=d" (old), "=m" (*lock)
		: "0" (old), "d" (new), "a" (lock), "m" (*lock)
		: "cc", "memory" );
	return old;
}

#endif /* __GNUC__ */

/*
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * We make no fairness assumptions.  They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

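/*
 * The lock word (owner_cpu) is 0 while the lock is free.  arch_spin_lock()
 * stores the bitwise complement of the owner's CPU number, so a non-zero
 * value both marks the lock as taken and identifies the holder.
 */
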
#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) \
		 arch_spin_relax(lock); } while (0)

extern void arch_spin_lock_wait(arch_spinlock_t *);
extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
extern int arch_spin_trylock_retry(arch_spinlock_t *);
extern void arch_spin_relax(arch_spinlock_t *lock);

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return 1;
	return arch_spin_trylock_retry(lp);
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}

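/*
 * These arch_spin_* primitives are not called directly by kernel code;
 * they sit underneath the generic spin_lock()/spin_unlock() wrappers.
 * A minimal usage sketch (the "my_lock" name is only illustrative,
 * assuming the generic <linux/spinlock.h> interface):
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	... critical section ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 */
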
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

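/*
 * The rwlock state lives in a single unsigned int: the most significant
 * bit (0x80000000) is set while a writer holds the lock, and the lower
 * 31 bits count the active readers.  Every transition is made with
 * _raw_compare_and_swap() on that word.
 */
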
/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait_flags(rw, flags);
}

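/*
 * Dropping a read lock decrements the reader count.  Other CPUs may
 * change the lock word concurrently, so the compare-and-swap is retried
 * until it observes the value it started from.
 */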
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old, cmp;

	old = rw->lock;
	do {
		cmp = old;
		old = _raw_compare_and_swap(&rw->lock, old, old - 1);
	} while (cmp != old);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait(rw);
}

static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait_flags(rw, flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
		return 1;
	return _raw_read_trylock_retry(rw);
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
		return 1;
	return _raw_write_trylock_retry(rw);
}

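/*
 * As with the spinlocks above, these arch_ rwlock primitives are reached
 * through the generic read_lock()/write_lock() family pulled in by
 * <linux/spinlock.h>.  A minimal sketch ("my_rwlock" is illustrative only):
 *
 *	static DEFINE_RWLOCK(my_rwlock);
 *
 *	read_lock(&my_rwlock);		any number of readers may hold this
 *	...
 *	read_unlock(&my_rwlock);
 *
 *	write_lock_irq(&my_rwlock);	one writer, all readers excluded
 *	...
 *	write_unlock_irq(&my_rwlock);
 */
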
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */