/*
 *  include/asm-s390/spinlock.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
		      unsigned int old, unsigned int new)
{
	asm volatile(
		"	cs	%0,%3,%1"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory" );
	return old;
}

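/*
 * The CS (compare and swap) instruction atomically compares the first
 * operand (old) with the word at *lock; if they are equal, new is
 * stored into *lock, otherwise the current lock value is loaded back
 * into the first operand.  A semantically equivalent, but non-atomic,
 * C sketch of what the instruction does:
 *
 *	if (*lock == old)
 *		*lock = new;
 *	else
 *		old = *lock;
 *	return old;
 *
 * Callers detect success by comparing the returned value with the
 * expected old value they passed in.
 */
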
/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

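/*
 * A free lock has owner_cpu == 0.  A held lock stores the bitwise
 * complement of the holder's CPU number, which is non-zero even for
 * CPU 0.
 */
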
#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) \
		 arch_spin_relax(lock); } while (0)

extern void arch_spin_lock_wait(arch_spinlock_t *);
extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
extern int arch_spin_trylock_retry(arch_spinlock_t *);
extern void arch_spin_relax(arch_spinlock_t *lock);

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return 1;
	return arch_spin_trylock_retry(lp);
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}

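/*
 * Minimal usage sketch (hypothetical caller: real code reaches these
 * through the generic spin_lock()/spin_unlock() wrappers rather than
 * calling the arch_ functions directly, and __ARCH_SPIN_LOCK_UNLOCKED
 * is assumed from asm/spinlock_types.h):
 *
 *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&lock);
 *	... critical section ...
 *	arch_spin_unlock(&lock);
 */
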
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

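/*
 * Lock word layout: bit 31 (0x80000000) is set while a writer holds
 * the lock, bits 0-30 count the current readers.  The word is thus 0
 * when the lock is free, negative (as an int) while write-locked, and
 * a positive reader count otherwise.
 */
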
/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait(rw);
}

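/*
 * In arch_read_lock() the expected value is read with the writer bit
 * masked off, so the compare-and-swap fails not only on a concurrent
 * update but also whenever a writer currently holds the lock.
 */
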
static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait_flags(rw, flags);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old, cmp;

	old = rw->lock;
	do {
		cmp = old;
		/* Drop one reader; retry until the swap actually hit
		 * the value that old - 1 was computed from. */
		old = _raw_compare_and_swap(&rw->lock, old, old - 1);
	} while (cmp != old);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait(rw);
}

static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait_flags(rw, flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
		return 1;
	return _raw_read_trylock_retry(rw);
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
		return 1;
	return _raw_write_trylock_retry(rw);
}

#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */