blob: 273dbecf8acef0465060133bbac01bab72118fc5 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * include/asm-s390/spinlock.h
3 *
4 * S390 version
5 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
7 *
8 * Derived from "include/asm-i386/spinlock.h"
9 */
10
11#ifndef __ASM_SPINLOCK_H
12#define __ASM_SPINLOCK_H
13
/*
 * Atomic compare-and-swap built on the s390 CS instruction:
 * if *lock equals 'old', *lock is replaced with 'new'; otherwise the
 * current contents of *lock are loaded into %0.  In both cases the
 * value that was found in *lock is returned, so callers test
 * "result == old" to see whether the swap took effect.  The "memory"
 * clobber makes this a compiler barrier as well.
 */
static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
		      unsigned int old, unsigned int new)
{
	asm volatile ("cs %0,%3,0(%4)"
		      : "=d" (old), "=m" (*lock)
		      : "0" (old), "d" (new), "a" (lock), "m" (*lock)
		      : "cc", "memory" );
	return old;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -070024
25/*
26 * Simple spin lock operations. There are two variants, one clears IRQ's
27 * on the local processor, one does not.
28 *
29 * We make no fairness assumptions. They have a cost.
Ingo Molnarfb1c8f92005-09-10 00:25:56 -070030 *
31 * (the type definitions are in asm/spinlock_types.h)
Linus Torvalds1da177e2005-04-16 15:20:36 -070032 */
33
/* The lock word is non-zero while held (it stores the owner's tagged pc). */
#define __raw_spin_is_locked(x) ((x)->lock != 0)
/* The flags argument is ignored on s390; plain __raw_spin_lock is used. */
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
/* Spin (without acquiring) until the lock is observed free. */
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

/*
 * Out-of-line contention slow paths.  'pc' is the caller's tagged
 * return address, stored into the lock word as the owner tag.
 */
extern void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc);
extern int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc);
Linus Torvalds1da177e2005-04-16 15:20:36 -070041
Ingo Molnarfb1c8f92005-09-10 00:25:56 -070042static inline void __raw_spin_lock(raw_spinlock_t *lp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070043{
Heiko Carstens9513e5e2005-09-03 15:58:05 -070044 unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
Martin Schwidefsky951f22d2005-07-27 11:44:57 -070045
46 if (unlikely(_raw_compare_and_swap(&lp->lock, 0, pc) != 0))
47 _raw_spin_lock_wait(lp, pc);
Linus Torvalds1da177e2005-04-16 15:20:36 -070048}
49
Ingo Molnarfb1c8f92005-09-10 00:25:56 -070050static inline int __raw_spin_trylock(raw_spinlock_t *lp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070051{
Heiko Carstens9513e5e2005-09-03 15:58:05 -070052 unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070053
Martin Schwidefsky951f22d2005-07-27 11:44:57 -070054 if (likely(_raw_compare_and_swap(&lp->lock, 0, pc) == 0))
55 return 1;
56 return _raw_spin_trylock_retry(lp, pc);
Linus Torvalds1da177e2005-04-16 15:20:36 -070057}
58
Ingo Molnarfb1c8f92005-09-10 00:25:56 -070059static inline void __raw_spin_unlock(raw_spinlock_t *lp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070060{
Martin Schwidefsky951f22d2005-07-27 11:44:57 -070061 _raw_compare_and_swap(&lp->lock, lp->lock, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070062}
63
64/*
65 * Read-write spinlocks, allowing multiple readers
66 * but only one writer.
67 *
68 * NOTE! it is quite common to have readers in interrupts
69 * but no interrupt writers. For those circumstances we
70 * can "mix" irq-safe locks - any writer needs to get a
71 * irq-safe write-lock, but readers can get non-irqsafe
72 * read-locks.
73 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * The msb of ->lock is the writer bit (0x80000000), so the word is
 * negative as a signed int exactly while a writer holds the lock.
 */
#define __raw_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * A writer needs the word fully free: no reader count in the low
 * 31 bits and no writer bit set.
 */
#define __raw_write_can_lock(x) ((x)->lock == 0)

/* Out-of-line slow paths for contended rwlock operations. */
extern void _raw_read_lock_wait(raw_rwlock_t *lp);
extern int _raw_read_trylock_retry(raw_rwlock_t *lp);
extern void _raw_write_lock_wait(raw_rwlock_t *lp);
extern int _raw_write_trylock_retry(raw_rwlock_t *lp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070091
Ingo Molnarfb1c8f92005-09-10 00:25:56 -070092static inline void __raw_read_lock(raw_rwlock_t *rw)
Linus Torvalds1da177e2005-04-16 15:20:36 -070093{
Martin Schwidefsky951f22d2005-07-27 11:44:57 -070094 unsigned int old;
95 old = rw->lock & 0x7fffffffU;
96 if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
97 _raw_read_lock_wait(rw);
98}
99
Ingo Molnarfb1c8f92005-09-10 00:25:56 -0700100static inline void __raw_read_unlock(raw_rwlock_t *rw)
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700101{
102 unsigned int old, cmp;
103
104 old = rw->lock;
105 do {
106 cmp = old;
107 old = _raw_compare_and_swap(&rw->lock, old, old - 1);
108 } while (cmp != old);
109}
110
Ingo Molnarfb1c8f92005-09-10 00:25:56 -0700111static inline void __raw_write_lock(raw_rwlock_t *rw)
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700112{
113 if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
114 _raw_write_lock_wait(rw);
115}
116
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	/*
	 * Clear the writer bit (0x80000000 -> 0).  While the writer
	 * holds the lock no reader can change the word (their CS from
	 * a masked value fails), so this swap should always succeed.
	 */
	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}
121
Ingo Molnarfb1c8f92005-09-10 00:25:56 -0700122static inline int __raw_read_trylock(raw_rwlock_t *rw)
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700123{
124 unsigned int old;
125 old = rw->lock & 0x7fffffffU;
126 if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
127 return 1;
128 return _raw_read_trylock_retry(rw);
129}
130
Ingo Molnarfb1c8f92005-09-10 00:25:56 -0700131static inline int __raw_write_trylock(raw_rwlock_t *rw)
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700132{
133 if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
134 return 1;
135 return _raw_write_trylock_retry(rw);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700136}
137
138#endif /* __ASM_SPINLOCK_H */