#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/processor.h>
#include <asm/spinlock_types.h>

/*
 * Test whether an ldcw-based spinlock is currently held.
 *
 * PA-RISC's only atomic primitive is ldcw (load-and-clear-word), so the
 * convention is inverted relative to most architectures: a lock word of
 * 0 means "held", non-zero means "free".  The lock word must be
 * 16-byte aligned; __ldcw_align() yields the properly aligned word
 * inside the arch_spinlock_t.
 */
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return *a == 0;
}

/* Plain lock: no saved PSW flags, so never re-enable IRQs while spinning. */
#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)

/* Spin (politely, via cpu_relax()) until the lock is observed free. */
#define arch_spin_unlock_wait(x) \
		do { cpu_relax(); } while (arch_spin_is_locked(x))

/*
 * Acquire the spinlock, optionally re-enabling interrupts while waiting.
 *
 * @x:     the lock to take
 * @flags: the caller's saved PSW; if PSW_SM_I is set, interrupts were
 *         enabled before the caller disabled them, so we may briefly
 *         re-enable them while busy-waiting to keep IRQ latency down.
 *
 * __ldcw() atomically loads the lock word and clears it; a non-zero
 * return value means the lock was free and is now ours.  On failure we
 * spin on a plain read of the word (cache-local, cheap) rather than
 * retrying ldcw, which would keep bouncing the cache line between CPUs.
 */
static inline void arch_spin_lock_flags(arch_spinlock_t *x,
					 unsigned long flags)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			if (flags & PSW_SM_I) {
				local_irq_enable();
				cpu_relax();
				local_irq_disable();
			} else
				cpu_relax();
	mb();
}

/*
 * Release the spinlock by storing the non-zero "free" value back into
 * the aligned lock word.  The first mb() orders the critical section's
 * accesses before the releasing store; the second orders the store
 * before whatever the caller does next.
 */
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	mb();
	a = __ldcw_align(x);
	*a = 1;
	mb();
}

/*
 * Make one attempt to take the spinlock.
 *
 * Returns 1 on success, 0 if the lock was already held.  __ldcw()
 * returns the previous value of the lock word: non-zero means it was
 * free and the atomic clear has now made it ours.
 */
static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	int ret;

	mb();
	a = __ldcw_align(x);
	ret = __ldcw(a) != 0;
	mb();

	return ret;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Linux rwlocks are unfair to writers; they can be starved for an indefinite
 * time by readers.  With care, they can also be taken in interrupt context.
 *
 * In the PA-RISC implementation, we have a spinlock and a counter.
 * Readers use the lock to serialise their access to the counter (which
 * records how many readers currently hold the lock).
 * Writers hold the spinlock, preventing any readers or other writers from
 * grabbing the rwlock.
 */

/*
 * Take the rwlock for reading: bump the reader count under the
 * internal spinlock.
 *
 * Interrupts must be disabled across the counter update: an interrupt
 * handler on this CPU could otherwise try to grab the same lock and
 * deadlock against the spinlock we already hold.
 */
static __inline__ void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);
	rw->counter++;
	arch_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

/*
 * Drop a read hold on the rwlock: decrement the reader count under the
 * internal spinlock.  As in arch_read_lock(), interrupts are disabled
 * so an IRQ handler on this CPU cannot deadlock against the spinlock
 * we hold while touching the counter.
 */
static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);
	rw->counter--;
	arch_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

/*
 * Try to take the rwlock for reading.
 *
 * Returns 1 on success, 0 if a writer holds the lock (counter < 0).
 * Interrupts are disabled while we hold the internal spinlock, for the
 * same deadlock-avoidance reason as arch_read_lock().
 *
 * If the spinlock itself is busy we cannot tell yet whether the rwlock
 * is write-locked, so we peek at the counter without the lock: a
 * negative value means a writer, and we fail.  Otherwise the spinlock
 * holder is transient (a reader updating the counter, or a writer
 * mid-acquisition), so we wait for a realistic chance and retry.
 */
static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long flags;
 retry:
	local_irq_save(flags);
	if (arch_spin_trylock(&rw->lock)) {
		rw->counter++;
		arch_spin_unlock(&rw->lock);
		local_irq_restore(flags);
		return 1;
	}

	local_irq_restore(flags);
	/* If write-locked, we fail to acquire the lock */
	if (rw->counter < 0)
		return 0;

	/* Wait until we have a realistic chance at the lock */
	while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
		cpu_relax();

	goto retry;
}

/*
 * Take the rwlock for writing.
 *
 * Interrupts are disabled while we hold the internal spinlock so that
 * a read_trylock() from an interrupt handler on this CPU cannot
 * deadlock against us.
 *
 * On success the internal spinlock is deliberately LEFT HELD (with
 * counter == -1) until arch_write_unlock(); that is what keeps readers
 * and other writers out.  If readers are active we drop the spinlock
 * (re-enabling interrupts) while they drain, then retry from scratch.
 */
static __inline__ void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long flags;
retry:
	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);

	if (rw->counter != 0) {
		arch_spin_unlock(&rw->lock);
		local_irq_restore(flags);

		while (rw->counter != 0)
			cpu_relax();

		goto retry;
	}

	rw->counter = -1; /* mark as write-locked */
	mb();
	local_irq_restore(flags);
}

/*
 * Release a write lock: clear the write-locked marker, then drop the
 * internal spinlock that arch_write_lock()/arch_write_trylock() left
 * held on success.
 */
static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->counter = 0;
	arch_spin_unlock(&rw->lock);
}

/*
 * Try to take the rwlock for writing without spinning.
 *
 * Returns 1 on success (the internal spinlock is left held and the
 * counter set to -1, exactly as in arch_write_lock()), 0 if readers or
 * another writer hold the lock.  Interrupts are disabled across the
 * attempt so a read_trylock() from an IRQ handler on this CPU cannot
 * deadlock against the spinlock.
 */
static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long flags;
	int result = 0;

	local_irq_save(flags);
	if (arch_spin_trylock(&rw->lock)) {
		if (rw->counter == 0) {
			rw->counter = -1;
			result = 1;
		} else {
			/* Read-locked. Oh well. */
			arch_spin_unlock(&rw->lock);
		}
	}
	local_irq_restore(flags);

	return result;
}

Kyle McMartinbc8846c2006-03-24 21:22:02 -0700171/*
172 * read_can_lock - would read_trylock() succeed?
173 * @lock: the rwlock in question.
174 */
Thomas Gleixnere5931942009-12-03 20:08:46 +0100175static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176{
Kyle McMartinbc8846c2006-03-24 21:22:02 -0700177 return rw->counter >= 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700178}
179
Kyle McMartinbc8846c2006-03-24 21:22:02 -0700180/*
181 * write_can_lock - would write_trylock() succeed?
182 * @lock: the rwlock in question.
183 */
Thomas Gleixnere5931942009-12-03 20:08:46 +0100184static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700185{
Kyle McMartinbc8846c2006-03-24 21:22:02 -0700186 return !rw->counter;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700187}
188
/* The *_flags variants need no special handling here: the plain lock
 * routines already save/restore the IRQ state internally. */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

/* Nothing smarter than busy-waiting is available while contended. */
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */