#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/system.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

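/*
 * PA-RISC spinlocks are built on the LDCW (load and clear word)
 * instruction, which atomically loads a word and stores zero back to it.
 * By convention the lock word holds 1 when the lock is free and 0 when it
 * is held, so a non-zero LDCW result means we acquired the lock, and
 * storing 1 releases it.  LDCW needs a 16-byte aligned operand, which is
 * what __ldcw_align() picks out of the lock structure.
 */
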
static inline int __raw_spin_is_locked(raw_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return *a == 0;
}

#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
#define __raw_spin_unlock_wait(x) \
		do { cpu_relax(); } while (__raw_spin_is_locked(x))

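/*
 * The flags argument is the caller's saved PSW.  If it shows that
 * interrupts were enabled before the lock attempt (PSW_SM_I set), they
 * are briefly re-enabled while we spin, so contention does not inflate
 * interrupt latency.  The inner loop spins on a plain load of the lock
 * word and only retries the LDCW (which dirties the cacheline by writing
 * zero) once the lock looks free.
 */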
static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
					 unsigned long flags)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			if (flags & PSW_SM_I) {
				local_irq_enable();
				cpu_relax();
				local_irq_disable();
			} else
				cpu_relax();
	mb();
}

static inline void __raw_spin_unlock(raw_spinlock_t *x)
{
	volatile unsigned int *a;
	mb();
	a = __ldcw_align(x);
	*a = 1;
	mb();
}

static inline int __raw_spin_trylock(raw_spinlock_t *x)
{
	volatile unsigned int *a;
	int ret;

	mb();
	a = __ldcw_align(x);
	ret = __ldcw(a) != 0;
	mb();

	return ret;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Linux rwlocks are unfair to writers; they can be starved for an
 * indefinite time by readers.  With care, they can also be taken in
 * interrupt context.
 *
 * In the PA-RISC implementation, we have a spinlock and a counter.
 * Readers use the lock to serialise their access to the counter (which
 * records how many readers currently hold the lock).
 * Writers hold the spinlock, preventing any readers or other writers from
 * grabbing the rwlock.
 */

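/*
 * For reference, the rwlock type from <asm/spinlock_types.h> is roughly a
 * raw_spinlock_t plus a signed counter:
 *
 *	typedef struct {
 *		raw_spinlock_t lock;
 *		volatile int counter;
 *	} raw_rwlock_t;
 *
 * counter > 0 means read-held that many times, 0 means unlocked, and -1
 * means write-held.
 */
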
/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	__raw_spin_lock_flags(&rw->lock, flags);
	rw->counter++;
	__raw_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	__raw_spin_lock_flags(&rw->lock, flags);
	rw->counter--;
	__raw_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
{
	unsigned long flags;
 retry:
	local_irq_save(flags);
	if (__raw_spin_trylock(&rw->lock)) {
		rw->counter++;
		__raw_spin_unlock(&rw->lock);
		local_irq_restore(flags);
		return 1;
	}

	local_irq_restore(flags);
	/* If write-locked, we fail to acquire the lock */
	if (rw->counter < 0)
		return 0;

	/* Wait until we have a realistic chance at the lock */
	while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
		cpu_relax();

	goto retry;
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned long flags;
retry:
	local_irq_save(flags);
	__raw_spin_lock_flags(&rw->lock, flags);

	if (rw->counter != 0) {
		__raw_spin_unlock(&rw->lock);
		local_irq_restore(flags);

		while (rw->counter != 0)
			cpu_relax();

		goto retry;
	}

	rw->counter = -1; /* mark as write-locked */
	mb();
	local_irq_restore(flags);
}

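/*
 * Releasing a write lock: clearing the counter first lets a writer that
 * is spinning on rw->counter above retry, and dropping the embedded
 * spinlock then lets blocked readers and writers proceed.
 */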
static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
	rw->counter = 0;
	__raw_spin_unlock(&rw->lock);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned long flags;
	int result = 0;

	local_irq_save(flags);
	if (__raw_spin_trylock(&rw->lock)) {
		if (rw->counter == 0) {
			rw->counter = -1;
			result = 1;
		} else {
			/* Read-locked.  Oh well. */
			__raw_spin_unlock(&rw->lock);
		}
	}
	local_irq_restore(flags);

	return result;
}

/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
{
	return rw->counter >= 0;
}

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
{
	return !rw->counter;
}

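/*
 * Both helpers are only advisory: they sample rw->counter without taking
 * the lock, so the answer may already be stale by the time the caller
 * acts on it.
 */
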
#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */