#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

int in_lock_functions(unsigned long addr);

#define assert_spin_locked(x)	BUG_ON(!spin_is_locked(x))

void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(lock);
void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
							__acquires(lock);
void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
							__acquires(lock);
void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(lock);
void __lockfunc _spin_lock_irq(spinlock_t *lock)	__acquires(lock);

unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
							__acquires(lock);
unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
							__acquires(lock);
int __lockfunc _spin_trylock(spinlock_t *lock);
int __lockfunc _spin_trylock_bh(spinlock_t *lock);
void __lockfunc _spin_unlock(spinlock_t *lock)		__releases(lock);
void __lockfunc _spin_unlock_bh(spinlock_t *lock)	__releases(lock);
void __lockfunc _spin_unlock_irq(spinlock_t *lock)	__releases(lock);
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
							__releases(lock);

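/*
 * A minimal usage sketch (the lock name and the critical section are
 * hypothetical): callers normally reach the entry points above through
 * the spin_lock_irqsave()/spin_unlock_irqrestore() wrappers defined in
 * <linux/spinlock.h>, not by calling the _spin_*() functions directly:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	... critical section, safe against local interrupts ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 */
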
#ifdef CONFIG_INLINE_SPIN_LOCK
#define _spin_lock(lock) __spin_lock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _spin_lock_bh(lock) __spin_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _spin_lock_irq(lock) __spin_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _spin_trylock(lock) __spin_trylock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK
#define _spin_unlock(lock) __spin_unlock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
#endif

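/*
 * The CONFIG_INLINE_SPIN_* remaps above trade kernel text size for call
 * overhead: when an option is selected, the out-of-line implementation
 * in kernel/spinlock.c is bypassed and the corresponding __spin_*()
 * inline below is expanded at every call site.  Sketch of the effect,
 * assuming CONFIG_INLINE_SPIN_LOCK=y and a hypothetical caller:
 *
 *	spin_lock(&my_lock);	expands to _spin_lock(&my_lock), which the
 *				#define above turns into the inline
 *				__spin_lock(&my_lock)
 */
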
static inline int __spin_trylock(spinlock_t *lock)
{
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}

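/*
 * Trylock contract, as the code above shows: on success the lock is held
 * and preemption stays disabled until the matching unlock; on failure
 * neither is touched.  A caller sketch (names hypothetical):
 *
 *	if (spin_trylock(&my_lock)) {
 *		... short critical section ...
 *		spin_unlock(&my_lock);
 *	} else {
 *		... fall back instead of spinning ...
 *	}
 */
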
/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable of
	 * _raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}

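/*
 * The flags word returned above must travel back to
 * __spin_unlock_irqrestore(): the previous interrupt state is restored,
 * not unconditionally re-enabled.  Pairing sketch (caller hypothetical):
 *
 *	unsigned long flags = _spin_lock_irqsave(&my_lock);
 *	... critical section ...
 *	_spin_unlock_irqrestore(&my_lock, flags);
 */
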
static inline void __spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

static inline void __spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

static inline void __spin_lock(spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

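/*
 * LOCK_CONTENDED() above is the lockdep/lockstat hook: with
 * CONFIG_LOCK_STAT it first attempts _raw_spin_trylock() and, on
 * failure, records a contention event before falling back to the
 * spinning _raw_spin_lock(); without lock statistics it collapses into
 * a plain _raw_spin_lock() call.
 */
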
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */

static inline void __spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable();
}

static inline void __spin_unlock_irqrestore(spinlock_t *lock,
					    unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __spin_unlock_irq(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

static inline void __spin_unlock_bh(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	/*
	 * No preemption point here: local_bh_enable_ip() below re-enables
	 * bottom halves and performs the deferred resched check itself.
	 */
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}

static inline int __spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}

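/*
 * BH-safe trylock pairing, mirroring the plain trylock above but with
 * bottom halves kept off across a successful acquire.  Caller sketch
 * (names hypothetical):
 *
 *	if (spin_trylock_bh(&my_lock)) {
 *		... critical section, softirqs blocked locally ...
 *		spin_unlock_bh(&my_lock);
 *	}
 */
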
#include <linux/rwlock_api_smp.h>

#endif /* __LINUX_SPINLOCK_API_SMP_H */