#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

int in_lock_functions(unsigned long addr);

#define assert_raw_spin_locked(x)	BUG_ON(!raw_spin_is_locked(x))

void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)	__acquires(lock);
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
							__acquires(lock);
void __lockfunc
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
							__acquires(lock);
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)	__acquires(lock);
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
							__acquires(lock);

unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
							__acquires(lock);
unsigned long __lockfunc
_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
							__acquires(lock);
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc
_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
							__releases(lock);

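/*
 * Typical use of this API, as a minimal illustrative sketch only (the
 * names "my_lock", "my_count" and "my_increment" are made up, not part
 * of this header). Callers normally go through the raw_spin_lock_irqsave()
 * wrappers in spinlock.h, which bottom out in the _raw_* entry points
 * declared above:
 *
 *	static DEFINE_RAW_SPINLOCK(my_lock);
 *	static unsigned long my_count;
 *
 *	static void my_increment(void)
 *	{
 *		unsigned long flags;
 *
 *		raw_spin_lock_irqsave(&my_lock, flags);
 *		my_count++;
 *		raw_spin_unlock_irqrestore(&my_lock, flags);
 *	}
 */
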
/*
 * Each operation can be resolved either to the out-of-line version
 * built in kernel/spinlock.c or to the inline __raw_* implementation
 * further down in this file, selected per operation by the
 * CONFIG_INLINE_* options (plain unlock is inline by default and is
 * only moved out of line by CONFIG_UNINLINE_SPIN_UNLOCK).
 */
#ifdef CONFIG_INLINE_SPIN_LOCK
#define _raw_spin_lock(lock) __raw_spin_lock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
#endif

#ifndef CONFIG_UNINLINE_SPIN_UNLOCK
#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
#endif

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	preempt_disable();
	if (do_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}
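
/*
 * A minimal sketch of the trylock pattern this backs (illustrative
 * only; "my_lock", "my_count" and "busy_count" are made-up names):
 * take the lock if it is free, otherwise back off without spinning:
 *
 *	if (raw_spin_trylock(&my_lock)) {
 *		my_count++;
 *		raw_spin_unlock(&my_lock);
 *	} else {
 *		busy_count++;
 *	}
 */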

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * Under lockdep we don't want the hand-coded irq-enable in
	 * do_raw_spin_lock_flags(), because lockdep assumes that
	 * interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#else
	do_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
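
/*
 * For reference, the non-lockdep branch above relies on the
 * architecture's do_raw_spin_lock_flags(), which may briefly restore
 * the interrupt state from *flags while waiting for a contended lock,
 * to bound irq latency. Schematically the idea is the following (a
 * sketch only, not any particular architecture's implementation):
 *
 *	while (!do_raw_spin_trylock(lock)) {
 *		local_irq_restore(*flags);
 *		while (raw_spin_is_locked(lock))
 *			cpu_relax();
 *		local_irq_disable();
 *	}
 */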

static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
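
/*
 * LOCK_CONTENDED() comes from <linux/lockdep.h>. With CONFIG_LOCK_STAT
 * it roughly expands to the following, so a contended acquire is
 * recorded before the caller spins (a sketch of the idea, not the
 * exact definition):
 *
 *	if (!do_raw_spin_trylock(lock)) {
 *		lock_contended(&lock->dep_map, _RET_IP_);
 *		do_raw_spin_lock(lock);
 *	}
 *	lock_acquired(&lock->dep_map, _RET_IP_);
 *
 * Without CONFIG_LOCK_STAT it is simply do_raw_spin_lock(lock).
 */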

#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	preempt_enable();
}

static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
					    unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}

static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
	if (do_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
	return 0;
}
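
/*
 * Usage sketch for the _bh trylock (illustrative only; "queue_lock"
 * and "drain_queue" are made-up names): from process context, shut out
 * local softirqs and take the lock only if it is uncontended, e.g. for
 * opportunistic cleanup of a structure otherwise touched from softirq
 * context:
 *
 *	if (raw_spin_trylock_bh(&queue_lock)) {
 *		drain_queue();
 *		raw_spin_unlock_bh(&queue_lock);
 *	}
 */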

#include <linux/rwlock_api_smp.h>

#endif /* __LINUX_SPINLOCK_API_SMP_H */