#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

int in_lock_functions(unsigned long addr);

#define assert_raw_spin_locked(x)	BUG_ON(!raw_spin_is_locked(x))

void __lockfunc _spin_lock(raw_spinlock_t *lock)	__acquires(lock);
void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass)
							__acquires(lock);
void __lockfunc
_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
							__acquires(lock);
void __lockfunc _spin_lock_bh(raw_spinlock_t *lock)	__acquires(lock);
void __lockfunc _spin_lock_irq(raw_spinlock_t *lock)	__acquires(lock);

unsigned long __lockfunc _spin_lock_irqsave(raw_spinlock_t *lock)
							__acquires(lock);
unsigned long __lockfunc
_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
							__acquires(lock);
int __lockfunc _spin_trylock(raw_spinlock_t *lock);
int __lockfunc _spin_trylock_bh(raw_spinlock_t *lock);
void __lockfunc _spin_unlock(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc _spin_unlock_bh(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc _spin_unlock_irq(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc
_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
							__releases(lock);
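
/*
 * These _spin_*() declarations are the out-of-line entry points behind
 * the generic spin_lock() family; callers reach them through the
 * wrappers in spinlock.h rather than calling them directly.  A rough
 * sketch of the call chain for a plain lock (simplified; the exact
 * wrapper expansion varies between releases):
 *
 *	spinlock_t s;
 *
 *	spin_lock(&s);
 *	  -> raw_spin_lock(&s.rlock)
 *	    -> _spin_lock(&s.rlock)		// declared above
 *
 * The __acquires()/__releases() markers are sparse context annotations
 * and compile to nothing in a normal build.
 */
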
#ifdef CONFIG_INLINE_SPIN_LOCK
#define _spin_lock(lock) __spin_lock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _spin_lock_bh(lock) __spin_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _spin_lock_irq(lock) __spin_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _spin_trylock(lock) __spin_trylock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK
#define _spin_unlock(lock) __spin_unlock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
#endif
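
/*
 * Each CONFIG_INLINE_SPIN_* option above remaps one out-of-line
 * _spin_*() entry point to the corresponding __spin_*() inline below,
 * trading kernel text size for the cost of a function call.  These
 * options are driven from kernel/Kconfig.locks; as a rough sketch of
 * the intent (exact conditions vary by release), an option is only
 * enabled when the architecture selects the matching
 * ARCH_INLINE_SPIN_* switch and spinlock debugging is disabled.
 * Effect, assuming CONFIG_INLINE_SPIN_LOCK=y:
 *
 *	spin_lock(&s);			// public API
 *	  -> _spin_lock(&s.rlock)	// remapped by the #define above
 *	    -> __spin_lock(&s.rlock)	// inlined at the call site
 */
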
static inline int __spin_trylock(raw_spinlock_t *lock)
{
	preempt_disable();
	if (do_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}
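
/*
 * Note the ordering in __spin_trylock() above: preemption is disabled
 * before the acquisition attempt, so a successful trylock can never be
 * preempted while the lock is held, and the preempt count is restored
 * immediately on failure.  The third argument of spin_acquire() is 1
 * here, which tells lockdep that this was a non-blocking (trylock)
 * acquisition.  Illustrative caller sketch:
 *
 *	if (spin_trylock(&s)) {
 *		... critical section ...
 *		spin_unlock(&s);
 *	}
 */
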
/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

static inline unsigned long __spin_lock_irqsave(raw_spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable of
	 * do_raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#else
	do_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
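
/*
 * Unlike the classic irqsave interface, __spin_lock_irqsave() returns
 * the saved flags word instead of filling in a by-reference argument;
 * the spin_lock_irqsave() wrapper macro stores the result into the
 * caller's variable, roughly (simplified sketch):
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&s, flags);
 *	  // expands to roughly: flags = _spin_lock_irqsave(&s.rlock);
 */
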
static inline void __spin_lock_irq(raw_spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

static inline void __spin_lock_bh(raw_spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

static inline void __spin_lock(raw_spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
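
/*
 * All the __spin_lock*() variants above funnel the actual spin through
 * LOCK_CONTENDED() (see linux/lockdep.h).  With CONFIG_LOCK_STAT that
 * macro tries do_raw_spin_trylock() first and records a contention
 * event before falling back to do_raw_spin_lock(); without lock
 * statistics it degenerates to a plain lock call:
 *
 *	#define LOCK_CONTENDED(_lock, try, lock) \
 *		lock(_lock)
 */
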
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */

static inline void __spin_unlock(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	preempt_enable();
}

static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock,
					    unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __spin_unlock_irq(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

static inline void __spin_unlock_bh(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
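
/*
 * __spin_unlock_bh() deliberately pairs preempt_enable_no_resched()
 * with local_bh_enable_ip() instead of using a plain preempt_enable():
 * local_bh_enable_ip() performs its own preemption check once the
 * softirq count drops, so the reschedule decision is made exactly
 * once.  Passing __builtin_return_address(0) attributes the bh-enable
 * event to our caller (for lockdep/irq-trace purposes) rather than to
 * this inline helper.
 */
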
static inline int __spin_trylock_bh(raw_spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (do_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}
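
/*
 * __spin_trylock_bh() mirrors __spin_trylock() but must also unwind
 * local_bh_disable() on failure, in exact reverse order of the setup
 * above.  Illustrative caller sketch for data shared with softirq
 * context:
 *
 *	if (spin_trylock_bh(&s)) {
 *		... critical section, softirqs disabled ...
 *		spin_unlock_bh(&s);
 *	}
 */
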
#include <linux/rwlock_api_smp.h>

#endif /* __LINUX_SPINLOCK_API_SMP_H */