#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * Here is the role of the various spinlock/rwlock-related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. versions for UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>

/*
 * Must define these before including other files; inline functions need them.
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))
/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                 struct lock_class_key *key);
# define raw_spin_lock_init(lock)                       \
do {                                                    \
        static struct lock_class_key __key;             \
                                                        \
        __raw_spin_lock_init((lock), #lock, &__key);    \
} while (0)

#else
# define raw_spin_lock_init(lock)                       \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock)     ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)     (((void)(lock), 0))
#endif /* arch_spin_is_contended */
#endif

/*
 * Despite its name, this does not have to be a full barrier.  It only
 * has to guarantee that a STORE issued before the critical section
 * cannot be reordered with LOADs and STOREs inside the section.
 * spin_lock() provides a one-way (acquire) barrier, so such a LOAD
 * cannot escape out of the region.  The default implementation
 * therefore only has to ensure that a STORE cannot move into the
 * critical section; smp_wmb() serializes it with the STORE done by
 * spin_lock().
 */
#ifndef smp_mb__before_spinlock
#define smp_mb__before_spinlock()       smp_wmb()
#endif

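/*
 * Illustrative sketch of the intended use (the field names below are
 * hypothetical, not part of this API): publish a flag with ordering
 * against a subsequent lock-protected section.
 *
 *	WRITE_ONCE(p->wakeup_pending, 1);
 *	smp_mb__before_spinlock();
 *	raw_spin_lock(&p->pi_lock);
 *	... examine and update state under the lock ...
 *	raw_spin_unlock(&p->pi_lock);
 *
 * See try_to_wake_up() in kernel/sched/core.c for a real user of this
 * barrier.
 */
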
/**
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define raw_spin_unlock_wait(lock)      arch_spin_unlock_wait(&(lock)->raw_lock)

#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
extern int do_raw_spin_trylock(raw_spinlock_t *lock);
extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        return arch_spin_trylock(&lock->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
        arch_spin_unlock(&lock->raw_lock);
        __release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note that we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set.  The
 * various methods are defined as no-ops where they are not required.
 */
#define raw_spin_trylock(lock)  __cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)     _raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)                       \
        do {                                                            \
                typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
                _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);  \
        } while (0)
#else
/*
 * Always evaluate the 'subclass' argument so that the compiler does
 * not warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)           \
        _raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)       _raw_spin_lock(lock)
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _raw_spin_lock_irqsave(lock);   \
        } while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave_nested(lock, subclass);  \
        } while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave(lock);                   \
        } while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_lock_irqsave(lock, flags);    \
        } while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)         _raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)          _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)           _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)       _raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)                 \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                _raw_spin_unlock_irqrestore(lock, flags);       \
        } while (0)
#define raw_spin_unlock_bh(lock)        _raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
        __cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0; }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})

/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
        return &lock->rlock;
}

#define spin_lock_init(_lock)                           \
do {                                                    \
        spinlock_check(_lock);                          \
        raw_spin_lock_init(&(_lock)->rlock);            \
} while (0)
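
/*
 * Minimal usage sketch (illustrative only; 'my_lock' and 'my_count'
 * are hypothetical names, not part of this header):
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	static int my_count;
 *
 *	static void my_increment(void)
 *	{
 *		spin_lock(&my_lock);
 *		my_count++;
 *		spin_unlock(&my_lock);
 *	}
 *
 * For a lock embedded in a dynamically allocated structure, call
 * spin_lock_init() on it before first use instead.
 */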

static __always_inline void spin_lock(spinlock_t *lock)
{
        raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
        raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
        return raw_spin_trylock(&lock->rlock);
}
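
/*
 * Illustrative trylock pattern (hypothetical names): take the fast
 * path if the lock is immediately available, otherwise defer.
 *
 *	if (spin_trylock(&dev->lock)) {
 *		do_work_locked(dev);
 *		spin_unlock(&dev->lock);
 *	} else {
 *		queue_deferred_work(dev);
 *	}
 */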

#define spin_lock_nested(lock, subclass)                        \
do {                                                            \
        raw_spin_lock_nested(spinlock_check(lock), subclass);   \
} while (0)
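
/*
 * Illustrative nesting annotation (hypothetical 'parent' and 'child'
 * objects whose locks share a lock class): tell lockdep that taking
 * the second lock while holding the first is intentional.
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	spin_unlock(&child->lock);
 *	spin_unlock(&parent->lock);
 */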

#define spin_lock_nest_lock(lock, nest_lock)                            \
do {                                                                    \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
        raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)                          \
do {                                                            \
        raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
} while (0)
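
/*
 * Typical irqsave pairing (sketch; 'dev' is a hypothetical driver
 * structure).  'flags' must be an unsigned long in the caller's frame:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&dev->lock, flags);
 *	... critical section, safe against local interrupts ...
 *	spin_unlock_irqrestore(&dev->lock, flags);
 */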

#define spin_lock_irqsave_nested(lock, flags, subclass)                 \
do {                                                                    \
        raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
        raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
        raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
        raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
        return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
        return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)                       \
({                                                              \
        raw_spin_trylock_irqsave(spinlock_check(lock), flags);  \
})

/**
 * spin_unlock_wait - Interpose between successive critical sections
 * @lock: the spinlock whose critical sections are to be interposed.
 *
 * Semantically this is equivalent to a spin_lock() immediately
 * followed by a spin_unlock().  However, most architectures have
 * more efficient implementations in which spin_unlock_wait() does
 * not block concurrent lock acquisition, and in some cases does not
 * write to the lock variable at all.  Nevertheless, spin_unlock_wait()
 * can have high overhead, so if you feel the need to use it, please
 * check to see if there is a better way to get your job done.
 *
 * The ordering guarantees provided by spin_unlock_wait() are:
 *
 * 1.  All accesses preceding the spin_unlock_wait() happen before
 *     any accesses in later critical sections for this same lock.
 * 2.  All accesses following the spin_unlock_wait() happen after
 *     any accesses in earlier critical sections for this same lock.
 */
static __always_inline void spin_unlock_wait(spinlock_t *lock)
{
        raw_spin_unlock_wait(&lock->rlock);
}
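
/*
 * Illustrative use (a sketch; 'obj' and its fields are hypothetical):
 * a teardown path can set a "dying" flag that new lockers check, then
 * wait for any critical section already in flight to drain before the
 * object is freed.
 *
 *	WRITE_ONCE(obj->dying, true);
 *	smp_mb();
 *	spin_unlock_wait(&obj->lock);
 *	free_the_object(obj);
 */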

static __always_inline int spin_is_locked(spinlock_t *lock)
{
        return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
        return raw_spin_is_contended(&lock->rlock);
}

static __always_inline int spin_can_lock(spinlock_t *lock)
{
        return raw_spin_can_lock(&lock->rlock);
}

#define assert_spin_locked(lock)        assert_raw_spin_locked(&(lock)->rlock)

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
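
/*
 * Classic reference-count teardown pattern (sketch; the names are
 * hypothetical): drop a reference, and only if it was the last one,
 * take the list lock and tear the object down.
 *
 *	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */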

#endif /* __LINUX_SPINLOCK_H */