#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. versions for UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>


/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)		\
	".subsection 1\n\t"			\
	extra					\
	".ifndef " LOCK_SECTION_NAME "\n\t"	\
	LOCK_SECTION_NAME ":\n\t"		\
	".endif\n"

#define LOCK_SECTION_END			\
	".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);
# define raw_spin_lock_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init((lock), #lock, &__key);		\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif
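
/*
 * Illustrative sketch (not part of this file; 'my_lock' is a
 * hypothetical caller lock): a lock holder can consult
 * raw_spin_is_contended() to voluntarily drop a contended lock and
 * bound the latency seen by waiters, roughly what the scheduler's
 * cond_resched_lock() path does:
 *
 *	if (raw_spin_is_contended(&my_lock)) {
 *		raw_spin_unlock(&my_lock);
 *		cpu_relax();
 *		raw_spin_lock(&my_lock);
 *	}
 */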

/*
 * Despite its name it doesn't necessarily have to be a full barrier.
 * It should only guarantee that a STORE before the critical section
 * can not be reordered with LOADs and STOREs inside this section.
 * spin_lock() is a one-way barrier: this LOAD can not escape out
 * of the region. So the default implementation simply ensures that
 * a STORE can not move into the critical section; smp_wmb() should
 * serialize it with another STORE done by spin_lock().
 */
#ifndef smp_mb__before_spinlock
#define smp_mb__before_spinlock()	smp_wmb()
#endif
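
/*
 * Hedged usage sketch (hypothetical caller code, not from this file):
 * a simplified wakeup path where the CONDITION store must not be
 * reordered into the critical section that inspects the sleeper:
 *
 *	CONDITION = 1;
 *	smp_mb__before_spinlock();
 *	spin_lock(&wait_lock);
 *	if (sleeper_is_waiting)
 *		wake_up_process(sleeper);
 *	spin_unlock(&wait_lock);
 */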

/*
 * This barrier must provide two things:
 *
 *   - it must guarantee a STORE before the spin_lock() is ordered against a
 *     LOAD after it, see the comments at its two usage sites.
 *
 *   - it must ensure the critical section is RCsc.
 *
 * The latter is important for cases where we observe values written by other
 * CPUs in spin-loops, without barriers, while being subject to scheduling.
 *
 * CPU0			CPU1			CPU2
 *
 *			for (;;) {
 *			  if (READ_ONCE(X))
 *			    break;
 *			}
 * X=1
 *			<sched-out>
 *						<sched-in>
 *						r = X;
 *
 * without transitivity it could be that CPU1 observes X!=0 and breaks out of
 * the loop, we get migrated, and CPU2 sees X==0.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()	do { } while (0)
#endif

/**
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods. Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument to avoid compiler warnings
 * about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif
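
/*
 * Usage sketch (assumed caller code; 'parent' and 'child' are
 * hypothetical): when two locks of the same lock class must nest,
 * pass an explicit subclass so lockdep can tell the two acquisitions
 * apart. SINGLE_DEPTH_NESTING (from linux/lockdep.h) is the common
 * choice:
 *
 *	raw_spin_lock(&parent->lock);
 *	raw_spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	raw_spin_unlock(&child->lock);
 *	raw_spin_unlock(&parent->lock);
 */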

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = _raw_spin_lock_irqsave(lock);		\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif
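
/*
 * Caller sketch (hedged; 'my_lock' is hypothetical): the typecheck()
 * in the macros above is why @flags must be a plain unsigned long
 * lvalue, not a pointer or a narrower type, or the build fails:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave(&my_lock, flags);
 *	... critical section, local IRQs off ...
 *	raw_spin_unlock_irqrestore(&my_lock, flags);
 */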

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})
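
/*
 * Trylock sketch (hypothetical caller): both trylock variants above
 * evaluate to 1 on success with IRQs left disabled, and to 0 with the
 * IRQ state already restored, so the unlock path runs only on the
 * success branch:
 *
 *	if (raw_spin_trylock_irqsave(&my_lock, flags)) {
 *		... critical section ...
 *		raw_spin_unlock_irqrestore(&my_lock, flags);
 *	} else {
 *		... contended: fall back or retry ...
 *	}
 */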

/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)
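
/*
 * Sketch (assumed caller code; 'struct foo' is hypothetical):
 * dynamically allocated locks must be initialized before first use,
 * while statically allocated ones can use DEFINE_SPINLOCK() from
 * linux/spinlock_types.h instead:
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *	if (p)
 *		spin_lock_init(&p->lock);
 */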

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

/**
 * spin_unlock_wait - Interpose between successive critical sections
 * @lock: the spinlock whose critical sections are to be interposed.
 *
 * Semantically this is equivalent to a spin_lock() immediately
 * followed by a spin_unlock(). However, most architectures have
 * more efficient implementations in which the spin_unlock_wait()
 * cannot block concurrent lock acquisition, and in some cases
 * where spin_unlock_wait() does not write to the lock variable.
 * Nevertheless, spin_unlock_wait() can have high overhead, so if
 * you feel the need to use it, please check to see if there is
 * a better way to get your job done.
 *
 * The ordering guarantees provided by spin_unlock_wait() are:
 *
 * 1.  All accesses preceding the spin_unlock_wait() happen before
 *     any accesses in later critical sections for this same lock.
 * 2.  All accesses following the spin_unlock_wait() happen after
 *     any accesses in earlier critical sections for this same lock.
 */
static __always_inline void spin_unlock_wait(spinlock_t *lock)
{
	raw_spin_unlock_wait(&lock->rlock);
}

static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

static __always_inline int spin_can_lock(spinlock_t *lock)
{
	return raw_spin_can_lock(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1. If the result is 0, returns true and locks
 * @lock. Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
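
/*
 * Typical use (illustrative; 'obj' and 'obj_list_lock' are
 * hypothetical): drop a reference and, only when it was the last one,
 * take the lock to unlink the object before freeing it. The lock is
 * acquired atomically with the final decrement, so no other CPU can
 * find the object at refcount zero:
 *
 *	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */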

#endif /* __LINUX_SPINLOCK_H */