/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>


/*
 * Must define these before including other files; inline functions need them.
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
	".subsection 1\n\t"                     \
	extra                                   \
	".ifndef " LOCK_SECTION_NAME "\n\t"     \
	LOCK_SECTION_NAME ":\n\t"               \
	".endif\n"

#define LOCK_SECTION_END                        \
	".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);
# define raw_spin_lock_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init((lock), #lock, &__key);		\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
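
/*
 * Illustrative usage sketch (not part of this header; the lock name is
 * hypothetical): because the debug variant of raw_spin_lock_init() expands
 * a static lock_class_key at every call site, each init site gets its own
 * lockdep class:
 *
 *	static raw_spinlock_t my_dev_lock;	// hypothetical example lock
 *
 *	static int my_dev_setup(void)
 *	{
 *		raw_spin_lock_init(&my_dev_lock);
 *		return 0;
 *	}
 */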

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /* arch_spin_is_contended */
#endif

/*
 * This barrier must provide two things:
 *
 *   - it must guarantee a STORE before the spin_lock() is ordered against a
 *     LOAD after it, see the comments at its two usage sites.
 *
 *   - it must ensure the critical section is RCsc.
 *
 * The latter is important for cases where we observe values written by other
 * CPUs in spin-loops, without barriers, while being subject to scheduling.
 *
 * CPU0			CPU1			CPU2
 *
 *			for (;;) {
 *			  if (READ_ONCE(X))
 *			    break;
 *			}
 * X=1
 *			<sched-out>
 *						<sched-in>
 *						r = X;
 *
 * without transitivity it could be that CPU1 observes X!=0 and breaks the
 * loop, we get migrated and CPU2 sees X==0.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers.  Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()	do { } while (0)
#endif
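
/*
 * Illustrative sketch of the intended calling pattern (a sketch only; the
 * lock name is hypothetical, and the real users are those referenced in the
 * comment above): spin_lock() is only an ACQUIRE, so a caller that needs
 * its STOREs before the lock ordered against LOADs after it issues
 * smp_mb__after_spinlock() right after taking the lock:
 *
 *	spin_lock(&some_lock);
 *	smp_mb__after_spinlock();	// upgrades the ACQUIRE to a full barrier
 *	...
 *	spin_unlock(&some_lock);
 */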

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
}

#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#endif

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note that we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT is set; the
 * various methods are defined as no-ops when they are not required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument so that the compiler does not
 * warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif
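
/*
 * Illustrative sketch (hypothetical objects): raw_spin_lock_nested() lets
 * lockdep accept two locks of the same class being held at once, which
 * would otherwise be flagged as a potential deadlock:
 *
 *	raw_spin_lock(&a->lock);
 *	raw_spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	raw_spin_unlock(&b->lock);
 *	raw_spin_unlock(&a->lock);
 */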

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = _raw_spin_lock_irqsave(lock);		\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0; }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})
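
/*
 * Illustrative usage sketch for the trylock variants above (the lock name
 * is hypothetical): on success they return 1 with interrupts disabled and
 * the lock held; on failure they return 0 with the previous interrupt
 * state restored:
 *
 *	unsigned long flags;
 *
 *	if (raw_spin_trylock_irqsave(&my_lock, flags)) {
 *		// ... critical section ...
 *		raw_spin_unlock_irqrestore(&my_lock, flags);
 *	}
 */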

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)
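
/*
 * Illustrative sketch of the canonical irqsave pattern (the lock and the
 * surrounding code are hypothetical): 'flags' must be an unsigned long in
 * the caller's own frame, and the same variable must be handed back to
 * spin_unlock_irqrestore():
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	// ... critical section, safe against local interrupts ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 */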

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags);	\
})

static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
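
/*
 * Illustrative sketch of the classic atomic_dec_and_lock() pattern (the
 * object, list and lock names are hypothetical): the lock is taken only
 * when the final reference is dropped, making teardown atomic with
 * respect to lookups under the same lock:
 *
 *	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *		list_del(&obj->list);
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */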

#endif /* __LINUX_SPINLOCK_H */