/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+3*4)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
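
/*
 * Illustration (not part of the kernel sources): because the subkeys
 * array is packed, &key->subkeys[0] ... &key->subkeys[7] are distinct
 * kernel addresses, one per subclass, so a single statically allocated
 * key can identify up to MAX_LOCKDEP_SUBCLASSES lock-classes.  A
 * minimal sketch, with a made-up key name:
 *
 *	static struct lock_class_key my_lock_key;	// lives in .data
 *
 *	// lockdep keys subclass "n" of this class off the unique
 *	// address &my_lock_key.subkeys[n].
 */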

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[4];
	unsigned long			contending_point[4];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
	struct list_head		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;
	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:2;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
};
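
/*
 * For reference (an implementation detail, not part of this header):
 * the validator extends the 64-bit chain key incrementally as locks
 * are taken.  In kernel/lockdep.c of this era the hashing step is,
 * roughly, a rotate of the previous key xor'ed with the new lock's
 * class index:
 *
 *	#define iterate_chain_key(key1, key2) \
 *		(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
 *		 ((key1) >> (64 - MAX_LOCKDEP_KEYS_BITS)) ^ \
 *		 (key2))
 *
 * (shown only to illustrate how prev_chain_key relates to the per-task
 * chain key; see kernel/lockdep.c for the authoritative code)
 */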

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
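
/*
 * A minimal sketch of static initialization (names are made up; the
 * workqueue code initializes the lockdep_map embedded in statically
 * defined work structs in a similar way):
 *
 *	static struct lock_class_key my_map_key;
 *	static struct lockdep_map my_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_map", &my_map_key);
 */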

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub)	\
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
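
/*
 * Typical usage (illustrative, names made up): a caller that knows the
 * default class assignment is wrong for one particular lock instance
 * declares its own static key and overrides the class right after
 * initializing the lock:
 *
 *	static struct lock_class_key my_dev_lock_key;
 *
 *	spin_lock_init(&dev->lock);
 *	lockdep_set_class(&dev->lock, &my_dev_lock_key);
 */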

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
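
/*
 * Sketch of how a locking primitive feeds these events to lockdep
 * (everything here is illustrative - real lock types go through the
 * spin_acquire()/mutex_acquire()/... wrappers defined further below):
 *
 *	void my_lock(struct my_lock *l)
 *	{
 *		// annotate before blocking: trylock=0, read=0 (exclusive),
 *		// check=2 (full validation), no nest_lock
 *		lock_acquire(&l->dep_map, 0, 0, 0, 2, NULL, _RET_IP_);
 *		do_the_actual_locking(l);		// hypothetical
 *	}
 *
 *	void my_unlock(struct my_lock *l)
 *	{
 *		lock_release(&l->dep_map, 1, _RET_IP_);
 *		do_the_actual_unlocking(l);		// hypothetical
 *	}
 */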

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}
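
/*
 * lock_set_subclass() re-annotates an already-held lock with a new
 * subclass without dropping it.  In this era the scheduler uses it in
 * double_unlock_balance(), roughly like:
 *
 *	// this_rq->lock was acquired with a nesting subclass while
 *	// busiest->lock was held; restore subclass 0 once the nesting
 *	// is gone:
 *	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 */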

extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

# define INIT_LOCKDEP	.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_set_current_reclaim_state(g)	do { } while (0)
# define lockdep_clear_current_reclaim_state()	do { } while (0)
# define lockdep_trace_alloc(g)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)
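
/*
 * LOCK_CONTENDED() is used by the slowpath wrappers so that lockstat
 * records a contention event only when the trylock fails.  This is
 * roughly how kernel/spinlock.c builds _spin_lock() in this era:
 *
 *	void __lockfunc _spin_lock(spinlock_t *lock)
 *	{
 *		preempt_disable();
 *		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 *	}
 */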

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * With lockdep enabled we don't want the hand-coded irq-enable of the
 * _raw_*_lock_flags() code, because lockdep assumes that interrupts
 * are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_GENERIC_HARDIRQS
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
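
/*
 * Example (a sketch, assuming the caller guarantees the parent->child
 * ordering): when two locks of the same class are legitimately taken
 * together, the inner one is acquired via the _nested() API:
 *
 *	mutex_lock(&parent->mutex);
 *	mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 */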

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, NULL, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif
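
/*
 * Example of how a lock type combines its acquire annotation with the
 * lockstat hooks - roughly what kernel/rwsem.c does for down_read() in
 * this era:
 *
 *	void __sched down_read(struct rw_semaphore *sem)
 *	{
 *		might_sleep();
 *		rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
 *	}
 */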

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
# else
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
# endif
# define lock_map_release(l)		lock_release(l, 1, _THIS_IP_)
#else
# define lock_map_acquire(l)		do { } while (0)
# define lock_map_release(l)		do { } while (0)
#endif
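
/*
 * lock_map_acquire()/lock_map_release() annotate a bare lockdep_map
 * that is not backed by a real lock, to express "pseudo-lock"
 * dependencies.  The workqueue code, for instance, brackets work-item
 * execution roughly like this:
 *
 *	lock_map_acquire(&wq->lockdep_map);	// workqueue-level map
 *	lock_map_acquire(&work->lockdep_map);	// per-work map
 *	f(work);				// run the work function
 *	lock_map_release(&work->lockdep_map);
 *	lock_map_release(&wq->lockdep_map);
 */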

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif
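
/*
 * might_lock()/might_lock_read() assert "this code path may take that
 * lock" without actually taking it, so ordering problems are caught
 * even on paths that rarely block.  For example, with PROVE_LOCKING
 * enabled, might_fault() is implemented roughly as:
 *
 *	static inline void might_fault(void)
 *	{
 *		might_sleep();
 *		might_lock_read(&current->mm->mmap_sem);
 *	}
 */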

#endif /* __LINUX_LOCKDEP_H */