/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+3*4)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};

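/*
 * Illustrative sketch (hypothetical names, not part of this header):
 * a subsystem that wants its own lock-class declares a static key, so
 * that the key's address is unique within kernel/module .data:
 *
 *	static struct lock_class_key ex_dev_lock_key;
 *
 *	spin_lock_init(&dev->lock);
 *	lockdep_set_class(&dev->lock, &ex_dev_lock_key);
 */
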
#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
	struct list_head		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;				/* 16 bits */

	unsigned int read:2;		/* see lock_acquire() comment */
	unsigned int check:2;		/* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:11;			/* 32 bits */
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

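/*
 * Illustrative sketch (hypothetical names): a file-scope pseudo-lock,
 * e.g. for annotating a non-lock construct via the lock_map_*() helpers
 * further below:
 *
 *	static struct lock_class_key ex_work_key;
 *	static struct lockdep_map ex_work_map =
 *		STATIC_LOCKDEP_MAP_INIT("ex_work", &ex_work_key);
 */
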
/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

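/*
 * Illustrative sketch (hypothetical key name): a filesystem might split
 * the class of directory inodes' i_mutex from that of regular files,
 * because the two have genuinely different locking rules:
 *
 *	static struct lock_class_key ex_dir_mutex_key;
 *
 *	if (S_ISDIR(inode->i_mode))
 *		lockdep_set_class(&inode->i_mutex, &ex_dir_mutex_key);
 */
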
/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)

extern int lock_is_held(struct lockdep_map *lock);

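/*
 * Illustrative sketch (hypothetical primitive; the real wrapper macros
 * for the in-tree lock types follow further below): a locking primitive
 * brackets its raw operation with lock_acquire()/lock_release():
 *
 *	lock_acquire(&ex->dep_map, 0, 0, 0, 2, NULL, _RET_IP_);
 *	ex_do_raw_lock(ex);
 *	...
 *	ex_do_raw_unlock(ex);
 *	lock_release(&ex->dep_map, 0, _RET_IP_);
 */
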
extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

# define INIT_LOCKDEP	.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	WARN_ON(debug_locks && !lockdep_is_held(l))

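/*
 * Illustrative sketch (hypothetical function): document "caller must
 * hold dev->lock" as a checkable assertion rather than prose:
 *
 *	static void ex_update_stats(struct ex_dev *dev)
 *	{
 *		lockdep_assert_held(&dev->lock);
 *		dev->nr_updates++;
 *	}
 */
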
#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_set_current_reclaim_state(g)	do { } while (0)
# define lockdep_clear_current_reclaim_state()	do { } while (0)
# define lockdep_trace_alloc(g)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)
/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller should
 * rather #ifdef the call themselves.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()		do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_assert_held(l)			do { } while (0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip)		do {} while (0)
#define lock_acquired(lockdep_map, ip)		do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

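/*
 * Illustrative sketch: a sleeping lock's slow path only records
 * contention when the trylock fast path fails, as in down_read():
 *
 *	might_sleep();
 *	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
 *	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
 */
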
#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_GENERIC_HARDIRQS
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

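/*
 * Illustrative sketch (hypothetical objects): two locks of the same
 * class taken in a guaranteed order; the inner acquire is annotated
 * with a subclass to suppress the false self-deadlock report:
 *
 *	mutex_lock(&parent->mutex);
 *	mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 */
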
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
# define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# else
# define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
# define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_acquire_nest(l, s, t, n, i)	do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
# define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
# else
# define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
# define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
# else
# define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
# define rwsem_acquire_read(l, s, t, i)		lock_acquire(l, s, t, 1, 2, NULL, i)
# else
# define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
# define rwsem_acquire_read(l, s, t, i)		lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
# else
# define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
# endif
# define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
#else
# define lock_map_acquire(l)			do { } while (0)
# define lock_map_release(l)			do { } while (0)
#endif

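/*
 * Illustrative sketch (hypothetical map name): lock_map_acquire() and
 * lock_map_release() annotate non-lock dependencies, e.g. "this callback
 * is running", using a map such as the STATIC_LOCKDEP_MAP_INIT() example
 * above:
 *
 *	lock_map_acquire(&ex_work_map);
 *	cb->func(cb);
 *	lock_map_release(&ex_work_map);
 */
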
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif

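/*
 * Illustrative sketch (hypothetical function): declare that a lock *may*
 * be taken, so the dependency is validated even on paths that skip it:
 *
 *	static void *ex_get_buffer(struct ex_dev *dev, bool cached)
 *	{
 *		might_lock(&dev->alloc_mutex);
 *		if (cached)
 *			return dev->cache;
 *		return ex_alloc_buffer(dev);
 *	}
 */
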
#endif /* __LINUX_LOCKDEP_H */