/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+3*4)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};

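/*
 * Illustrative sketch (not part of the original header): a lock that needs
 * its own class typically pairs with a statically allocated key, so every
 * instance initialized from the same code site shares one class.  The names
 * "my_driver_lock_key" and "my_driver_lock" below are hypothetical:
 *
 *	static struct lock_class_key my_driver_lock_key;
 *	static DEFINE_SPINLOCK(my_driver_lock);
 *
 *	lockdep_set_class(&my_driver_lock, &my_driver_lock_key);
 *
 * Each of the MAX_LOCKDEP_SUBCLASSES bytes inside the key provides a unique
 * address, one per subclass.
 */
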
#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[4];
	unsigned long			contending_point[4];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
	struct list_head		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;					/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:2;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:11;				/* 32 bits */
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

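/*
 * Illustrative sketch (not in the original header): a statically
 * initialized map for code that is not a regular lock but still wants
 * lockdep coverage.  "my_subsys" and its key are hypothetical names:
 *
 *	static struct lock_class_key my_subsys_key;
 *	static struct lockdep_map my_subsys_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_subsys", &my_subsys_key);
 *
 *	lock_map_acquire(&my_subsys_map);
 *	... section treated as a lock by the validator ...
 *	lock_map_release(&my_subsys_map);
 */
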
/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub)	\
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

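/*
 * Illustrative sketch (not in the original header) of how a locking
 * primitive wires these hooks up; "my_lock"/"my_unlock" and the arch
 * helpers are hypothetical, real users go through the
 * spin_acquire()/mutex_acquire()/... wrappers further below:
 *
 *	void my_lock(struct my_lock_type *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 2, NULL, _RET_IP_);
 *		arch_do_the_locking(l);
 *	}
 *
 *	void my_unlock(struct my_lock_type *l)
 *	{
 *		lock_release(&l->dep_map, 0, _RET_IP_);
 *		arch_do_the_unlocking(l);
 *	}
 *
 * i.e. subclass = 0, trylock = 0, read = 0 (exclusive), check = 2 (full).
 */
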
#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)

extern int lock_is_held(struct lockdep_map *lock);

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

# define INIT_LOCKDEP		.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	WARN_ON(debug_locks && !lockdep_is_held(l))

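/*
 * Illustrative sketch (not in the original header): functions that must
 * only run with a given lock held can document and enforce that with
 * lockdep_assert_held().  "my_dev" and its fields are hypothetical:
 *
 *	static void my_dev_update_state(struct my_dev *dev)
 *	{
 *		lockdep_assert_held(&dev->lock);
 *		dev->state = MY_DEV_READY;
 *	}
 *
 * With lockdep enabled this warns if the caller forgot to take dev->lock;
 * without lockdep it compiles away to nothing.
 */
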
#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_set_current_reclaim_state(g)	do { } while (0)
# define lockdep_clear_current_reclaim_state()	do { } while (0)
# define lockdep_trace_alloc(g)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)
/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller should
 * rather #ifdef the call site.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_assert_held(l)			do { } while (0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

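/*
 * Illustrative sketch (not in the original header): a sleeping-lock
 * implementation can wrap its trylock/slow-path pair so that lock
 * statistics record contention and acquisition.  The "my_mutex" helpers
 * below are hypothetical:
 *
 *	void my_mutex_lock(struct my_mutex *lock)
 *	{
 *		mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(lock, my_mutex_trylock, __my_mutex_lock);
 *	}
 *
 * If my_mutex_trylock() fails, lock_contended() is recorded before the
 * blocking __my_mutex_lock() call, and lock_acquired() afterwards.
 */
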
#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_GENERIC_HARDIRQS
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

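/*
 * Illustrative sketch (not in the original header): when two instances of
 * the same class legitimately nest (e.g. a hypothetical parent/child pair),
 * take the inner one with an explicit subclass so the validator does not
 * report a false self-deadlock:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */
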
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, NULL, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
# else
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
# endif
# define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
#else
# define lock_map_acquire(l)			do { } while (0)
# define lock_map_release(l)			do { } while (0)
#endif

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif

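/*
 * Illustrative sketch (not in the original header): might_lock() lets a
 * function declare "I may take this lock" even on paths that end up not
 * taking it, so the dependency is always recorded.  A hypothetical user:
 *
 *	int my_try_update(struct my_dev *dev, bool fast)
 *	{
 *		might_lock(&dev->lock);
 *		if (fast)
 *			return 0;	// lock never taken on this path
 *		mutex_lock(&dev->lock);
 *		...
 *		mutex_unlock(&dev->lock);
 *		return 1;
 *	}
 */
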
#ifdef CONFIG_PROVE_RCU
extern void lockdep_rcu_dereference(const char *file, const int line);
#endif

#endif /* __LINUX_LOCKDEP_H */