/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit
{
	LOCK_USED = 0,
	LOCK_USED_IN_HARDIRQ,
	LOCK_USED_IN_SOFTIRQ,
	LOCK_ENABLED_SOFTIRQS,
	LOCK_ENABLED_HARDIRQS,
	LOCK_USED_IN_HARDIRQ_READ,
	LOCK_USED_IN_SOFTIRQ_READ,
	LOCK_ENABLED_SOFTIRQS_READ,
	LOCK_ENABLED_HARDIRQS_READ,
	LOCK_USAGE_STATES
};

/*
 * Usage-state bitmasks:
 */
#define LOCKF_USED			(1 << LOCK_USED)
#define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
#define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
#define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
#define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)

#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
#define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
#define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
#define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)

#define LOCKF_ENABLED_IRQS_READ \
		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
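
/*
 * A key is normally declared static so that its address is unique and
 * outlives the locks keyed on it. A minimal sketch (the key name is
 * illustrative, not from any real subsystem):
 *
 *	static struct lock_class_key my_driver_lock_key;
 *
 * Every lock initialized against this key belongs to the same class.
 */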

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies;
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[4];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

struct lock_class_stats {
	unsigned long			contention_point[4];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
};
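
/*
 * Lock types opt in by embedding a lockdep_map next to the raw lock;
 * roughly (a sketch, modelled on how spinlock_t does it under
 * CONFIG_DEBUG_LOCK_ALLOC):
 *
 *	typedef struct {
 *		raw_spinlock_t		raw_lock;
 *		struct lockdep_map	dep_map;
 *	} spinlock_t;
 */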

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	struct list_head		entry;
	u64				chain_key;
};

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * for 100% coverage of the validator it is absolutely critical
	 * that every unique dependency path that can occur in the
	 * system has a unique key value, making hash collisions as
	 * unlikely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero); here we store the previous hash value:
	 */
	u64				prev_chain_key;
	struct lock_class		*class;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;

#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	int				irq_context;
	int				trylock;
	int				read;
	int				check;
	int				hardirqs_off;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * Reinitialize a lock key - for cases where special locking or special
 * initialization of locks would otherwise make the validator get the
 * scope of dependencies wrong: either too broad (needing a class-split)
 * or too narrow (suffering from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
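
/*
 * A minimal class-split sketch (names illustrative, not from any real
 * subsystem): if directory inodes follow different locking rules than
 * regular files, give them their own class once the inode type is
 * known:
 *
 *	static struct lock_class_key dir_mutex_key;
 *
 *	if (S_ISDIR(inode->i_mode))
 *		lockdep_set_class(&inode->i_mutex, &dir_mutex_key);
 */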

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for "check":
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
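
/*
 * E.g. an exclusive, non-trylock, fully-validated acquire of subclass 0
 * and the matching non-nested release would be annotated as (a sketch;
 * the ip argument is normally _THIS_IP_ or _RET_IP_ at the annotation
 * site):
 *
 *	lock_acquire(&lock->dep_map, 0, 0, 0, 2, _RET_IP_);
 *	...
 *	lock_release(&lock->dep_map, 0, _RET_IP_);
 */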

# define INIT_LOCKDEP				.lockdep_recursion = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub)	do { (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
		lock_acquired(&(_lock)->dep_map);		\
	}							\
} while (0)
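
/*
 * A lock implementation wraps its contended slowpath with this macro;
 * sketched for a spinlock-style primitive (the my_* raw helpers are
 * hypothetical; the trylock must return nonzero on success):
 *
 *	void my_spin_lock(my_spinlock_t *lock)
 *	{
 *		preempt_disable();
 *		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(lock, my_raw_spin_trylock, my_raw_spin_lock);
 *	}
 */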

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip)		do { } while (0)
#define lock_acquired(lockdep_map)		do { } while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
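
/*
 * E.g. code that takes two locks of the same class in a well-defined
 * order (parent before child; the names are illustrative) annotates
 * the inner acquisition with the _nested() API:
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */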

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif
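
/*
 * The matching release hook is called just before the raw unlock; a
 * sketch for the spinlock case (the raw op name is illustrative):
 *
 *	spin_release(&lock->dep_map, 1, _RET_IP_);
 *	__my_raw_spin_unlock(&lock->raw_lock);
 */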

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#endif /* __LINUX_LOCKDEP_H */