/*
 * kernel/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * this code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if at any time in the past two locks were taken in a different order,
 * even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies at runtime.
 */
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif

/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 * class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
 */
static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int graph_lock(void)
{
	arch_spin_lock(&lockdep_lock);
	/*
	 * Make sure that if another CPU detected a bug while
	 * walking the graph we don't change it (while the other
	 * CPU is busy printing out stuff with the graph lock
	 * dropped already)
	 */
	if (!debug_locks) {
		arch_spin_unlock(&lockdep_lock);
		return 0;
	}
	/* prevent any recursions within lockdep from causing deadlocks */
	current->lockdep_recursion++;
	return 1;
}

static inline int graph_unlock(void)
{
	if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
		return DEBUG_LOCKS_WARN_ON(1);

	current->lockdep_recursion--;
	arch_spin_unlock(&lockdep_lock);
	return 0;
}

/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and also release the graph lock:
 */
static inline int debug_locks_off_graph_unlock(void)
{
	int ret = debug_locks_off();

	arch_spin_unlock(&lockdep_lock);

	return ret;
}

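/*
 * Illustrative sketch (not an actual call site in this file) of how the
 * graph_lock()/graph_unlock() helpers above are typically paired with
 * raw_local_irq_save()/restore() by their callers; the function name
 * below is made up for the example:
 */
#if 0
static void example_graph_section(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	if (!graph_lock()) {
		/* another CPU turned debug_locks off - nothing to do */
		raw_local_irq_restore(flags);
		return;
	}
	/* ... inspect or extend the dependency graph here ... */
	graph_unlock();
	raw_local_irq_restore(flags);
}
#endif
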
static int lockdep_initialized;

unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];

/*
 * All data structures here are protected by the global debug_lock.
 *
 * Mutex key structs only get allocated, once during bootup, and never
 * get freed - this significantly simplifies the debugging code.
 */
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
	if (!hlock->class_idx) {
		DEBUG_LOCKS_WARN_ON(1);
		return NULL;
	}
	return lock_classes + hlock->class_idx - 1;
}

#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
		      cpu_lock_stats);

static inline u64 lockstat_clock(void)
{
	return local_clock();
}

static int lock_point(unsigned long points[], unsigned long ip)
{
	int i;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		if (points[i] == 0) {
			points[i] = ip;
			break;
		}
		if (points[i] == ip)
			break;
	}

	return i;
}

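/*
 * E.g. with points[] = { ipA, ipB, 0, 0 }, lock_point(points, ipB)
 * returns slot 1, and lock_point(points, ipC) stores ipC in slot 2 and
 * returns 2; if every one of the LOCKSTAT_POINTS slots already holds a
 * different ip, LOCKSTAT_POINTS is returned, i.e. no slot matched or
 * was free.
 */
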
static void lock_time_inc(struct lock_time *lt, u64 time)
{
	if (time > lt->max)
		lt->max = time;

	if (time < lt->min || !lt->nr)
		lt->min = time;

	lt->total += time;
	lt->nr++;
}

static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
	if (!src->nr)
		return;

	if (src->max > dst->max)
		dst->max = src->max;

	if (src->min < dst->min || !dst->nr)
		dst->min = src->min;

	dst->total += src->total;
	dst->nr += src->nr;
}

struct lock_class_stats lock_stats(struct lock_class *class)
{
	struct lock_class_stats stats;
	int cpu, i;

	memset(&stats, 0, sizeof(struct lock_class_stats));
	for_each_possible_cpu(cpu) {
		struct lock_class_stats *pcs =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
			stats.contention_point[i] += pcs->contention_point[i];

		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
			stats.contending_point[i] += pcs->contending_point[i];

		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
		lock_time_add(&pcs->write_waittime, &stats.write_waittime);

		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
			stats.bounces[i] += pcs->bounces[i];
	}

	return stats;
}

void clear_lock_stats(struct lock_class *class)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct lock_class_stats *cpu_stats =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
	}
	memset(class->contention_point, 0, sizeof(class->contention_point));
	memset(class->contending_point, 0, sizeof(class->contending_point));
}

static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
	return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
}

static void put_lock_stats(struct lock_class_stats *stats)
{
	put_cpu_var(cpu_lock_stats);
}

static void lock_release_holdtime(struct held_lock *hlock)
{
	struct lock_class_stats *stats;
	u64 holdtime;

	if (!lock_stat)
		return;

	holdtime = lockstat_clock() - hlock->holdtime_stamp;

	stats = get_lock_stats(hlock_class(hlock));
	if (hlock->read)
		lock_time_inc(&stats->read_holdtime, holdtime);
	else
		lock_time_inc(&stats->write_holdtime, holdtime);
	put_lock_stats(stats);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif

/*
 * We keep a global list of all lock classes. The list only grows,
 * never shrinks. The list is only accessed with the lockdep
 * spinlock held.
 */
LIST_HEAD(all_lock_classes);

/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

static struct list_head classhash_table[CLASSHASH_SIZE];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

static struct list_head chainhash_table[CHAINHASH_SIZE];

/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
#define iterate_chain_key(key1, key2) \
	(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
	((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
	(key2))

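/*
 * For illustration: the chain key of a sequence of held locks is built
 * up incrementally, one class id at a time, e.g.
 *
 *	u64 chain_key = 0;
 *	chain_key = iterate_chain_key(chain_key, id1);
 *	chain_key = iterate_chain_key(chain_key, id2);
 *
 * so the same classes taken in a different order hash to a different
 * chain key (id1/id2 stand in for the class ids of two held locks).
 */
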
void lockdep_off(void)
{
	current->lockdep_recursion++;
}
EXPORT_SYMBOL(lockdep_off);

void lockdep_on(void)
{
	current->lockdep_recursion--;
}
EXPORT_SYMBOL(lockdep_on);

/*
 * Debugging switches:
 */

#define VERBOSE			0
#define VERY_VERBOSE		0

#if VERBOSE
# define HARDIRQ_VERBOSE	1
# define SOFTIRQ_VERBOSE	1
# define RECLAIM_VERBOSE	1
#else
# define HARDIRQ_VERBOSE	0
# define SOFTIRQ_VERBOSE	0
# define RECLAIM_VERBOSE	0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
	/* Example */
	if (class->name_version == 1 &&
			!strcmp(class->name, "lockname"))
		return 1;
	if (class->name_version == 1 &&
			!strcmp(class->name, "&struct->lockfield"))
		return 1;
#endif
	/* Filter everything else. Returning 1 here would allow everything. */
	return 0;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];

static int save_trace(struct stack_trace *trace)
{
	trace->nr_entries = 0;
	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
	trace->entries = stack_trace + nr_stack_trace_entries;

	trace->skip = 3;

	save_stack_trace(trace);

	/*
	 * Some daft arches put -1 at the end to indicate it's a full trace.
	 *
	 * <rant> this is buggy anyway, since it takes a whole extra entry so a
	 * complete trace that maxes out the entries provided will be reported
	 * as incomplete, friggin useless </rant>
	 */
	if (trace->nr_entries != 0 &&
	    trace->entries[trace->nr_entries-1] == ULONG_MAX)
		trace->nr_entries--;

	trace->max_entries = trace->nr_entries;

	nr_stack_trace_entries += trace->nr_entries;

	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
		if (!debug_locks_off_graph_unlock())
			return 0;

		printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();

		return 0;
	}

	return 1;
}

unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * We cannot printk in early bootup code. Even early_printk() might not
 * work. So we mark any initialization errors and printk about it later
 * on, in lockdep_info().
 */
static int lockdep_init_error;
static unsigned long lockdep_init_trace_data[20];
static struct stack_trace lockdep_init_trace = {
	.max_entries = ARRAY_SIZE(lockdep_init_trace_data),
	.entries = lockdep_init_trace_data,
};

/*
 * Various lockdep statistics:
 */
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif

/*
 * Locking printouts:
 */

#define __USAGE(__STATE)						\
	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",	\
	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",

static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	[LOCK_USED] = "INITIAL USE",
};

const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
{
	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}

static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
	return 1UL << bit;
}

static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
	char c = '.';

	if (class->usage_mask & lock_flag(bit + 2))
		c = '+';
	if (class->usage_mask & lock_flag(bit)) {
		c = '-';
		if (class->usage_mask & lock_flag(bit + 2))
			c = '?';
	}

	return c;
}

void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
	int i = 0;

#define LOCKDEP_STATE(__STATE)						\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	usage[i] = '\0';
}

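/*
 * The characters produced above (one pair per irq state, write-usage
 * then read-usage) decode as:
 *
 *	'.'	acquired while irqs disabled and not in irq context
 *	'-'	acquired in irq context
 *	'+'	acquired with irqs enabled
 *	'?'	acquired in irq context with irqs enabled
 */
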
static void __print_lock_name(struct lock_class *class)
{
	char str[KSYM_NAME_LEN];
	const char *name;

	name = class->name;
	if (!name) {
		name = __get_key_name(class->key, str);
		printk("%s", name);
	} else {
		printk("%s", name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		if (class->subclass)
			printk("/%d", class->subclass);
	}
}

static void print_lock_name(struct lock_class *class)
{
	char usage[LOCK_USAGE_CHARS];

	get_usage_chars(class, usage);

	printk(" (");
	__print_lock_name(class);
	printk("){%s}", usage);
}

static void print_lockdep_cache(struct lockdep_map *lock)
{
	const char *name;
	char str[KSYM_NAME_LEN];

	name = lock->name;
	if (!name)
		name = __get_key_name(lock->key->subkeys, str);

	printk("%s", name);
}

static void print_lock(struct held_lock *hlock)
{
	print_lock_name(hlock_class(hlock));
	printk(", at: ");
	print_ip_sym(hlock->acquire_ip);
}

static void lockdep_print_held_locks(struct task_struct *curr)
{
	int i, depth = curr->lockdep_depth;

	if (!depth) {
		printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
		return;
	}
	printk("%d lock%s held by %s/%d:\n",
		depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));

	for (i = 0; i < depth; i++) {
		printk(" #%d: ", i);
		print_lock(curr->held_locks + i);
	}
}

static void print_kernel_version(void)
{
	printk("%s %.*s\n", init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
}

static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Is this the address of a static object:
 */
static int static_obj(void *obj)
{
	unsigned long start = (unsigned long) &_stext,
		      end   = (unsigned long) &_end,
		      addr  = (unsigned long) obj;

	/*
	 * static variable?
	 */
	if ((addr >= start) && (addr < end))
		return 1;

	if (arch_is_kernel_data(addr))
		return 1;

	/*
	 * in-kernel percpu var?
	 */
	if (is_kernel_percpu_address(addr))
		return 1;

	/*
	 * module static or percpu var?
	 */
	return is_module_address(addr) || is_module_percpu_address(addr);
}

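/*
 * For example, a file-scope "static DEFINE_SPINLOCK(foo_lock)" or a
 * DEFINE_PER_CPU() variable passes the checks above and may act as its
 * own key, whereas a lock embedded in kmalloc()ed memory does not and
 * must be given a static lock_class_key (e.g. via spin_lock_init()).
 * (foo_lock is just an illustrative name.)
 */
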
/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter:
 */
static int count_matching_names(struct lock_class *new_class)
{
	struct lock_class *class;
	int count = 0;

	if (!new_class->name)
		return 0;

	list_for_each_entry(class, &all_lock_classes, lock_entry) {
		if (new_class->key - new_class->subclass == class->key)
			return class->name_version;
		if (class->name && !strcmp(class->name, new_class->name))
			count = max(count, class->name_version);
	}

	return count + 1;
}

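/*
 * E.g. if two unrelated classes both carry the name "&dev->lock", the
 * second one registered gets name_version 2 and is printed by
 * print_lock_name() as "&dev->lock#2" (the name here is illustrative).
 */
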
/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static inline struct lock_class *
look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
	struct lockdep_subclass_key *key;
	struct list_head *hash_head;
	struct lock_class *class;

#ifdef CONFIG_DEBUG_LOCKDEP
	/*
	 * If the architecture calls into lockdep before initializing
	 * the hashes then we'll warn about it later. (we cannot printk
	 * right now)
	 */
	if (unlikely(!lockdep_initialized)) {
		lockdep_init();
		lockdep_init_error = 1;
		save_stack_trace(&lockdep_init_trace);
	}
#endif

	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
		debug_locks_off();
		printk(KERN_ERR
			"BUG: looking up invalid subclass: %u\n", subclass);
		printk(KERN_ERR
			"turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}

	/*
	 * Static locks do not have their class-keys yet - for them the key
	 * is the lock object itself:
	 */
	if (unlikely(!lock->key))
		lock->key = (void *)lock;

	/*
	 * NOTE: the class-key must be unique. For dynamic locks, a static
	 * lock_class_key variable is passed in through the mutex_init()
	 * (or spin_lock_init()) call - which acts as the key. For static
	 * locks we use the lock object itself as the key.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) >
			sizeof(struct lockdep_map));

	key = lock->key->subkeys + subclass;

	hash_head = classhashentry(key);

	/*
	 * We can walk the hash lockfree, because the hash only
	 * grows, and we are careful when adding entries to the end:
	 */
	list_for_each_entry(class, hash_head, hash_entry) {
		if (class->key == key) {
			WARN_ON_ONCE(class->name != lock->name);
			return class;
		}
	}

	return NULL;
}

/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static inline struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
	struct lockdep_subclass_key *key;
	struct list_head *hash_head;
	struct lock_class *class;
	unsigned long flags;

	class = look_up_lock_class(lock, subclass);
	if (likely(class))
		return class;

	/*
	 * Debug-check: all keys must be persistent!
	 */
	if (!static_obj(lock->key)) {
		debug_locks_off();
		printk("INFO: trying to register non-static key.\n");
		printk("the code is fine but needs lockdep annotation.\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();

		return NULL;
	}

	key = lock->key->subkeys + subclass;
	hash_head = classhashentry(key);

	raw_local_irq_save(flags);
	if (!graph_lock()) {
		raw_local_irq_restore(flags);
		return NULL;
	}
	/*
	 * We have to do the hash-walk again, to avoid races
	 * with another CPU:
	 */
	list_for_each_entry(class, hash_head, hash_entry)
		if (class->key == key)
			goto out_unlock_set;
	/*
	 * Allocate a new key from the static array, and add it to
	 * the hash:
	 */
	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
		if (!debug_locks_off_graph_unlock()) {
			raw_local_irq_restore(flags);
			return NULL;
		}
		raw_local_irq_restore(flags);

		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}
	class = lock_classes + nr_lock_classes++;
	debug_atomic_inc(nr_unused_locks);
	class->key = key;
	class->name = lock->name;
	class->subclass = subclass;
	INIT_LIST_HEAD(&class->lock_entry);
	INIT_LIST_HEAD(&class->locks_before);
	INIT_LIST_HEAD(&class->locks_after);
	class->name_version = count_matching_names(class);
	/*
	 * We use RCU's safe list-add method to make
	 * parallel walking of the hash-list safe:
	 */
	list_add_tail_rcu(&class->hash_entry, hash_head);
	/*
	 * Add it to the global list of classes:
	 */
	list_add_tail_rcu(&class->lock_entry, &all_lock_classes);

	if (verbose(class)) {
		graph_unlock();
		raw_local_irq_restore(flags);

		printk("\nnew class %p: %s", class->key, class->name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		printk("\n");
		dump_stack();

		raw_local_irq_save(flags);
		if (!graph_lock()) {
			raw_local_irq_restore(flags);
			return NULL;
		}
	}
out_unlock_set:
	graph_unlock();
	raw_local_irq_restore(flags);

	if (!subclass || force)
		lock->class_cache[0] = class;
	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
		lock->class_cache[subclass] = class;

	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
		return NULL;

	return class;
}

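/*
 * Note that register_lock_class() is only reached on the slow path: once
 * lock->class_cache[] is populated above, subsequent acquisitions of the
 * same lock/subclass normally resolve their class via that cache or via
 * the lock-free hash walk in look_up_lock_class().
 */
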
#ifdef CONFIG_PROVE_LOCKING
/*
 * Allocate a lockdep entry. (assumes the graph_lock is held, returns
 * NULL on failure)
 */
static struct lock_list *alloc_list_entry(void)
{
	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
		if (!debug_locks_off_graph_unlock())
			return NULL;

		printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}
	return list_entries + nr_list_entries++;
}

/*
 * Add a new dependency to the head of the list:
 */
static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
			    struct list_head *head, unsigned long ip,
			    int distance, struct stack_trace *trace)
{
	struct lock_list *entry;
	/*
	 * Lock not present yet - get a new dependency struct and
	 * add it to the list:
	 */
	entry = alloc_list_entry();
	if (!entry)
		return 0;

	entry->class = this;
	entry->distance = distance;
	entry->trace = *trace;
	/*
	 * Since we never remove from the dependency list, the list can
	 * be walked lockless by other CPUs, it's only allocation
	 * that must be protected by the spinlock. But this also means
	 * we must make new entries visible only once writes to the
	 * entry become visible - hence the RCU op:
	 */
	list_add_tail_rcu(&entry->entry, head);

	return 1;
}

/*
 * For good efficiency of the modulo operation, we use a power of 2:
 */
#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)

/*
 * The circular_queue and its helpers are used to implement the
 * breadth-first search (BFS) algorithm, by which we can build
 * the shortest path from the next lock to be acquired to a
 * previously held lock if there is a circular dependency between them.
 */
struct circular_queue {
	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
	unsigned int  front, rear;
};

static struct circular_queue lock_cq;

unsigned int max_bfs_queue_depth;

static unsigned int lockdep_dependency_gen_id;

static inline void __cq_init(struct circular_queue *cq)
{
	cq->front = cq->rear = 0;
	lockdep_dependency_gen_id++;
}

static inline int __cq_empty(struct circular_queue *cq)
{
	return (cq->front == cq->rear);
}

static inline int __cq_full(struct circular_queue *cq)
{
	return ((cq->rear + 1) & CQ_MASK) == cq->front;
}

static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
{
	if (__cq_full(cq))
		return -1;

	cq->element[cq->rear] = elem;
	cq->rear = (cq->rear + 1) & CQ_MASK;
	return 0;
}

static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
{
	if (__cq_empty(cq))
		return -1;

	*elem = cq->element[cq->front];
	cq->front = (cq->front + 1) & CQ_MASK;
	return 0;
}

static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
{
	return (cq->rear - cq->front) & CQ_MASK;
}

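/*
 * Sketch of how the queue above is consumed (this mirrors what __bfs()
 * below does; the function name is made up for the example):
 */
#if 0
static void example_cq_walk(struct lock_list *root)
{
	struct circular_queue *cq = &lock_cq;
	struct lock_list *lock;

	__cq_init(cq);
	__cq_enqueue(cq, (unsigned long)root);
	while (!__cq_empty(cq)) {
		__cq_dequeue(cq, (unsigned long *)&lock);
		/* visit 'lock', then enqueue its not-yet-visited children */
	}
}
#endif
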
static inline void mark_lock_accessed(struct lock_list *lock,
					struct lock_list *parent)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries);
	lock->parent = parent;
	lock->class->dep_gen_id = lockdep_dependency_gen_id;
}

static inline unsigned long lock_accessed(struct lock_list *lock)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries);
	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
}

static inline struct lock_list *get_lock_parent(struct lock_list *child)
{
	return child->parent;
}

static inline int get_lock_depth(struct lock_list *child)
{
	int depth = 0;
	struct lock_list *parent;

	while ((parent = get_lock_parent(child))) {
		child = parent;
		depth++;
	}
	return depth;
}

static int __bfs(struct lock_list *source_entry,
		 void *data,
		 int (*match)(struct lock_list *entry, void *data),
		 struct lock_list **target_entry,
		 int forward)
{
	struct lock_list *entry;
	struct list_head *head;
	struct circular_queue *cq = &lock_cq;
	int ret = 1;

	if (match(source_entry, data)) {
		*target_entry = source_entry;
		ret = 0;
		goto exit;
	}

	if (forward)
		head = &source_entry->class->locks_after;
	else
		head = &source_entry->class->locks_before;

	if (list_empty(head))
		goto exit;

	__cq_init(cq);
	__cq_enqueue(cq, (unsigned long)source_entry);

	while (!__cq_empty(cq)) {
		struct lock_list *lock;

		__cq_dequeue(cq, (unsigned long *)&lock);

		if (!lock->class) {
			ret = -2;
			goto exit;
		}

		if (forward)
			head = &lock->class->locks_after;
		else
			head = &lock->class->locks_before;

		list_for_each_entry(entry, head, entry) {
			if (!lock_accessed(entry)) {
				unsigned int cq_depth;
				mark_lock_accessed(entry, lock);
				if (match(entry, data)) {
					*target_entry = entry;
					ret = 0;
					goto exit;
				}

				if (__cq_enqueue(cq, (unsigned long)entry)) {
					ret = -1;
					goto exit;
				}
				cq_depth = __cq_get_elem_count(cq);
				if (max_bfs_queue_depth < cq_depth)
					max_bfs_queue_depth = cq_depth;
			}
		}
	}
exit:
	return ret;
}

static inline int __bfs_forwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 1);

}

static inline int __bfs_backwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 0);

}

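/*
 * The match() callbacks handed to the BFS helpers above are plain
 * predicates over a lock_list entry; e.g. class_equal() below matches a
 * particular lock_class and usage_match() matches a usage bit, with
 * 'data' carrying the value to compare against.
 */
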
/*
 * Recursive, forwards-direction lock-dependency checking, used for
 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
 * checking.
 */

/*
 * Print a dependency chain entry (this is only done when a deadlock
 * has been detected):
 */
static noinline int
print_circular_bug_entry(struct lock_list *target, int depth)
{
	if (debug_locks_silent)
		return 0;
	printk("\n-> #%u", depth);
	print_lock_name(target->class);
	printk(":\n");
	print_stack_trace(&target->trace, 6);

	return 0;
}

static void
print_circular_lock_scenario(struct held_lock *src,
			     struct held_lock *tgt,
			     struct lock_list *prt)
{
	struct lock_class *source = hlock_class(src);
	struct lock_class *target = hlock_class(tgt);
	struct lock_class *parent = prt->class;

	/*
	 * A direct locking problem where unsafe_class lock is taken
	 * directly by safe_class lock, then all we need to show
	 * is the deadlock scenario, as it is obvious that the
	 * unsafe lock is taken under the safe lock.
	 *
	 * But if there is a chain instead, where the safe lock takes
	 * an intermediate lock (middle_class) where this lock is
	 * not the same as the safe lock, then the lock chain is
	 * used to describe the problem. Otherwise we would need
	 * to show a different CPU case for each link in the chain
	 * from the safe_class lock to the unsafe_class lock.
	 */
	if (parent != source) {
		printk("Chain exists of:\n  ");
		__print_lock_name(source);
		printk(" --> ");
		__print_lock_name(parent);
		printk(" --> ");
		__print_lock_name(target);
		printk("\n\n");
	}

	printk(" Possible unsafe locking scenario:\n\n");
	printk("       CPU0                    CPU1\n");
	printk("       ----                    ----\n");
	printk("  lock(");
	__print_lock_name(target);
	printk(");\n");
	printk("                               lock(");
	__print_lock_name(parent);
	printk(");\n");
	printk("                               lock(");
	__print_lock_name(target);
	printk(");\n");
	printk("  lock(");
	__print_lock_name(source);
	printk(");\n");
	printk("\n *** DEADLOCK ***\n\n");
}

/*
 * When a circular dependency is detected, print the
 * header first:
 */
static noinline int
print_circular_bug_header(struct lock_list *entry, unsigned int depth,
			struct held_lock *check_src,
			struct held_lock *check_tgt)
{
	struct task_struct *curr = current;

	if (debug_locks_silent)
		return 0;

	printk("\n=======================================================\n");
	printk(  "[ INFO: possible circular locking dependency detected ]\n");
	print_kernel_version();
	printk(  "-------------------------------------------------------\n");
	printk("%s/%d is trying to acquire lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(check_src);
	printk("\nbut task is already holding lock:\n");
	print_lock(check_tgt);
	printk("\nwhich lock already depends on the new lock.\n\n");
	printk("\nthe existing dependency chain (in reverse order) is:\n");

	print_circular_bug_entry(entry, depth);

	return 0;
}

static inline int class_equal(struct lock_list *entry, void *data)
{
	return entry->class == data;
}

static noinline int print_circular_bug(struct lock_list *this,
				struct lock_list *target,
				struct held_lock *check_src,
				struct held_lock *check_tgt)
{
	struct task_struct *curr = current;
	struct lock_list *parent;
	struct lock_list *first_parent;
	int depth;

	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	if (!save_trace(&this->trace))
		return 0;

	depth = get_lock_depth(target);

	print_circular_bug_header(target, depth, check_src, check_tgt);

	parent = get_lock_parent(target);
	first_parent = parent;

	while (parent) {
		print_circular_bug_entry(parent, --depth);
		parent = get_lock_parent(parent);
	}

	printk("\nother info that might help us debug this:\n\n");
	print_circular_lock_scenario(check_src, check_tgt,
				     first_parent);

	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static noinline int print_bfs_bug(int ret)
{
	if (!debug_locks_off_graph_unlock())
		return 0;

	WARN(1, "lockdep bfs error:%d\n", ret);

	return 0;
}

static int noop_count(struct lock_list *entry, void *data)
{
	(*(unsigned long *)data)++;
	return 0;
}

unsigned long __lockdep_count_forward_deps(struct lock_list *this)
{
	unsigned long count = 0;
	struct lock_list *uninitialized_var(target_entry);

	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);

	return count;
}
unsigned long lockdep_count_forward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	this.parent = NULL;
	this.class = class;

	local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	ret = __lockdep_count_forward_deps(&this);
	arch_spin_unlock(&lockdep_lock);
	local_irq_restore(flags);

	return ret;
}

unsigned long __lockdep_count_backward_deps(struct lock_list *this)
{
	unsigned long count = 0;
	struct lock_list *uninitialized_var(target_entry);

	__bfs_backwards(this, (void *)&count, noop_count, &target_entry);

	return count;
}

unsigned long lockdep_count_backward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	this.parent = NULL;
	this.class = class;

	local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	ret = __lockdep_count_backward_deps(&this);
	arch_spin_unlock(&lockdep_lock);
	local_irq_restore(flags);

	return ret;
}

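/*
 * The two dependency counters above reuse the BFS machinery with
 * noop_count() as the match function: it never reports a match, it only
 * bumps the counter for each entry visited, so the search walks the
 * entire reachable sub-graph.
 */
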
Peter Zijlstra8e182572007-07-19 01:48:54 -07001259/*
1260 * Prove that the dependency graph starting at <entry> can not
1261 * lead to <target>. Print an error and return 0 if it does.
1262 */
1263static noinline int
Ming Leidb0002a2009-07-16 15:44:29 +02001264check_noncircular(struct lock_list *root, struct lock_class *target,
1265 struct lock_list **target_entry)
Peter Zijlstra8e182572007-07-19 01:48:54 -07001266{
Ming Leidb0002a2009-07-16 15:44:29 +02001267 int result;
Peter Zijlstra8e182572007-07-19 01:48:54 -07001268
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02001269 debug_atomic_inc(nr_cyclic_checks);
David Miller419ca3f2008-07-29 21:45:03 -07001270
Ming Leid7aaba12009-07-16 15:44:29 +02001271 result = __bfs_forwards(root, target, class_equal, target_entry);
Ming Leidb0002a2009-07-16 15:44:29 +02001272
1273 return result;
Peter Zijlstra8e182572007-07-19 01:48:54 -07001274}
1275
Steven Rostedt81d68a92008-05-12 21:20:42 +02001276#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
Peter Zijlstra8e182572007-07-19 01:48:54 -07001277/*
1278 * Forwards and backwards subgraph searching, for the purposes of
1279 * proving that two subgraphs can be connected by a new dependency
1280 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1281 */
Peter Zijlstra8e182572007-07-19 01:48:54 -07001282
Ming Leid7aaba12009-07-16 15:44:29 +02001283static inline int usage_match(struct lock_list *entry, void *bit)
1284{
1285 return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
1286}
1287
1288
1289
Peter Zijlstra8e182572007-07-19 01:48:54 -07001290/*
1291 * Find a node in the forwards-direction dependency sub-graph starting
Ming Leid7aaba12009-07-16 15:44:29 +02001292 * at @root->class that matches @bit.
Peter Zijlstra8e182572007-07-19 01:48:54 -07001293 *
Ming Leid7aaba12009-07-16 15:44:29 +02001294 * Return 0 if such a node exists in the subgraph, and put that node
1295 * into *@target_entry.
Peter Zijlstra8e182572007-07-19 01:48:54 -07001296 *
Ming Leid7aaba12009-07-16 15:44:29 +02001297 * Return 1 otherwise and keep *@target_entry unchanged.
1298 * Return <0 on error.
Peter Zijlstra8e182572007-07-19 01:48:54 -07001299 */
Ming Leid7aaba12009-07-16 15:44:29 +02001300static int
1301find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
1302 struct lock_list **target_entry)
Peter Zijlstra8e182572007-07-19 01:48:54 -07001303{
Ming Leid7aaba12009-07-16 15:44:29 +02001304 int result;
Peter Zijlstra8e182572007-07-19 01:48:54 -07001305
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02001306 debug_atomic_inc(nr_find_usage_forwards_checks);
Peter Zijlstra8e182572007-07-19 01:48:54 -07001307
Ming Leid7aaba12009-07-16 15:44:29 +02001308 result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
1309
1310 return result;
Peter Zijlstra8e182572007-07-19 01:48:54 -07001311}
1312
1313/*
1314 * Find a node in the backwards-direction dependency sub-graph starting
Ming Leid7aaba12009-07-16 15:44:29 +02001315 * at @root->class that matches @bit.
Peter Zijlstra8e182572007-07-19 01:48:54 -07001316 *
Ming Leid7aaba12009-07-16 15:44:29 +02001317 * Return 0 if such a node exists in the subgraph, and put that node
1318 * into *@target_entry.
Peter Zijlstra8e182572007-07-19 01:48:54 -07001319 *
Ming Leid7aaba12009-07-16 15:44:29 +02001320 * Return 1 otherwise and keep *@target_entry unchanged.
1321 * Return <0 on error.
Peter Zijlstra8e182572007-07-19 01:48:54 -07001322 */
Ming Leid7aaba12009-07-16 15:44:29 +02001323static int
1324find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
1325 struct lock_list **target_entry)
Peter Zijlstra8e182572007-07-19 01:48:54 -07001326{
Ming Leid7aaba12009-07-16 15:44:29 +02001327 int result;
Peter Zijlstra8e182572007-07-19 01:48:54 -07001328
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02001329 debug_atomic_inc(nr_find_usage_backwards_checks);
Peter Zijlstra8e182572007-07-19 01:48:54 -07001330
Ming Leid7aaba12009-07-16 15:44:29 +02001331 result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
Dave Jonesf82b2172008-08-11 09:30:23 +02001332
Ming Leid7aaba12009-07-16 15:44:29 +02001333 return result;
Peter Zijlstra8e182572007-07-19 01:48:54 -07001334}
1335
Peter Zijlstraaf012962009-07-16 15:44:29 +02001336static void print_lock_class_header(struct lock_class *class, int depth)
1337{
1338 int bit;
1339
1340 printk("%*s->", depth, "");
1341 print_lock_name(class);
1342 printk(" ops: %lu", class->ops);
1343 printk(" {\n");
1344
1345 for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
1346 if (class->usage_mask & (1 << bit)) {
1347 int len = depth;
1348
1349 len += printk("%*s %s", depth, "", usage_str[bit]);
1350 len += printk(" at:\n");
1351 print_stack_trace(class->usage_traces + bit, len);
1352 }
1353 }
1354 printk("%*s }\n", depth, "");
1355
1356 printk("%*s ... key at: ",depth,"");
1357 print_ip_sym((unsigned long)class->key);
1358}
1359
1360/*
1361 * printk the shortest lock dependencies from @start to @end in reverse order:
1362 */
1363static void __used
1364print_shortest_lock_dependencies(struct lock_list *leaf,
1365 struct lock_list *root)
1366{
1367 struct lock_list *entry = leaf;
1368 int depth;
1369
1370 /*compute depth from generated tree by BFS*/
1371 depth = get_lock_depth(leaf);
1372
1373 do {
1374 print_lock_class_header(entry->class, depth);
1375 printk("%*s ... acquired at:\n", depth, "");
1376 print_stack_trace(&entry->trace, 2);
1377 printk("\n");
1378
1379 if (depth == 0 && (entry != root)) {
Steven Rostedt6be8c392011-04-20 21:41:58 -04001380 printk("lockdep:%s bad path found in chain graph\n", __func__);
Peter Zijlstraaf012962009-07-16 15:44:29 +02001381 break;
1382 }
1383
1384 entry = get_lock_parent(entry);
1385 depth--;
1386 } while (entry && (depth >= 0));
1387
1388 return;
1389}
Ming Leid7aaba12009-07-16 15:44:29 +02001390
Steven Rostedt3003eba2011-04-20 21:41:54 -04001391static void
1392print_irq_lock_scenario(struct lock_list *safe_entry,
1393 struct lock_list *unsafe_entry,
Steven Rostedtdad3d742011-04-20 21:41:57 -04001394 struct lock_class *prev_class,
1395 struct lock_class *next_class)
Steven Rostedt3003eba2011-04-20 21:41:54 -04001396{
1397 struct lock_class *safe_class = safe_entry->class;
1398 struct lock_class *unsafe_class = unsafe_entry->class;
Steven Rostedtdad3d742011-04-20 21:41:57 -04001399 struct lock_class *middle_class = prev_class;
Steven Rostedt3003eba2011-04-20 21:41:54 -04001400
1401 if (middle_class == safe_class)
Steven Rostedtdad3d742011-04-20 21:41:57 -04001402 middle_class = next_class;
Steven Rostedt3003eba2011-04-20 21:41:54 -04001403
1404 /*
1405 * A direct locking problem where unsafe_class lock is taken
1406 * directly by safe_class lock, then all we need to show
1407 * is the deadlock scenario, as it is obvious that the
1408 * unsafe lock is taken under the safe lock.
1409 *
1410 * But if there is a chain instead, where the safe lock takes
1411 * an intermediate lock (middle_class) where this lock is
1412 * not the same as the safe lock, then the lock chain is
1413 * used to describe the problem. Otherwise we would need
1414 * to show a different CPU case for each link in the chain
1415 * from the safe_class lock to the unsafe_class lock.
1416 */
1417 if (middle_class != unsafe_class) {
1418 printk("Chain exists of:\n ");
1419 __print_lock_name(safe_class);
1420 printk(" --> ");
1421 __print_lock_name(middle_class);
1422 printk(" --> ");
1423 __print_lock_name(unsafe_class);
1424 printk("\n\n");
1425 }
1426
1427 printk(" Possible interrupt unsafe locking scenario:\n\n");
1428 printk(" CPU0 CPU1\n");
1429 printk(" ---- ----\n");
1430 printk(" lock(");
1431 __print_lock_name(unsafe_class);
1432 printk(");\n");
1433 printk(" local_irq_disable();\n");
1434 printk(" lock(");
1435 __print_lock_name(safe_class);
1436 printk(");\n");
1437 printk(" lock(");
1438 __print_lock_name(middle_class);
1439 printk(");\n");
1440 printk(" <Interrupt>\n");
1441 printk(" lock(");
1442 __print_lock_name(safe_class);
1443 printk(");\n");
1444 printk("\n *** DEADLOCK ***\n\n");
1445}
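
/*
 * A filled-in instance of the scenario printed above, using three
 * hypothetical classes A (irq-safe), B (the middle class) and C
 * (irq-unsafe) purely for illustration:
 *
 *	Chain exists of:
 *	  A --> B --> C
 *
 *	       CPU0                    CPU1
 *	       ----                    ----
 *	  lock(C);
 *	                               local_irq_disable();
 *	                               lock(A);
 *	                               lock(B);
 *	  <Interrupt>
 *	    lock(A);
 *
 * CPU1, with interrupts off, ends up waiting for C (held by CPU0)
 * via the A --> B --> C chain, while the interrupt on CPU0 spins on
 * A, which CPU1 holds - so neither CPU can make progress.
 */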
1446
Peter Zijlstra8e182572007-07-19 01:48:54 -07001447static int
1448print_bad_irq_dependency(struct task_struct *curr,
Ming Lei24208ca2009-07-16 15:44:29 +02001449 struct lock_list *prev_root,
1450 struct lock_list *next_root,
1451 struct lock_list *backwards_entry,
1452 struct lock_list *forwards_entry,
Peter Zijlstra8e182572007-07-19 01:48:54 -07001453 struct held_lock *prev,
1454 struct held_lock *next,
1455 enum lock_usage_bit bit1,
1456 enum lock_usage_bit bit2,
1457 const char *irqclass)
1458{
1459 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1460 return 0;
1461
1462 printk("\n======================================================\n");
1463 printk( "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
1464 irqclass, irqclass);
1465 print_kernel_version();
1466 printk( "------------------------------------------------------\n");
1467 printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
Pavel Emelyanovba25f9d2007-10-18 23:40:40 -07001468 curr->comm, task_pid_nr(curr),
Peter Zijlstra8e182572007-07-19 01:48:54 -07001469 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1470 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1471 curr->hardirqs_enabled,
1472 curr->softirqs_enabled);
1473 print_lock(next);
1474
1475 printk("\nand this task is already holding:\n");
1476 print_lock(prev);
1477 printk("which would create a new lock dependency:\n");
Dave Jonesf82b2172008-08-11 09:30:23 +02001478 print_lock_name(hlock_class(prev));
Peter Zijlstra8e182572007-07-19 01:48:54 -07001479 printk(" ->");
Dave Jonesf82b2172008-08-11 09:30:23 +02001480 print_lock_name(hlock_class(next));
Peter Zijlstra8e182572007-07-19 01:48:54 -07001481 printk("\n");
1482
1483 printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
1484 irqclass);
Ming Lei24208ca2009-07-16 15:44:29 +02001485 print_lock_name(backwards_entry->class);
Peter Zijlstra8e182572007-07-19 01:48:54 -07001486 printk("\n... which became %s-irq-safe at:\n", irqclass);
1487
Ming Lei24208ca2009-07-16 15:44:29 +02001488 print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
Peter Zijlstra8e182572007-07-19 01:48:54 -07001489
1490 printk("\nto a %s-irq-unsafe lock:\n", irqclass);
Ming Lei24208ca2009-07-16 15:44:29 +02001491 print_lock_name(forwards_entry->class);
Peter Zijlstra8e182572007-07-19 01:48:54 -07001492 printk("\n... which became %s-irq-unsafe at:\n", irqclass);
1493 printk("...");
1494
Ming Lei24208ca2009-07-16 15:44:29 +02001495 print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
Peter Zijlstra8e182572007-07-19 01:48:54 -07001496
1497 printk("\nother info that might help us debug this:\n\n");
Steven Rostedtdad3d742011-04-20 21:41:57 -04001498 print_irq_lock_scenario(backwards_entry, forwards_entry,
1499 hlock_class(prev), hlock_class(next));
Steven Rostedt3003eba2011-04-20 21:41:54 -04001500
Peter Zijlstra8e182572007-07-19 01:48:54 -07001501 lockdep_print_held_locks(curr);
1502
Ming Lei24208ca2009-07-16 15:44:29 +02001503 printk("\nthe dependencies between %s-irq-safe lock", irqclass);
1504 printk(" and the holding lock:\n");
1505 if (!save_trace(&prev_root->trace))
1506 return 0;
1507 print_shortest_lock_dependencies(backwards_entry, prev_root);
Peter Zijlstra8e182572007-07-19 01:48:54 -07001508
Ming Lei24208ca2009-07-16 15:44:29 +02001509 printk("\nthe dependencies between the lock to be acquired");
1510 printk(" and %s-irq-unsafe lock:\n", irqclass);
1511 if (!save_trace(&next_root->trace))
1512 return 0;
1513 print_shortest_lock_dependencies(forwards_entry, next_root);
Peter Zijlstra8e182572007-07-19 01:48:54 -07001514
1515 printk("\nstack backtrace:\n");
1516 dump_stack();
1517
1518 return 0;
1519}
1520
1521static int
1522check_usage(struct task_struct *curr, struct held_lock *prev,
1523 struct held_lock *next, enum lock_usage_bit bit_backwards,
1524 enum lock_usage_bit bit_forwards, const char *irqclass)
1525{
1526 int ret;
Ming Lei24208ca2009-07-16 15:44:29 +02001527 struct lock_list this, that;
Ming Leid7aaba12009-07-16 15:44:29 +02001528 struct lock_list *uninitialized_var(target_entry);
Ming Lei24208ca2009-07-16 15:44:29 +02001529 struct lock_list *uninitialized_var(target_entry1);
Peter Zijlstra8e182572007-07-19 01:48:54 -07001530
Ming Leid7aaba12009-07-16 15:44:29 +02001531 this.parent = NULL;
Peter Zijlstra8e182572007-07-19 01:48:54 -07001532
Ming Leid7aaba12009-07-16 15:44:29 +02001533 this.class = hlock_class(prev);
1534 ret = find_usage_backwards(&this, bit_backwards, &target_entry);
Peter Zijlstraaf012962009-07-16 15:44:29 +02001535 if (ret < 0)
1536 return print_bfs_bug(ret);
1537 if (ret == 1)
1538 return ret;
Ming Leid7aaba12009-07-16 15:44:29 +02001539
Ming Lei24208ca2009-07-16 15:44:29 +02001540 that.parent = NULL;
1541 that.class = hlock_class(next);
1542 ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
Peter Zijlstraaf012962009-07-16 15:44:29 +02001543 if (ret < 0)
1544 return print_bfs_bug(ret);
1545 if (ret == 1)
1546 return ret;
Ming Leid7aaba12009-07-16 15:44:29 +02001547
Ming Lei24208ca2009-07-16 15:44:29 +02001548 return print_bad_irq_dependency(curr, &this, &that,
1549 target_entry, target_entry1,
1550 prev, next,
Peter Zijlstra8e182572007-07-19 01:48:54 -07001551 bit_backwards, bit_forwards, irqclass);
1552}
1553
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01001554static const char *state_names[] = {
1555#define LOCKDEP_STATE(__STATE) \
Peter Zijlstrab4b136f2009-01-29 14:50:36 +01001556 __stringify(__STATE),
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01001557#include "lockdep_states.h"
1558#undef LOCKDEP_STATE
1559};
1560
1561static const char *state_rnames[] = {
1562#define LOCKDEP_STATE(__STATE) \
Peter Zijlstrab4b136f2009-01-29 14:50:36 +01001563 __stringify(__STATE)"-READ",
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01001564#include "lockdep_states.h"
1565#undef LOCKDEP_STATE
1566};
1567
1568static inline const char *state_name(enum lock_usage_bit bit)
1569{
1570 return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
1571}
1572
1573static int exclusive_bit(int new_bit)
1574{
1575 /*
1576 * USED_IN
1577 * USED_IN_READ
1578 * ENABLED
1579 * ENABLED_READ
1580 *
1581 * bit 0 - write/read
1582 * bit 1 - used_in/enabled
1583 * bit 2+ state
1584 */
1585
1586 int state = new_bit & ~3;
1587 int dir = new_bit & 2;
1588
1589 /*
1590 * keep state, bit flip the direction and strip read.
1591	 * keep the state, flip the direction bit and strip the read bit.
1592 return state | (dir ^ 2);
1593}
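
/*
 * A short worked example of the arithmetic above, assuming the bit
 * layout documented in the comment (bit 0 = read, bit 1 =
 * used_in/enabled, bits 2+ = state):
 *
 *	new_bit = LOCK_ENABLED_<STATE>_READ = (<state> << 2) | 2 | 1
 *	state   = new_bit & ~3              = (<state> << 2)
 *	dir     = new_bit & 2               = 2
 *	result  = state | (dir ^ 2)         = (<state> << 2) | 0
 *	                                    = LOCK_USED_IN_<STATE>
 *
 * i.e. the exclusive counterpart of an ENABLED (read or write) bit is
 * the write-side USED_IN bit of the same state, which is what
 * mark_lock_irq() feeds into valid_state() and the usage() checks.
 */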
1594
1595static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1596 struct held_lock *next, enum lock_usage_bit bit)
Peter Zijlstra8e182572007-07-19 01:48:54 -07001597{
1598 /*
1599 * Prove that the new dependency does not connect a hardirq-safe
1600 * lock with a hardirq-unsafe lock - to achieve this we search
1601 * the backwards-subgraph starting at <prev>, and the
1602 * forwards-subgraph starting at <next>:
1603 */
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01001604 if (!check_usage(curr, prev, next, bit,
1605 exclusive_bit(bit), state_name(bit)))
Peter Zijlstra8e182572007-07-19 01:48:54 -07001606 return 0;
1607
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01001608 bit++; /* _READ */
1609
Peter Zijlstra8e182572007-07-19 01:48:54 -07001610 /*
1611 * Prove that the new dependency does not connect a hardirq-safe-read
1612 * lock with a hardirq-unsafe lock - to achieve this we search
1613 * the backwards-subgraph starting at <prev>, and the
1614 * forwards-subgraph starting at <next>:
1615 */
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01001616 if (!check_usage(curr, prev, next, bit,
1617 exclusive_bit(bit), state_name(bit)))
Peter Zijlstra8e182572007-07-19 01:48:54 -07001618 return 0;
1619
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01001620 return 1;
1621}
Peter Zijlstra8e182572007-07-19 01:48:54 -07001622
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01001623static int
1624check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1625 struct held_lock *next)
1626{
1627#define LOCKDEP_STATE(__STATE) \
1628 if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
Nick Piggincf40bd12009-01-21 08:12:39 +01001629 return 0;
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01001630#include "lockdep_states.h"
1631#undef LOCKDEP_STATE
Nick Piggincf40bd12009-01-21 08:12:39 +01001632
Peter Zijlstra8e182572007-07-19 01:48:54 -07001633 return 1;
1634}
1635
1636static void inc_chains(void)
1637{
1638 if (current->hardirq_context)
1639 nr_hardirq_chains++;
1640 else {
1641 if (current->softirq_context)
1642 nr_softirq_chains++;
1643 else
1644 nr_process_chains++;
1645 }
1646}
1647
1648#else
1649
1650static inline int
1651check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1652 struct held_lock *next)
1653{
1654 return 1;
1655}
1656
1657static inline void inc_chains(void)
1658{
1659 nr_process_chains++;
1660}
1661
1662#endif
1663
Steven Rostedt48702ec2011-04-20 21:41:56 -04001664static void
1665print_deadlock_scenario(struct held_lock *nxt,
1666 struct held_lock *prv)
1667{
1668 struct lock_class *next = hlock_class(nxt);
1669 struct lock_class *prev = hlock_class(prv);
1670
1671 printk(" Possible unsafe locking scenario:\n\n");
1672 printk(" CPU0\n");
1673 printk(" ----\n");
1674 printk(" lock(");
1675 __print_lock_name(prev);
1676 printk(");\n");
1677 printk(" lock(");
1678 __print_lock_name(next);
1679 printk(");\n");
1680 printk("\n *** DEADLOCK ***\n\n");
1681 printk(" May be due to missing lock nesting notation\n\n");
1682}
1683
Peter Zijlstra8e182572007-07-19 01:48:54 -07001684static int
1685print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1686 struct held_lock *next)
1687{
1688 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1689 return 0;
1690
1691 printk("\n=============================================\n");
1692 printk( "[ INFO: possible recursive locking detected ]\n");
1693 print_kernel_version();
1694 printk( "---------------------------------------------\n");
1695 printk("%s/%d is trying to acquire lock:\n",
Pavel Emelyanovba25f9d2007-10-18 23:40:40 -07001696 curr->comm, task_pid_nr(curr));
Peter Zijlstra8e182572007-07-19 01:48:54 -07001697 print_lock(next);
1698 printk("\nbut task is already holding lock:\n");
1699 print_lock(prev);
1700
1701 printk("\nother info that might help us debug this:\n");
Steven Rostedt48702ec2011-04-20 21:41:56 -04001702 print_deadlock_scenario(next, prev);
Peter Zijlstra8e182572007-07-19 01:48:54 -07001703 lockdep_print_held_locks(curr);
1704
1705 printk("\nstack backtrace:\n");
1706 dump_stack();
1707
1708 return 0;
1709}
1710
1711/*
1712 * Check whether we are holding such a class already.
1713 *
1714 * (Note that this has to be done separately, because the graph cannot
1715 * detect such classes of deadlocks.)
1716 *
1717 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
1718 */
1719static int
1720check_deadlock(struct task_struct *curr, struct held_lock *next,
1721 struct lockdep_map *next_instance, int read)
1722{
1723 struct held_lock *prev;
Peter Zijlstra7531e2f2008-08-11 09:30:24 +02001724 struct held_lock *nest = NULL;
Peter Zijlstra8e182572007-07-19 01:48:54 -07001725 int i;
1726
1727 for (i = 0; i < curr->lockdep_depth; i++) {
1728 prev = curr->held_locks + i;
Peter Zijlstra7531e2f2008-08-11 09:30:24 +02001729
1730 if (prev->instance == next->nest_lock)
1731 nest = prev;
1732
Dave Jonesf82b2172008-08-11 09:30:23 +02001733 if (hlock_class(prev) != hlock_class(next))
Peter Zijlstra8e182572007-07-19 01:48:54 -07001734 continue;
Peter Zijlstra7531e2f2008-08-11 09:30:24 +02001735
Peter Zijlstra8e182572007-07-19 01:48:54 -07001736 /*
1737 * Allow read-after-read recursion of the same
1738 * lock class (i.e. read_lock(lock)+read_lock(lock)):
1739 */
1740 if ((read == 2) && prev->read)
1741 return 2;
Peter Zijlstra7531e2f2008-08-11 09:30:24 +02001742
1743 /*
1744 * We're holding the nest_lock, which serializes this lock's
1745 * nesting behaviour.
1746 */
1747 if (nest)
1748 return 2;
1749
Peter Zijlstra8e182572007-07-19 01:48:54 -07001750 return print_deadlock_bug(curr, prev, next);
1751 }
1752 return 1;
1753}
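
/*
 * Hypothetical acquisition sequences, showing which branch of
 * check_deadlock() each one hits (A, A1, A2 and parent are made-up
 * locks; A1 and A2 share a class):
 *
 *	read_lock(&A);  read_lock(&A);
 *		both are recursive reads (read == 2) -> returns 2, allowed
 *
 *	spin_lock(&A);  spin_lock(&A);
 *		same class, no nest_lock -> print_deadlock_bug()
 *
 *	mutex_lock(&parent);
 *	mutex_lock_nest_lock(&A1, &parent);
 *	mutex_lock_nest_lock(&A2, &parent);
 *		same class, but the held nest_lock serializes their
 *		nesting -> returns 2, allowed
 */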
1754
1755/*
1756 * There was a chain-cache miss, and we are about to add a new dependency
1757 * to a previous lock. We recursively validate the following rules:
1758 *
1759 * - would the adding of the <prev> -> <next> dependency create a
1760 * circular dependency in the graph? [== circular deadlock]
1761 *
1762 * - does the new prev->next dependency connect any hardirq-safe lock
1763 * (in the full backwards-subgraph starting at <prev>) with any
1764 * hardirq-unsafe lock (in the full forwards-subgraph starting at
1765 * <next>)? [== illegal lock inversion with hardirq contexts]
1766 *
1767 * - does the new prev->next dependency connect any softirq-safe lock
1768 * (in the full backwards-subgraph starting at <prev>) with any
1769 * softirq-unsafe lock (in the full forwards-subgraph starting at
1770 * <next>)? [== illegal lock inversion with softirq contexts]
1771 *
1772 * any of these scenarios could lead to a deadlock.
1773 *
1774 * Then if all the validations pass, we add the forwards and backwards
1775 * dependency.
1776 */
1777static int
1778check_prev_add(struct task_struct *curr, struct held_lock *prev,
Yong Zhang4726f2a2010-05-04 14:16:48 +08001779 struct held_lock *next, int distance, int trylock_loop)
Peter Zijlstra8e182572007-07-19 01:48:54 -07001780{
1781 struct lock_list *entry;
1782 int ret;
Ming Leidb0002a2009-07-16 15:44:29 +02001783 struct lock_list this;
1784 struct lock_list *uninitialized_var(target_entry);
Yong Zhang4726f2a2010-05-04 14:16:48 +08001785 /*
1786 * Static variable, serialized by the graph_lock().
1787 *
1788 * We use this static variable to save the stack trace in case
1789 * we call into this function multiple times due to encountering
1790 * trylocks in the held lock stack.
1791 */
1792 static struct stack_trace trace;
Peter Zijlstra8e182572007-07-19 01:48:54 -07001793
1794 /*
1795 * Prove that the new <prev> -> <next> dependency would not
1796 * create a circular dependency in the graph. (We do this by
1797 * forward-recursing into the graph starting at <next>, and
1798 * checking whether we can reach <prev>.)
1799 *
1800 * We are using global variables to control the recursion, to
1801 * keep the stackframe size of the recursive functions low:
1802 */
Ming Leidb0002a2009-07-16 15:44:29 +02001803 this.class = hlock_class(next);
1804 this.parent = NULL;
1805 ret = check_noncircular(&this, hlock_class(prev), &target_entry);
1806 if (unlikely(!ret))
1807 return print_circular_bug(&this, target_entry, next, prev);
1808 else if (unlikely(ret < 0))
1809 return print_bfs_bug(ret);
Ming Leic94aa5c2009-07-16 15:44:29 +02001810
Peter Zijlstra8e182572007-07-19 01:48:54 -07001811 if (!check_prev_add_irq(curr, prev, next))
1812 return 0;
1813
1814 /*
1815 * For recursive read-locks we do all the dependency checks,
1816	 * but we don't store read-triggered dependencies (only
1817 * write-triggered dependencies). This ensures that only the
1818 * write-side dependencies matter, and that if for example a
1819 * write-lock never takes any other locks, then the reads are
1820 * equivalent to a NOP.
1821 */
1822 if (next->read == 2 || prev->read == 2)
1823 return 1;
1824 /*
1825 * Is the <prev> -> <next> dependency already present?
1826 *
1827 * (this may occur even though this is a new chain: consider
1828 * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1829 * chains - the second one will be new, but L1 already has
1830 * L2 added to its dependency list, due to the first chain.)
1831 */
Dave Jonesf82b2172008-08-11 09:30:23 +02001832 list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1833 if (entry->class == hlock_class(next)) {
Peter Zijlstra8e182572007-07-19 01:48:54 -07001834 if (distance == 1)
1835 entry->distance = 1;
1836 return 2;
1837 }
1838 }
1839
Yong Zhang4726f2a2010-05-04 14:16:48 +08001840 if (!trylock_loop && !save_trace(&trace))
1841 return 0;
1842
Peter Zijlstra8e182572007-07-19 01:48:54 -07001843 /*
1844 * Ok, all validations passed, add the new lock
1845 * to the previous lock's dependency list:
1846 */
Dave Jonesf82b2172008-08-11 09:30:23 +02001847 ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
1848 &hlock_class(prev)->locks_after,
Yong Zhang4726f2a2010-05-04 14:16:48 +08001849 next->acquire_ip, distance, &trace);
Peter Zijlstra8e182572007-07-19 01:48:54 -07001850
1851 if (!ret)
1852 return 0;
1853
Dave Jonesf82b2172008-08-11 09:30:23 +02001854 ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
1855 &hlock_class(next)->locks_before,
Yong Zhang4726f2a2010-05-04 14:16:48 +08001856 next->acquire_ip, distance, &trace);
Peter Zijlstra8e182572007-07-19 01:48:54 -07001857 if (!ret)
1858 return 0;
1859
1860 /*
1861 * Debugging printouts:
1862 */
Dave Jonesf82b2172008-08-11 09:30:23 +02001863 if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
Peter Zijlstra8e182572007-07-19 01:48:54 -07001864 graph_unlock();
1865 printk("\n new dependency: ");
Dave Jonesf82b2172008-08-11 09:30:23 +02001866 print_lock_name(hlock_class(prev));
Peter Zijlstra8e182572007-07-19 01:48:54 -07001867 printk(" => ");
Dave Jonesf82b2172008-08-11 09:30:23 +02001868 print_lock_name(hlock_class(next));
Peter Zijlstra8e182572007-07-19 01:48:54 -07001869 printk("\n");
1870 dump_stack();
1871 return graph_lock();
1872 }
1873 return 1;
1874}
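
/*
 * Net effect of a successful check_prev_add(), sketched for the
 * hypothetical ordering lock(A); lock(B);:
 *
 *	A's locks_after  gains an entry for B   (walked by the forward BFS)
 *	B's locks_before gains an entry for A   (walked by the backward BFS)
 *
 * Both entries record the same distance and share the stack trace saved
 * above, so check_noncircular() and the find_usage_*() searches can
 * follow the new dependency in either direction.
 */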
1875
1876/*
1877 * Add the dependency to all directly-previous locks that are 'relevant'.
1878 * The ones that are relevant are (in increasing distance from curr):
1879 * all consecutive trylock entries and the final non-trylock entry - or
1880 * the end of this context's lock-chain - whichever comes first.
1881 */
1882static int
1883check_prevs_add(struct task_struct *curr, struct held_lock *next)
1884{
1885 int depth = curr->lockdep_depth;
Yong Zhang4726f2a2010-05-04 14:16:48 +08001886 int trylock_loop = 0;
Peter Zijlstra8e182572007-07-19 01:48:54 -07001887 struct held_lock *hlock;
1888
1889 /*
1890 * Debugging checks.
1891 *
1892 * Depth must not be zero for a non-head lock:
1893 */
1894 if (!depth)
1895 goto out_bug;
1896 /*
1897 * At least two relevant locks must exist for this
1898 * to be a head:
1899 */
1900 if (curr->held_locks[depth].irq_context !=
1901 curr->held_locks[depth-1].irq_context)
1902 goto out_bug;
1903
1904 for (;;) {
1905 int distance = curr->lockdep_depth - depth + 1;
1906 hlock = curr->held_locks + depth-1;
1907 /*
1908 * Only non-recursive-read entries get new dependencies
1909 * added:
1910 */
1911 if (hlock->read != 2) {
Yong Zhang4726f2a2010-05-04 14:16:48 +08001912 if (!check_prev_add(curr, hlock, next,
1913 distance, trylock_loop))
Peter Zijlstra8e182572007-07-19 01:48:54 -07001914 return 0;
1915 /*
1916 * Stop after the first non-trylock entry,
1917 * as non-trylock entries have added their
1918 * own direct dependencies already, so this
1919 * lock is connected to them indirectly:
1920 */
1921 if (!hlock->trylock)
1922 break;
1923 }
1924 depth--;
1925 /*
1926 * End of lock-stack?
1927 */
1928 if (!depth)
1929 break;
1930 /*
1931 * Stop the search if we cross into another context:
1932 */
1933 if (curr->held_locks[depth].irq_context !=
1934 curr->held_locks[depth-1].irq_context)
1935 break;
Yong Zhang4726f2a2010-05-04 14:16:48 +08001936 trylock_loop = 1;
Peter Zijlstra8e182572007-07-19 01:48:54 -07001937 }
1938 return 1;
1939out_bug:
1940 if (!debug_locks_off_graph_unlock())
1941 return 0;
1942
1943 WARN_ON(1);
1944
1945 return 0;
1946}
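
/*
 * Example walk for a hypothetical held-lock stack (innermost last):
 *
 *	held: L1 (plain), L2 (trylock), L3 (trylock)    acquiring: L4
 *
 * The loop above starts at L3 and adds L3 -> L4, then L2 -> L4, then
 * L1 -> L4, and stops at L1 because it is the first non-trylock entry;
 * anything below L1 already has a direct dependency on L1 and is thus
 * connected to L4 indirectly.
 */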
1947
1948unsigned long nr_lock_chains;
Huang, Ying443cd502008-06-20 16:39:21 +08001949struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
Huang, Yingcd1a28e2008-06-23 11:20:54 +08001950int nr_chain_hlocks;
Huang, Ying443cd502008-06-20 16:39:21 +08001951static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
1952
1953struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
1954{
1955 return lock_classes + chain_hlocks[chain->base + i];
1956}
Peter Zijlstra8e182572007-07-19 01:48:54 -07001957
1958/*
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001959 * Look up a dependency chain. If the key is not present yet then
Jarek Poplawski9e860d02007-05-08 00:30:12 -07001960 * add it and return 1 - in this case the new dependency chain is
1961 * validated. If the key is already hashed, return 0.
1962 * (On return with 1 graph_lock is held.)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001963 */
Huang, Ying443cd502008-06-20 16:39:21 +08001964static inline int lookup_chain_cache(struct task_struct *curr,
1965 struct held_lock *hlock,
1966 u64 chain_key)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001967{
Dave Jonesf82b2172008-08-11 09:30:23 +02001968 struct lock_class *class = hlock_class(hlock);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001969 struct list_head *hash_head = chainhashentry(chain_key);
1970 struct lock_chain *chain;
Huang, Ying443cd502008-06-20 16:39:21 +08001971 struct held_lock *hlock_curr, *hlock_next;
Steven Rostedte0944ee2011-04-20 21:42:00 -04001972 int i, j;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001973
Jarek Poplawski381a2292007-02-10 01:44:58 -08001974 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1975 return 0;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001976 /*
1977 * We can walk it lock-free, because entries only get added
1978 * to the hash:
1979 */
1980 list_for_each_entry(chain, hash_head, entry) {
1981 if (chain->chain_key == chain_key) {
1982cache_hit:
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02001983 debug_atomic_inc(chain_lookup_hits);
Ingo Molnar81fc6852006-12-13 00:34:40 -08001984 if (very_verbose(class))
Andrew Morton755cd902006-12-29 16:49:14 -08001985 printk("\nhash chain already cached, key: "
1986 "%016Lx tail class: [%p] %s\n",
1987 (unsigned long long)chain_key,
1988 class->key, class->name);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001989 return 0;
1990 }
1991 }
Ingo Molnar81fc6852006-12-13 00:34:40 -08001992 if (very_verbose(class))
Andrew Morton755cd902006-12-29 16:49:14 -08001993 printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
1994 (unsigned long long)chain_key, class->key, class->name);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001995 /*
1996 * Allocate a new chain entry from the static array, and add
1997 * it to the hash:
1998 */
Ingo Molnar74c383f2006-12-13 00:34:43 -08001999 if (!graph_lock())
2000 return 0;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002001 /*
2002 * We have to walk the chain again locked - to avoid duplicates:
2003 */
2004 list_for_each_entry(chain, hash_head, entry) {
2005 if (chain->chain_key == chain_key) {
Ingo Molnar74c383f2006-12-13 00:34:43 -08002006 graph_unlock();
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002007 goto cache_hit;
2008 }
2009 }
2010 if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
Ingo Molnar74c383f2006-12-13 00:34:43 -08002011 if (!debug_locks_off_graph_unlock())
2012 return 0;
2013
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002014 printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
2015 printk("turning off the locking correctness validator.\n");
Peter Zijlstraeedeeab2009-03-18 12:38:47 +01002016 dump_stack();
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002017 return 0;
2018 }
2019 chain = lock_chains + nr_lock_chains++;
2020 chain->chain_key = chain_key;
Huang, Ying443cd502008-06-20 16:39:21 +08002021 chain->irq_context = hlock->irq_context;
2022 /* Find the first held_lock of current chain */
2023 hlock_next = hlock;
2024 for (i = curr->lockdep_depth - 1; i >= 0; i--) {
2025 hlock_curr = curr->held_locks + i;
2026 if (hlock_curr->irq_context != hlock_next->irq_context)
2027 break;
2028 hlock_next = hlock;
2029 }
2030 i++;
2031 chain->depth = curr->lockdep_depth + 1 - i;
Steven Rostedte0944ee2011-04-20 21:42:00 -04002032 if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2033 chain->base = nr_chain_hlocks;
2034 nr_chain_hlocks += chain->depth;
Huang, Ying443cd502008-06-20 16:39:21 +08002035 for (j = 0; j < chain->depth - 1; j++, i++) {
Dave Jonesf82b2172008-08-11 09:30:23 +02002036 int lock_id = curr->held_locks[i].class_idx - 1;
Huang, Ying443cd502008-06-20 16:39:21 +08002037 chain_hlocks[chain->base + j] = lock_id;
2038 }
2039 chain_hlocks[chain->base + j] = class - lock_classes;
2040 }
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002041 list_add_tail_rcu(&chain->entry, hash_head);
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02002042 debug_atomic_inc(chain_lookup_misses);
Peter Zijlstra8e182572007-07-19 01:48:54 -07002043 inc_chains();
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002044
2045 return 1;
2046}
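
/*
 * Shape of a freshly cached chain for a hypothetical context that
 * holds classes X and Y and is now taking Z, all in one irq context:
 *
 *	chain->chain_key   = incremental hash over (X, Y, Z)
 *	chain->depth       = 3
 *	chain->base        = first free slot in chain_hlocks[]
 *	chain_hlocks[base + 0] = class index of X
 *	chain_hlocks[base + 1] = class index of Y
 *	chain_hlocks[base + 2] = class index of Z
 *
 * The next time the same sequence is taken, the key hits in the hash
 * above and validate_chain() can skip the expensive dependency checks.
 */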
Peter Zijlstra8e182572007-07-19 01:48:54 -07002047
2048static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
Johannes Berg4e6045f2007-10-18 23:39:55 -07002049 struct held_lock *hlock, int chain_head, u64 chain_key)
Peter Zijlstra8e182572007-07-19 01:48:54 -07002050{
2051 /*
2052 * Trylock needs to maintain the stack of held locks, but it
2053 * does not add new dependencies, because trylock can be done
2054 * in any order.
2055 *
2056 * We look up the chain_key and do the O(N^2) check and update of
2057 * the dependencies only if this is a new dependency chain.
2058 * (If lookup_chain_cache() returns with 1 it acquires
2059 * graph_lock for us)
2060 */
2061 if (!hlock->trylock && (hlock->check == 2) &&
Huang, Ying443cd502008-06-20 16:39:21 +08002062 lookup_chain_cache(curr, hlock, chain_key)) {
Peter Zijlstra8e182572007-07-19 01:48:54 -07002063 /*
2064 * Check whether last held lock:
2065 *
2066 * - is irq-safe, if this lock is irq-unsafe
2067 * - is softirq-safe, if this lock is hardirq-unsafe
2068 *
2069 * And check whether the new lock's dependency graph
2070 * could lead back to the previous lock.
2071 *
2072 * any of these scenarios could lead to a deadlock. If
2073	 * any of these scenarios could lead to a deadlock. If all
2074	 * validations pass, we add the new dependency.
2075 int ret = check_deadlock(curr, hlock, lock, hlock->read);
2076
2077 if (!ret)
2078 return 0;
2079 /*
2080 * Mark recursive read, as we jump over it when
2081 * building dependencies (just like we jump over
2082 * trylock entries):
2083 */
2084 if (ret == 2)
2085 hlock->read = 2;
2086 /*
2087 * Add dependency only if this lock is not the head
2088 * of the chain, and if it's not a secondary read-lock:
2089 */
2090 if (!chain_head && ret != 2)
2091 if (!check_prevs_add(curr, hlock))
2092 return 0;
2093 graph_unlock();
2094 } else
2095 /* after lookup_chain_cache(): */
2096 if (unlikely(!debug_locks))
2097 return 0;
2098
2099 return 1;
2100}
2101#else
2102static inline int validate_chain(struct task_struct *curr,
2103 struct lockdep_map *lock, struct held_lock *hlock,
Gregory Haskins3aa416b2007-10-11 22:11:11 +02002104 int chain_head, u64 chain_key)
Peter Zijlstra8e182572007-07-19 01:48:54 -07002105{
2106 return 1;
2107}
Peter Zijlstraca58abc2007-07-19 01:48:53 -07002108#endif
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002109
2110/*
2111 * We are building curr_chain_key incrementally, so double-check
2112 * it from scratch, to make sure that it's done correctly:
2113 */
Steven Rostedt1d09daa2008-05-12 21:20:55 +02002114static void check_chain_key(struct task_struct *curr)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002115{
2116#ifdef CONFIG_DEBUG_LOCKDEP
2117 struct held_lock *hlock, *prev_hlock = NULL;
2118 unsigned int i, id;
2119 u64 chain_key = 0;
2120
2121 for (i = 0; i < curr->lockdep_depth; i++) {
2122 hlock = curr->held_locks + i;
2123 if (chain_key != hlock->prev_chain_key) {
2124 debug_locks_off();
Arjan van de Ven2df8b1d2008-07-30 12:43:11 -07002125 WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002126 curr->lockdep_depth, i,
2127 (unsigned long long)chain_key,
2128 (unsigned long long)hlock->prev_chain_key);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002129 return;
2130 }
Dave Jonesf82b2172008-08-11 09:30:23 +02002131 id = hlock->class_idx - 1;
Jarek Poplawski381a2292007-02-10 01:44:58 -08002132 if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
2133 return;
2134
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002135 if (prev_hlock && (prev_hlock->irq_context !=
2136 hlock->irq_context))
2137 chain_key = 0;
2138 chain_key = iterate_chain_key(chain_key, id);
2139 prev_hlock = hlock;
2140 }
2141 if (chain_key != curr->curr_chain_key) {
2142 debug_locks_off();
Arjan van de Ven2df8b1d2008-07-30 12:43:11 -07002143 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002144 curr->lockdep_depth, i,
2145 (unsigned long long)chain_key,
2146 (unsigned long long)curr->curr_chain_key);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002147 }
2148#endif
2149}
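
/*
 * The incremental construction that is being re-checked above, for
 * three held locks with class ids id1, id2, id3 in one irq context:
 *
 *	chain_key = iterate_chain_key(
 *			iterate_chain_key(
 *				iterate_chain_key(0, id1), id2), id3);
 *
 * Crossing into a different irq context resets the key to 0, which is
 * why lock chains are kept per irq context (see chain->irq_context).
 */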
2150
Steven Rostedt282b5c22011-04-20 21:41:59 -04002151static void
2152print_usage_bug_scenario(struct held_lock *lock)
2153{
2154 struct lock_class *class = hlock_class(lock);
2155
2156 printk(" Possible unsafe locking scenario:\n\n");
2157 printk(" CPU0\n");
2158 printk(" ----\n");
2159 printk(" lock(");
2160 __print_lock_name(class);
2161 printk(");\n");
2162 printk(" <Interrupt>\n");
2163 printk(" lock(");
2164 __print_lock_name(class);
2165 printk(");\n");
2166 printk("\n *** DEADLOCK ***\n\n");
2167}
2168
Peter Zijlstra8e182572007-07-19 01:48:54 -07002169static int
2170print_usage_bug(struct task_struct *curr, struct held_lock *this,
2171 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
2172{
2173 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2174 return 0;
2175
2176 printk("\n=================================\n");
2177 printk( "[ INFO: inconsistent lock state ]\n");
2178 print_kernel_version();
2179 printk( "---------------------------------\n");
2180
2181 printk("inconsistent {%s} -> {%s} usage.\n",
2182 usage_str[prev_bit], usage_str[new_bit]);
2183
2184 printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
Pavel Emelyanovba25f9d2007-10-18 23:40:40 -07002185 curr->comm, task_pid_nr(curr),
Peter Zijlstra8e182572007-07-19 01:48:54 -07002186 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
2187 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
2188 trace_hardirqs_enabled(curr),
2189 trace_softirqs_enabled(curr));
2190 print_lock(this);
2191
2192 printk("{%s} state was registered at:\n", usage_str[prev_bit]);
Dave Jonesf82b2172008-08-11 09:30:23 +02002193 print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
Peter Zijlstra8e182572007-07-19 01:48:54 -07002194
2195 print_irqtrace_events(curr);
2196 printk("\nother info that might help us debug this:\n");
Steven Rostedt282b5c22011-04-20 21:41:59 -04002197 print_usage_bug_scenario(this);
2198
Peter Zijlstra8e182572007-07-19 01:48:54 -07002199 lockdep_print_held_locks(curr);
2200
2201 printk("\nstack backtrace:\n");
2202 dump_stack();
2203
2204 return 0;
2205}
2206
2207/*
2208 * Print out an error if an invalid bit is set:
2209 */
2210static inline int
2211valid_state(struct task_struct *curr, struct held_lock *this,
2212 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
2213{
Dave Jonesf82b2172008-08-11 09:30:23 +02002214 if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
Peter Zijlstra8e182572007-07-19 01:48:54 -07002215 return print_usage_bug(curr, this, bad_bit, new_bit);
2216 return 1;
2217}
2218
2219static int mark_lock(struct task_struct *curr, struct held_lock *this,
2220 enum lock_usage_bit new_bit);
2221
Steven Rostedt81d68a92008-05-12 21:20:42 +02002222#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002223
2224/*
2225 * print irq inversion bug:
2226 */
2227static int
Ming Lei24208ca2009-07-16 15:44:29 +02002228print_irq_inversion_bug(struct task_struct *curr,
2229 struct lock_list *root, struct lock_list *other,
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002230 struct held_lock *this, int forwards,
2231 const char *irqclass)
2232{
Steven Rostedtdad3d742011-04-20 21:41:57 -04002233 struct lock_list *entry = other;
2234 struct lock_list *middle = NULL;
2235 int depth;
2236
Ingo Molnar74c383f2006-12-13 00:34:43 -08002237 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002238 return 0;
2239
2240 printk("\n=========================================================\n");
2241 printk( "[ INFO: possible irq lock inversion dependency detected ]\n");
Dave Jones99de0552006-09-29 02:00:10 -07002242 print_kernel_version();
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002243 printk( "---------------------------------------------------------\n");
2244 printk("%s/%d just changed the state of lock:\n",
Pavel Emelyanovba25f9d2007-10-18 23:40:40 -07002245 curr->comm, task_pid_nr(curr));
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002246 print_lock(this);
2247 if (forwards)
Peter Zijlstra26575e22009-03-04 14:53:24 +01002248 printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002249 else
Peter Zijlstra26575e22009-03-04 14:53:24 +01002250 printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
Ming Lei24208ca2009-07-16 15:44:29 +02002251 print_lock_name(other->class);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002252 printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
2253
2254 printk("\nother info that might help us debug this:\n");
Steven Rostedtdad3d742011-04-20 21:41:57 -04002255
2256 /* Find a middle lock (if one exists) */
2257 depth = get_lock_depth(other);
2258 do {
2259 if (depth == 0 && (entry != root)) {
2260 printk("lockdep:%s bad path found in chain graph\n", __func__);
2261 break;
2262 }
2263 middle = entry;
2264 entry = get_lock_parent(entry);
2265 depth--;
2266 } while (entry && entry != root && (depth >= 0));
2267 if (forwards)
2268 print_irq_lock_scenario(root, other,
2269 middle ? middle->class : root->class, other->class);
2270 else
2271 print_irq_lock_scenario(other, root,
2272 middle ? middle->class : other->class, root->class);
2273
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002274 lockdep_print_held_locks(curr);
2275
Ming Lei24208ca2009-07-16 15:44:29 +02002276 printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
2277 if (!save_trace(&root->trace))
2278 return 0;
2279 print_shortest_lock_dependencies(other, root);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002280
2281 printk("\nstack backtrace:\n");
2282 dump_stack();
2283
2284 return 0;
2285}
2286
2287/*
2288 * Prove that in the forwards-direction subgraph starting at <this>
2289 * there is no lock matching <mask>:
2290 */
2291static int
2292check_usage_forwards(struct task_struct *curr, struct held_lock *this,
2293 enum lock_usage_bit bit, const char *irqclass)
2294{
2295 int ret;
Ming Leid7aaba12009-07-16 15:44:29 +02002296 struct lock_list root;
2297 struct lock_list *uninitialized_var(target_entry);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002298
Ming Leid7aaba12009-07-16 15:44:29 +02002299 root.parent = NULL;
2300 root.class = hlock_class(this);
2301 ret = find_usage_forwards(&root, bit, &target_entry);
Peter Zijlstraaf012962009-07-16 15:44:29 +02002302 if (ret < 0)
2303 return print_bfs_bug(ret);
2304 if (ret == 1)
2305 return ret;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002306
Ming Lei24208ca2009-07-16 15:44:29 +02002307 return print_irq_inversion_bug(curr, &root, target_entry,
Ming Leid7aaba12009-07-16 15:44:29 +02002308 this, 1, irqclass);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002309}
2310
2311/*
2312 * Prove that in the backwards-direction subgraph starting at <this>
2313 * there is no lock matching <mask>:
2314 */
2315static int
2316check_usage_backwards(struct task_struct *curr, struct held_lock *this,
2317 enum lock_usage_bit bit, const char *irqclass)
2318{
2319 int ret;
Ming Leid7aaba12009-07-16 15:44:29 +02002320 struct lock_list root;
2321 struct lock_list *uninitialized_var(target_entry);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002322
Ming Leid7aaba12009-07-16 15:44:29 +02002323 root.parent = NULL;
2324 root.class = hlock_class(this);
2325 ret = find_usage_backwards(&root, bit, &target_entry);
Peter Zijlstraaf012962009-07-16 15:44:29 +02002326 if (ret < 0)
2327 return print_bfs_bug(ret);
2328 if (ret == 1)
2329 return ret;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002330
Ming Lei24208ca2009-07-16 15:44:29 +02002331 return print_irq_inversion_bug(curr, &root, target_entry,
Oleg Nesterov48d50672010-01-26 19:16:41 +01002332 this, 0, irqclass);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002333}
2334
Ingo Molnar3117df02006-12-13 00:34:43 -08002335void print_irqtrace_events(struct task_struct *curr)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002336{
2337 printk("irq event stamp: %u\n", curr->irq_events);
2338 printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event);
2339 print_ip_sym(curr->hardirq_enable_ip);
2340 printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
2341 print_ip_sym(curr->hardirq_disable_ip);
2342 printk("softirqs last enabled at (%u): ", curr->softirq_enable_event);
2343 print_ip_sym(curr->softirq_enable_ip);
2344 printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
2345 print_ip_sym(curr->softirq_disable_ip);
2346}
2347
Peter Zijlstracd953022009-01-22 16:38:21 +01002348static int HARDIRQ_verbose(struct lock_class *class)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002349{
Peter Zijlstra8e182572007-07-19 01:48:54 -07002350#if HARDIRQ_VERBOSE
2351 return class_filter(class);
2352#endif
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002353 return 0;
2354}
2355
Peter Zijlstracd953022009-01-22 16:38:21 +01002356static int SOFTIRQ_verbose(struct lock_class *class)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002357{
Peter Zijlstra8e182572007-07-19 01:48:54 -07002358#if SOFTIRQ_VERBOSE
2359 return class_filter(class);
2360#endif
2361 return 0;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002362}
2363
Peter Zijlstracd953022009-01-22 16:38:21 +01002364static int RECLAIM_FS_verbose(struct lock_class *class)
Nick Piggincf40bd12009-01-21 08:12:39 +01002365{
2366#if RECLAIM_VERBOSE
2367 return class_filter(class);
2368#endif
2369 return 0;
2370}
2371
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002372#define STRICT_READ_CHECKS 1
2373
Peter Zijlstracd953022009-01-22 16:38:21 +01002374static int (*state_verbose_f[])(struct lock_class *class) = {
2375#define LOCKDEP_STATE(__STATE) \
2376 __STATE##_verbose,
2377#include "lockdep_states.h"
2378#undef LOCKDEP_STATE
2379};
2380
2381static inline int state_verbose(enum lock_usage_bit bit,
2382 struct lock_class *class)
2383{
2384 return state_verbose_f[bit >> 2](class);
2385}
2386
Peter Zijlstra42c50d52009-01-22 16:58:16 +01002387typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2388 enum lock_usage_bit bit, const char *name);
2389
Peter Zijlstra6a6904d2009-01-22 16:07:44 +01002390static int
Peter Zijlstra1c21f142009-03-04 13:51:13 +01002391mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2392 enum lock_usage_bit new_bit)
Peter Zijlstra6a6904d2009-01-22 16:07:44 +01002393{
Peter Zijlstraf9892092009-01-22 16:09:59 +01002394 int excl_bit = exclusive_bit(new_bit);
Peter Zijlstra9d3651a2009-01-22 17:18:32 +01002395 int read = new_bit & 1;
Peter Zijlstra42c50d52009-01-22 16:58:16 +01002396 int dir = new_bit & 2;
2397
Peter Zijlstra38aa2712009-01-27 14:53:50 +01002398 /*
2399 * mark USED_IN has to look forwards -- to ensure no dependency
2400 * has ENABLED state, which would allow recursion deadlocks.
2401 *
2402 * mark ENABLED has to look backwards -- to ensure no dependee
2403 * has USED_IN state, which, again, would allow recursion deadlocks.
2404 */
Peter Zijlstra42c50d52009-01-22 16:58:16 +01002405 check_usage_f usage = dir ?
2406 check_usage_backwards : check_usage_forwards;
Peter Zijlstraf9892092009-01-22 16:09:59 +01002407
Peter Zijlstra38aa2712009-01-27 14:53:50 +01002408 /*
2409 * Validate that this particular lock does not have conflicting
2410 * usage states.
2411 */
Peter Zijlstra6a6904d2009-01-22 16:07:44 +01002412 if (!valid_state(curr, this, new_bit, excl_bit))
2413 return 0;
Peter Zijlstra9d3651a2009-01-22 17:18:32 +01002414
Peter Zijlstra38aa2712009-01-27 14:53:50 +01002415 /*
2416 * Validate that the lock dependencies don't have conflicting usage
2417 * states.
2418 */
2419 if ((!read || !dir || STRICT_READ_CHECKS) &&
Peter Zijlstra1c21f142009-03-04 13:51:13 +01002420 !usage(curr, this, excl_bit, state_name(new_bit & ~1)))
Peter Zijlstra6a6904d2009-01-22 16:07:44 +01002421 return 0;
Peter Zijlstra780e8202009-01-22 16:51:29 +01002422
Peter Zijlstra38aa2712009-01-27 14:53:50 +01002423 /*
2424 * Check for read in write conflicts
2425 */
2426 if (!read) {
2427 if (!valid_state(curr, this, new_bit, excl_bit + 1))
2428 return 0;
2429
2430 if (STRICT_READ_CHECKS &&
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01002431 !usage(curr, this, excl_bit + 1,
2432 state_name(new_bit + 1)))
Peter Zijlstra38aa2712009-01-27 14:53:50 +01002433 return 0;
2434 }
Peter Zijlstra780e8202009-01-22 16:51:29 +01002435
Peter Zijlstracd953022009-01-22 16:38:21 +01002436 if (state_verbose(new_bit, hlock_class(this)))
Peter Zijlstra6a6904d2009-01-22 16:07:44 +01002437 return 2;
2438
2439 return 1;
2440}
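
/*
 * Concrete reading of the two check directions, for a hypothetical
 * lock L being marked LOCK_ENABLED_HARDIRQ (dir set, so usage ==
 * check_usage_backwards):
 *
 *	- valid_state() rejects the marking if L itself already carries
 *	  the exclusive bit, LOCK_USED_IN_HARDIRQ;
 *	- check_usage_backwards() then walks every class reachable from L
 *	  via locks_before (i.e. locks held while L is acquired) and
 *	  rejects the marking if any of them is LOCK_USED_IN_HARDIRQ: a
 *	  hardirq taking that lock could, via the existing chain, end up
 *	  waiting on L while the interrupted context still holds it.
 *
 * Marking a USED_IN bit is the mirror image and searches forwards.
 */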
2441
Nick Piggincf40bd12009-01-21 08:12:39 +01002442enum mark_type {
Peter Zijlstra36bfb9b2009-01-22 14:12:41 +01002443#define LOCKDEP_STATE(__STATE) __STATE,
2444#include "lockdep_states.h"
2445#undef LOCKDEP_STATE
Nick Piggincf40bd12009-01-21 08:12:39 +01002446};
2447
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002448/*
2449 * Mark all held locks with a usage bit:
2450 */
Steven Rostedt1d09daa2008-05-12 21:20:55 +02002451static int
Nick Piggincf40bd12009-01-21 08:12:39 +01002452mark_held_locks(struct task_struct *curr, enum mark_type mark)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002453{
2454 enum lock_usage_bit usage_bit;
2455 struct held_lock *hlock;
2456 int i;
2457
2458 for (i = 0; i < curr->lockdep_depth; i++) {
2459 hlock = curr->held_locks + i;
2460
Peter Zijlstracf2ad4d2009-01-27 13:58:08 +01002461 usage_bit = 2 + (mark << 2); /* ENABLED */
2462 if (hlock->read)
2463 usage_bit += 1; /* READ */
2464
2465 BUG_ON(usage_bit >= LOCK_USAGE_STATES);
Nick Piggincf40bd12009-01-21 08:12:39 +01002466
Peter Zijlstra70a06862011-07-25 12:09:59 +02002467 if (hlock_class(hlock)->key == __lockdep_no_validate__.subkeys)
Peter Zijlstraefbe2ee2011-07-07 11:39:45 +02002468 continue;
2469
Jarek Poplawski4ff773bb2007-05-08 00:31:00 -07002470 if (!mark_lock(curr, hlock, usage_bit))
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002471 return 0;
2472 }
2473
2474 return 1;
2475}
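
/*
 * Worked example of the usage_bit computation above, assuming the bit
 * layout documented at exclusive_bit() (bit 0 read, bit 1
 * used_in/enabled, bits 2+ state):
 *
 *	mark = SOFTIRQ, hlock->read = 1:
 *		usage_bit = 2 + (SOFTIRQ << 2) + 1 = LOCK_ENABLED_SOFTIRQ_READ
 *	mark = HARDIRQ, hlock->read = 0:
 *		usage_bit = 2 + (HARDIRQ << 2)     = LOCK_ENABLED_HARDIRQ
 */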
2476
2477/*
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002478 * Hardirqs will be enabled:
2479 */
Peter Zijlstradd4e5d32011-06-21 17:17:27 +02002480static void __trace_hardirqs_on_caller(unsigned long ip)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002481{
2482 struct task_struct *curr = current;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002483
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002484 /* we'll do an OFF -> ON transition: */
2485 curr->hardirqs_enabled = 1;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002486
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002487 /*
2488 * We are going to turn hardirqs on, so set the
2489 * usage bit for all held locks:
2490 */
Nick Piggincf40bd12009-01-21 08:12:39 +01002491 if (!mark_held_locks(curr, HARDIRQ))
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002492 return;
2493 /*
2494 * If we have softirqs enabled, then set the usage
2495 * bit for all held locks. (disabled hardirqs prevented
2496 * this bit from being set before)
2497 */
2498 if (curr->softirqs_enabled)
Nick Piggincf40bd12009-01-21 08:12:39 +01002499 if (!mark_held_locks(curr, SOFTIRQ))
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002500 return;
2501
2502 curr->hardirq_enable_ip = ip;
2503 curr->hardirq_enable_event = ++curr->irq_events;
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02002504 debug_atomic_inc(hardirqs_on_events);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002505}
Peter Zijlstradd4e5d32011-06-21 17:17:27 +02002506
2507void trace_hardirqs_on_caller(unsigned long ip)
2508{
2509 time_hardirqs_on(CALLER_ADDR0, ip);
2510
2511 if (unlikely(!debug_locks || current->lockdep_recursion))
2512 return;
2513
Peter Zijlstra7d36b262011-07-26 13:13:44 +02002514 if (unlikely(current->hardirqs_enabled)) {
2515 /*
2516		 * Neither irq nor preemption is disabled here
2517 * so this is racy by nature but losing one hit
2518 * in a stat is not a big deal.
2519 */
2520 __debug_atomic_inc(redundant_hardirqs_on);
2521 return;
2522 }
2523
Peter Zijlstradd4e5d32011-06-21 17:17:27 +02002524 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2525 return;
2526
Peter Zijlstra7d36b262011-07-26 13:13:44 +02002527 if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
2528 return;
2529
2530 if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2531 return;
2532
Peter Zijlstradd4e5d32011-06-21 17:17:27 +02002533 current->lockdep_recursion = 1;
2534 __trace_hardirqs_on_caller(ip);
2535 current->lockdep_recursion = 0;
2536}
Steven Rostedt81d68a92008-05-12 21:20:42 +02002537EXPORT_SYMBOL(trace_hardirqs_on_caller);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002538
Steven Rostedt1d09daa2008-05-12 21:20:55 +02002539void trace_hardirqs_on(void)
Steven Rostedt81d68a92008-05-12 21:20:42 +02002540{
2541 trace_hardirqs_on_caller(CALLER_ADDR0);
2542}
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002543EXPORT_SYMBOL(trace_hardirqs_on);
2544
2545/*
2546 * Hardirqs were disabled:
2547 */
Heiko Carstens6afe40b2008-10-28 11:14:58 +01002548void trace_hardirqs_off_caller(unsigned long ip)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002549{
2550 struct task_struct *curr = current;
2551
Heiko Carstens6afe40b2008-10-28 11:14:58 +01002552 time_hardirqs_off(CALLER_ADDR0, ip);
Steven Rostedt81d68a92008-05-12 21:20:42 +02002553
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002554 if (unlikely(!debug_locks || current->lockdep_recursion))
2555 return;
2556
2557 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2558 return;
2559
2560 if (curr->hardirqs_enabled) {
2561 /*
2562 * We have done an ON -> OFF transition:
2563 */
2564 curr->hardirqs_enabled = 0;
Heiko Carstens6afe40b2008-10-28 11:14:58 +01002565 curr->hardirq_disable_ip = ip;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002566 curr->hardirq_disable_event = ++curr->irq_events;
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02002567 debug_atomic_inc(hardirqs_off_events);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002568 } else
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02002569 debug_atomic_inc(redundant_hardirqs_off);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002570}
Steven Rostedt81d68a92008-05-12 21:20:42 +02002571EXPORT_SYMBOL(trace_hardirqs_off_caller);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002572
Steven Rostedt1d09daa2008-05-12 21:20:55 +02002573void trace_hardirqs_off(void)
Steven Rostedt81d68a92008-05-12 21:20:42 +02002574{
2575 trace_hardirqs_off_caller(CALLER_ADDR0);
2576}
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002577EXPORT_SYMBOL(trace_hardirqs_off);
2578
2579/*
2580 * Softirqs will be enabled:
2581 */
2582void trace_softirqs_on(unsigned long ip)
2583{
2584 struct task_struct *curr = current;
2585
Peter Zijlstradd4e5d32011-06-21 17:17:27 +02002586 if (unlikely(!debug_locks || current->lockdep_recursion))
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002587 return;
2588
2589 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2590 return;
2591
2592 if (curr->softirqs_enabled) {
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02002593 debug_atomic_inc(redundant_softirqs_on);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002594 return;
2595 }
2596
Peter Zijlstradd4e5d32011-06-21 17:17:27 +02002597 current->lockdep_recursion = 1;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002598 /*
2599 * We'll do an OFF -> ON transition:
2600 */
2601 curr->softirqs_enabled = 1;
2602 curr->softirq_enable_ip = ip;
2603 curr->softirq_enable_event = ++curr->irq_events;
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02002604 debug_atomic_inc(softirqs_on_events);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002605 /*
2606 * We are going to turn softirqs on, so set the
2607 * usage bit for all held locks, if hardirqs are
2608 * enabled too:
2609 */
2610 if (curr->hardirqs_enabled)
Nick Piggincf40bd12009-01-21 08:12:39 +01002611 mark_held_locks(curr, SOFTIRQ);
Peter Zijlstradd4e5d32011-06-21 17:17:27 +02002612 current->lockdep_recursion = 0;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002613}
2614
2615/*
2616 * Softirqs were disabled:
2617 */
2618void trace_softirqs_off(unsigned long ip)
2619{
2620 struct task_struct *curr = current;
2621
Peter Zijlstradd4e5d32011-06-21 17:17:27 +02002622 if (unlikely(!debug_locks || current->lockdep_recursion))
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002623 return;
2624
2625 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2626 return;
2627
2628 if (curr->softirqs_enabled) {
2629 /*
2630 * We have done an ON -> OFF transition:
2631 */
2632 curr->softirqs_enabled = 0;
2633 curr->softirq_disable_ip = ip;
2634 curr->softirq_disable_event = ++curr->irq_events;
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02002635 debug_atomic_inc(softirqs_off_events);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002636 DEBUG_LOCKS_WARN_ON(!softirq_count());
2637 } else
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02002638 debug_atomic_inc(redundant_softirqs_off);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002639}
2640
Peter Zijlstra2f850182009-03-20 11:13:20 +01002641static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
Nick Piggincf40bd12009-01-21 08:12:39 +01002642{
2643 struct task_struct *curr = current;
2644
2645 if (unlikely(!debug_locks))
2646 return;
2647
2648 /* no reclaim without waiting on it */
2649 if (!(gfp_mask & __GFP_WAIT))
2650 return;
2651
2652 /* this guy won't enter reclaim */
2653 if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
2654 return;
2655
2656	/* We're only interested in __GFP_FS allocations for now */
2657 if (!(gfp_mask & __GFP_FS))
2658 return;
2659
Peter Zijlstra2f850182009-03-20 11:13:20 +01002660 if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
Nick Piggincf40bd12009-01-21 08:12:39 +01002661 return;
2662
2663 mark_held_locks(curr, RECLAIM_FS);
2664}
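
/*
 * How the filters above treat a few common allocation masks
 * (illustrative, based on the usual composition of these flags):
 *
 *	GFP_ATOMIC	no __GFP_WAIT		-> ignored
 *	GFP_NOFS	__GFP_WAIT, no __GFP_FS	-> ignored
 *	GFP_KERNEL	__GFP_WAIT | __GFP_FS	-> mark_held_locks(curr, RECLAIM_FS)
 *
 * Only allocations that may both sleep and recurse into filesystem
 * reclaim mark the currently held locks as RECLAIM_FS-enabled.
 */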
2665
Peter Zijlstra2f850182009-03-20 11:13:20 +01002666static void check_flags(unsigned long flags);
2667
2668void lockdep_trace_alloc(gfp_t gfp_mask)
2669{
2670 unsigned long flags;
2671
2672 if (unlikely(current->lockdep_recursion))
2673 return;
2674
2675 raw_local_irq_save(flags);
2676 check_flags(flags);
2677 current->lockdep_recursion = 1;
2678 __lockdep_trace_alloc(gfp_mask, flags);
2679 current->lockdep_recursion = 0;
2680 raw_local_irq_restore(flags);
2681}
2682
Peter Zijlstra8e182572007-07-19 01:48:54 -07002683static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2684{
2685 /*
2686 * If non-trylock use in a hardirq or softirq context, then
2687 * mark the lock as used in these contexts:
2688 */
2689 if (!hlock->trylock) {
2690 if (hlock->read) {
2691 if (curr->hardirq_context)
2692 if (!mark_lock(curr, hlock,
2693 LOCK_USED_IN_HARDIRQ_READ))
2694 return 0;
2695 if (curr->softirq_context)
2696 if (!mark_lock(curr, hlock,
2697 LOCK_USED_IN_SOFTIRQ_READ))
2698 return 0;
2699 } else {
2700 if (curr->hardirq_context)
2701 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2702 return 0;
2703 if (curr->softirq_context)
2704 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2705 return 0;
2706 }
2707 }
2708 if (!hlock->hardirqs_off) {
2709 if (hlock->read) {
2710 if (!mark_lock(curr, hlock,
Peter Zijlstra4fc95e82009-01-22 13:10:52 +01002711 LOCK_ENABLED_HARDIRQ_READ))
Peter Zijlstra8e182572007-07-19 01:48:54 -07002712 return 0;
2713 if (curr->softirqs_enabled)
2714 if (!mark_lock(curr, hlock,
Peter Zijlstra4fc95e82009-01-22 13:10:52 +01002715 LOCK_ENABLED_SOFTIRQ_READ))
Peter Zijlstra8e182572007-07-19 01:48:54 -07002716 return 0;
2717 } else {
2718 if (!mark_lock(curr, hlock,
Peter Zijlstra4fc95e82009-01-22 13:10:52 +01002719 LOCK_ENABLED_HARDIRQ))
Peter Zijlstra8e182572007-07-19 01:48:54 -07002720 return 0;
2721 if (curr->softirqs_enabled)
2722 if (!mark_lock(curr, hlock,
Peter Zijlstra4fc95e82009-01-22 13:10:52 +01002723 LOCK_ENABLED_SOFTIRQ))
Peter Zijlstra8e182572007-07-19 01:48:54 -07002724 return 0;
2725 }
2726 }
2727
Nick Piggincf40bd12009-01-21 08:12:39 +01002728 /*
2729	 * We reuse the irq context infrastructure more broadly as general
2730 * context checking code. This tests GFP_FS recursion (a lock taken
2731 * during reclaim for a GFP_FS allocation is held over a GFP_FS
2732 * allocation).
2733 */
2734 if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
2735 if (hlock->read) {
2736 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
2737 return 0;
2738 } else {
2739 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
2740 return 0;
2741 }
2742 }
2743
Peter Zijlstra8e182572007-07-19 01:48:54 -07002744 return 1;
2745}
2746
2747static int separate_irq_context(struct task_struct *curr,
2748 struct held_lock *hlock)
2749{
2750 unsigned int depth = curr->lockdep_depth;
2751
2752 /*
2753 * Keep track of points where we cross into an interrupt context:
2754 */
2755 hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2756 curr->softirq_context;
2757 if (depth) {
2758 struct held_lock *prev_hlock;
2759
2760 prev_hlock = curr->held_locks + depth-1;
2761 /*
2762 * If we cross into another context, reset the
2763 * hash key (this also prevents the checking and the
2764 * adding of the dependency to 'prev'):
2765 */
2766 if (prev_hlock->irq_context != hlock->irq_context)
2767 return 1;
2768 }
2769 return 0;
2770}
2771
2772#else
2773
2774static inline
2775int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2776 enum lock_usage_bit new_bit)
2777{
2778 WARN_ON(1);
2779 return 1;
2780}
2781
2782static inline int mark_irqflags(struct task_struct *curr,
2783 struct held_lock *hlock)
2784{
2785 return 1;
2786}
2787
2788static inline int separate_irq_context(struct task_struct *curr,
2789 struct held_lock *hlock)
2790{
2791 return 0;
2792}
2793
Peter Zijlstra868a23a2009-02-15 00:25:21 +01002794void lockdep_trace_alloc(gfp_t gfp_mask)
2795{
2796}
2797
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002798#endif
2799
2800/*
Peter Zijlstra8e182572007-07-19 01:48:54 -07002801 * Mark a lock with a usage bit, and validate the state transition:
2802 */
Steven Rostedt1d09daa2008-05-12 21:20:55 +02002803static int mark_lock(struct task_struct *curr, struct held_lock *this,
Steven Rostedt0764d232008-05-12 21:20:44 +02002804 enum lock_usage_bit new_bit)
Peter Zijlstra8e182572007-07-19 01:48:54 -07002805{
2806 unsigned int new_mask = 1 << new_bit, ret = 1;
2807
2808 /*
2809 * If already set then do not dirty the cacheline,
2810 * nor do any checks:
2811 */
Dave Jonesf82b2172008-08-11 09:30:23 +02002812 if (likely(hlock_class(this)->usage_mask & new_mask))
Peter Zijlstra8e182572007-07-19 01:48:54 -07002813 return 1;
2814
2815 if (!graph_lock())
2816 return 0;
2817 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002818 * Make sure we didn't race:
Peter Zijlstra8e182572007-07-19 01:48:54 -07002819 */
Dave Jonesf82b2172008-08-11 09:30:23 +02002820 if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
Peter Zijlstra8e182572007-07-19 01:48:54 -07002821 graph_unlock();
2822 return 1;
2823 }
2824
Dave Jonesf82b2172008-08-11 09:30:23 +02002825 hlock_class(this)->usage_mask |= new_mask;
Peter Zijlstra8e182572007-07-19 01:48:54 -07002826
Dave Jonesf82b2172008-08-11 09:30:23 +02002827 if (!save_trace(hlock_class(this)->usage_traces + new_bit))
Peter Zijlstra8e182572007-07-19 01:48:54 -07002828 return 0;
2829
2830 switch (new_bit) {
Peter Zijlstra53464172009-01-22 14:15:53 +01002831#define LOCKDEP_STATE(__STATE) \
2832 case LOCK_USED_IN_##__STATE: \
2833 case LOCK_USED_IN_##__STATE##_READ: \
2834 case LOCK_ENABLED_##__STATE: \
2835 case LOCK_ENABLED_##__STATE##_READ:
2836#include "lockdep_states.h"
2837#undef LOCKDEP_STATE
Peter Zijlstra8e182572007-07-19 01:48:54 -07002838 ret = mark_lock_irq(curr, this, new_bit);
2839 if (!ret)
2840 return 0;
2841 break;
2842 case LOCK_USED:
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02002843 debug_atomic_dec(nr_unused_locks);
Peter Zijlstra8e182572007-07-19 01:48:54 -07002844 break;
2845 default:
2846 if (!debug_locks_off_graph_unlock())
2847 return 0;
2848 WARN_ON(1);
2849 return 0;
2850 }
2851
2852 graph_unlock();
2853
2854 /*
2855 * We must printk outside of the graph_lock:
2856 */
2857 if (ret == 2) {
2858 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
2859 print_lock(this);
2860 print_irqtrace_events(curr);
2861 dump_stack();
2862 }
2863
2864 return ret;
2865}
2866
2867/*
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002868 * Initialize a lock instance's lock-class mapping info:
2869 */
2870void lockdep_init_map(struct lockdep_map *lock, const char *name,
Peter Zijlstra4dfbb9d2006-10-11 01:45:14 -04002871 struct lock_class_key *key, int subclass)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002872{
Tejun Heof59de892011-07-14 15:19:09 +02002873 memset(lock, 0, sizeof(*lock));
Hitoshi Mitake62016252010-10-05 18:01:51 +09002874
Peter Zijlstrac8a25002009-04-17 09:40:49 +02002875#ifdef CONFIG_LOCK_STAT
2876 lock->cpu = raw_smp_processor_id();
2877#endif
2878
2879 if (DEBUG_LOCKS_WARN_ON(!name)) {
2880 lock->name = "NULL";
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002881 return;
Peter Zijlstrac8a25002009-04-17 09:40:49 +02002882 }
2883
2884 lock->name = name;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002885
2886 if (DEBUG_LOCKS_WARN_ON(!key))
2887 return;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002888 /*
2889 * Sanity check, the lock-class key must be persistent:
2890 */
2891 if (!static_obj(key)) {
2892 printk("BUG: key %p not in .data!\n", key);
2893 DEBUG_LOCKS_WARN_ON(1);
2894 return;
2895 }
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002896 lock->key = key;
Peter Zijlstrac8a25002009-04-17 09:40:49 +02002897
2898 if (unlikely(!debug_locks))
2899 return;
2900
Peter Zijlstra4dfbb9d2006-10-11 01:45:14 -04002901 if (subclass)
2902 register_lock_class(lock, subclass, 1);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002903}
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002904EXPORT_SYMBOL_GPL(lockdep_init_map);
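/*
 * Usage sketch (hypothetical caller, not part of this file): code that
 * embeds a struct lockdep_map directly pairs it with a static
 * lock_class_key, so the key is persistent, and initializes the mapping
 * once before the first acquire:
 *
 *	static struct lock_class_key foo_key;
 *
 *	static void foo_init_once(struct foo *foo)
 *	{
 *		lockdep_init_map(&foo->dep_map, "foo->dep_map", &foo_key, 0);
 *	}
 *
 * The spin_lock_init()/mutex_init() style wrappers do the same thing
 * behind the scenes with a static key of their own, which is why most
 * code never calls lockdep_init_map() directly.
 */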
2905
Peter Zijlstra1704f472010-03-19 01:37:42 +01002906struct lock_class_key __lockdep_no_validate__;
2907
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002908/*
2909 * This gets called for every mutex_lock*()/spin_lock*() operation.
2910 * We maintain the dependency maps and validate the locking attempt:
2911 */
2912static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2913 int trylock, int read, int check, int hardirqs_off,
Peter Zijlstrabb97a912009-07-20 19:15:35 +02002914 struct lockdep_map *nest_lock, unsigned long ip,
2915 int references)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002916{
2917 struct task_struct *curr = current;
Ingo Molnard6d897c2006-07-10 04:44:04 -07002918 struct lock_class *class = NULL;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002919 struct held_lock *hlock;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002920 unsigned int depth, id;
2921 int chain_head = 0;
Peter Zijlstrabb97a912009-07-20 19:15:35 +02002922 int class_idx;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002923 u64 chain_key;
2924
Peter Zijlstraf20786f2007-07-19 01:48:56 -07002925 if (!prove_locking)
2926 check = 1;
2927
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002928 if (unlikely(!debug_locks))
2929 return 0;
2930
2931 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2932 return 0;
2933
Peter Zijlstra1704f472010-03-19 01:37:42 +01002934 if (lock->key == &__lockdep_no_validate__)
2935 check = 1;
2936
Hitoshi Mitake62016252010-10-05 18:01:51 +09002937 if (subclass < NR_LOCKDEP_CACHING_CLASSES)
2938 class = lock->class_cache[subclass];
Ingo Molnard6d897c2006-07-10 04:44:04 -07002939 /*
Hitoshi Mitake62016252010-10-05 18:01:51 +09002940 * Not cached?
Ingo Molnard6d897c2006-07-10 04:44:04 -07002941 */
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002942 if (unlikely(!class)) {
Peter Zijlstra4dfbb9d2006-10-11 01:45:14 -04002943 class = register_lock_class(lock, subclass, 0);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002944 if (!class)
2945 return 0;
2946 }
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02002947 atomic_inc((atomic_t *)&class->ops);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002948 if (very_verbose(class)) {
2949 printk("\nacquire class [%p] %s", class->key, class->name);
2950 if (class->name_version > 1)
2951 printk("#%d", class->name_version);
2952 printk("\n");
2953 dump_stack();
2954 }
2955
2956 /*
2957 * Add the lock to the list of currently held locks.
2958 * (we don't increase the depth just yet, up until the
2959 * dependency checks are done)
2960 */
2961 depth = curr->lockdep_depth;
2962 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
2963 return 0;
2964
Peter Zijlstrabb97a912009-07-20 19:15:35 +02002965 class_idx = class - lock_classes + 1;
2966
2967 if (depth) {
2968 hlock = curr->held_locks + depth - 1;
2969 if (hlock->class_idx == class_idx && nest_lock) {
2970 if (hlock->references)
2971 hlock->references++;
2972 else
2973 hlock->references = 2;
2974
2975 return 1;
2976 }
2977 }
2978
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002979 hlock = curr->held_locks + depth;
Dave Jonesf82b2172008-08-11 09:30:23 +02002980 if (DEBUG_LOCKS_WARN_ON(!class))
2981 return 0;
Peter Zijlstrabb97a912009-07-20 19:15:35 +02002982 hlock->class_idx = class_idx;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002983 hlock->acquire_ip = ip;
2984 hlock->instance = lock;
Peter Zijlstra7531e2f2008-08-11 09:30:24 +02002985 hlock->nest_lock = nest_lock;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002986 hlock->trylock = trylock;
2987 hlock->read = read;
2988 hlock->check = check;
Dmitry Baryshkov6951b122008-08-18 04:26:37 +04002989 hlock->hardirqs_off = !!hardirqs_off;
Peter Zijlstrabb97a912009-07-20 19:15:35 +02002990 hlock->references = references;
Peter Zijlstraf20786f2007-07-19 01:48:56 -07002991#ifdef CONFIG_LOCK_STAT
2992 hlock->waittime_stamp = 0;
Peter Zijlstra3365e7792009-10-09 10:12:41 +02002993 hlock->holdtime_stamp = lockstat_clock();
Peter Zijlstraf20786f2007-07-19 01:48:56 -07002994#endif
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002995
Peter Zijlstra8e182572007-07-19 01:48:54 -07002996 if (check == 2 && !mark_irqflags(curr, hlock))
2997 return 0;
2998
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002999 /* mark it as used: */
Jarek Poplawski4ff773bb2007-05-08 00:31:00 -07003000 if (!mark_lock(curr, hlock, LOCK_USED))
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003001 return 0;
Peter Zijlstra8e182572007-07-19 01:48:54 -07003002
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003003 /*
Gautham R Shenoy17aacfb2007-10-28 20:47:01 +01003004 * Calculate the chain hash: it's the combined hash of all the
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003005 * lock keys along the dependency chain. We save the hash value
3006 * at every step so that we can get the current hash easily
3007 * after unlock. The chain hash is then used to cache dependency
3008 * results.
3009 *
3010 * The 'key ID' (the index of the class) is the most compact key
3011 * value we can use to drive the hash, rather than class->key.
3012 */
3013 id = class - lock_classes;
3014 if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
3015 return 0;
3016
3017 chain_key = curr->curr_chain_key;
3018 if (!depth) {
3019 if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
3020 return 0;
3021 chain_head = 1;
3022 }
3023
3024 hlock->prev_chain_key = chain_key;
Peter Zijlstra8e182572007-07-19 01:48:54 -07003025 if (separate_irq_context(curr, hlock)) {
3026 chain_key = 0;
3027 chain_head = 1;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003028 }
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003029 chain_key = iterate_chain_key(chain_key, id);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003030
Gregory Haskins3aa416b2007-10-11 22:11:11 +02003031 if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
Peter Zijlstra8e182572007-07-19 01:48:54 -07003032 return 0;
Jarek Poplawski381a2292007-02-10 01:44:58 -08003033
Gregory Haskins3aa416b2007-10-11 22:11:11 +02003034 curr->curr_chain_key = chain_key;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003035 curr->lockdep_depth++;
3036 check_chain_key(curr);
Jarek Poplawski60e114d2007-02-20 13:58:00 -08003037#ifdef CONFIG_DEBUG_LOCKDEP
3038 if (unlikely(!debug_locks))
3039 return 0;
3040#endif
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003041 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
3042 debug_locks_off();
3043 printk("BUG: MAX_LOCK_DEPTH too low!\n");
3044 printk("turning off the locking correctness validator.\n");
Peter Zijlstraeedeeab2009-03-18 12:38:47 +01003045 dump_stack();
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003046 return 0;
3047 }
Jarek Poplawski381a2292007-02-10 01:44:58 -08003048
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003049 if (unlikely(curr->lockdep_depth > max_lockdep_depth))
3050 max_lockdep_depth = curr->lockdep_depth;
3051
3052 return 1;
3053}
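/*
 * Sketch of the nest_lock shortcut above (hypothetical caller): when the
 * class at the top of the stack is re-acquired with a nest_lock, no new
 * held_lock entry is allocated - the existing entry just gains a
 * reference:
 *
 *	lock_acquire(&obj->dep_map, 0, 0, 0, 2, &parent->dep_map, _RET_IP_);
 *	lock_acquire(&obj->dep_map, 0, 0, 0, 2, &parent->dep_map, _RET_IP_);
 *
 * After the second call hlock->references is 2 and lockdep_depth is
 * unchanged; each subsequent lock_release() drops one reference in
 * lock_release_non_nested() before the entry itself goes away. This is
 * what spin_lock_nest_lock() style annotations rely on.
 */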
3054
3055static int
3056print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
3057 unsigned long ip)
3058{
3059 if (!debug_locks_off())
3060 return 0;
3061 if (debug_locks_silent)
3062 return 0;
3063
3064 printk("\n=====================================\n");
3065 printk( "[ BUG: bad unlock balance detected! ]\n");
3066 printk( "-------------------------------------\n");
3067 printk("%s/%d is trying to release lock (",
Pavel Emelyanovba25f9d2007-10-18 23:40:40 -07003068 curr->comm, task_pid_nr(curr));
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003069 print_lockdep_cache(lock);
3070 printk(") at:\n");
3071 print_ip_sym(ip);
3072 printk("but there are no more locks to release!\n");
3073 printk("\nother info that might help us debug this:\n");
3074 lockdep_print_held_locks(curr);
3075
3076 printk("\nstack backtrace:\n");
3077 dump_stack();
3078
3079 return 0;
3080}
3081
3082/*
3083 * Common debugging checks for both nested and non-nested unlock:
3084 */
3085static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
3086 unsigned long ip)
3087{
3088 if (unlikely(!debug_locks))
3089 return 0;
3090 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3091 return 0;
3092
3093 if (curr->lockdep_depth <= 0)
3094 return print_unlock_inbalance_bug(curr, lock, ip);
3095
3096 return 1;
3097}
3098
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003099static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
3100{
3101 if (hlock->instance == lock)
3102 return 1;
3103
3104 if (hlock->references) {
Hitoshi Mitake62016252010-10-05 18:01:51 +09003105 struct lock_class *class = lock->class_cache[0];
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003106
3107 if (!class)
3108 class = look_up_lock_class(lock, 0);
3109
Peter Zijlstra80e04012011-08-05 14:26:17 +02003110 /*
3111 * If look_up_lock_class() failed to find a class, we're trying
3112 * to test if we hold a lock that has never yet been acquired.
3113 * Clearly if the lock hasn't been acquired _ever_, we're not
3114 * holding it either, so report failure.
3115 */
3116 if (!class)
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003117 return 0;
3118
3119 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
3120 return 0;
3121
3122 if (hlock->class_idx == class - lock_classes + 1)
3123 return 1;
3124 }
3125
3126 return 0;
3127}
3128
Peter Zijlstra64aa3482008-08-11 09:30:21 +02003129static int
Peter Zijlstra00ef9f72008-12-04 09:00:17 +01003130__lock_set_class(struct lockdep_map *lock, const char *name,
3131 struct lock_class_key *key, unsigned int subclass,
3132 unsigned long ip)
Peter Zijlstra64aa3482008-08-11 09:30:21 +02003133{
3134 struct task_struct *curr = current;
3135 struct held_lock *hlock, *prev_hlock;
3136 struct lock_class *class;
3137 unsigned int depth;
3138 int i;
3139
3140 depth = curr->lockdep_depth;
3141 if (DEBUG_LOCKS_WARN_ON(!depth))
3142 return 0;
3143
3144 prev_hlock = NULL;
3145 for (i = depth-1; i >= 0; i--) {
3146 hlock = curr->held_locks + i;
3147 /*
3148 * We must not cross into another context:
3149 */
3150 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3151 break;
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003152 if (match_held_lock(hlock, lock))
Peter Zijlstra64aa3482008-08-11 09:30:21 +02003153 goto found_it;
3154 prev_hlock = hlock;
3155 }
3156 return print_unlock_inbalance_bug(curr, lock, ip);
3157
3158found_it:
Peter Zijlstra00ef9f72008-12-04 09:00:17 +01003159 lockdep_init_map(lock, name, key, 0);
Peter Zijlstra64aa3482008-08-11 09:30:21 +02003160 class = register_lock_class(lock, subclass, 0);
Dave Jonesf82b2172008-08-11 09:30:23 +02003161 hlock->class_idx = class - lock_classes + 1;
Peter Zijlstra64aa3482008-08-11 09:30:21 +02003162
3163 curr->lockdep_depth = i;
3164 curr->curr_chain_key = hlock->prev_chain_key;
3165
3166 for (; i < depth; i++) {
3167 hlock = curr->held_locks + i;
3168 if (!__lock_acquire(hlock->instance,
Dave Jonesf82b2172008-08-11 09:30:23 +02003169 hlock_class(hlock)->subclass, hlock->trylock,
Peter Zijlstra64aa3482008-08-11 09:30:21 +02003170 hlock->read, hlock->check, hlock->hardirqs_off,
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003171 hlock->nest_lock, hlock->acquire_ip,
3172 hlock->references))
Peter Zijlstra64aa3482008-08-11 09:30:21 +02003173 return 0;
3174 }
3175
3176 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3177 return 0;
3178 return 1;
3179}
3180
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003181/*
3182 * Remove the lock from the list of currently held locks in a
3183 * potentially non-nested (out of order) manner. This is a
3184 * relatively rare operation, as all the unlock APIs default
3185 * to nested mode (which uses lock_release()):
3186 */
3187static int
3188lock_release_non_nested(struct task_struct *curr,
3189 struct lockdep_map *lock, unsigned long ip)
3190{
3191 struct held_lock *hlock, *prev_hlock;
3192 unsigned int depth;
3193 int i;
3194
3195 /*
3196 * Check whether the lock exists in the current stack
3197 * of held locks:
3198 */
3199 depth = curr->lockdep_depth;
3200 if (DEBUG_LOCKS_WARN_ON(!depth))
3201 return 0;
3202
3203 prev_hlock = NULL;
3204 for (i = depth-1; i >= 0; i--) {
3205 hlock = curr->held_locks + i;
3206 /*
3207 * We must not cross into another context:
3208 */
3209 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3210 break;
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003211 if (match_held_lock(hlock, lock))
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003212 goto found_it;
3213 prev_hlock = hlock;
3214 }
3215 return print_unlock_inbalance_bug(curr, lock, ip);
3216
3217found_it:
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003218 if (hlock->instance == lock)
3219 lock_release_holdtime(hlock);
3220
3221 if (hlock->references) {
3222 hlock->references--;
3223 if (hlock->references) {
3224 /*
3225 * We had, and after removing one, still have
3226 * references, the current lock stack is still
3227 * valid. We're done!
3228 */
3229 return 1;
3230 }
3231 }
Peter Zijlstraf20786f2007-07-19 01:48:56 -07003232
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003233 /*
3234 * We have the right lock to unlock, 'hlock' points to it.
3235 * Now we remove it from the stack, and add back the other
3236 * entries (if any), recalculating the hash along the way:
3237 */
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003238
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003239 curr->lockdep_depth = i;
3240 curr->curr_chain_key = hlock->prev_chain_key;
3241
3242 for (i++; i < depth; i++) {
3243 hlock = curr->held_locks + i;
3244 if (!__lock_acquire(hlock->instance,
Dave Jonesf82b2172008-08-11 09:30:23 +02003245 hlock_class(hlock)->subclass, hlock->trylock,
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003246 hlock->read, hlock->check, hlock->hardirqs_off,
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003247 hlock->nest_lock, hlock->acquire_ip,
3248 hlock->references))
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003249 return 0;
3250 }
3251
3252 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
3253 return 0;
3254 return 1;
3255}
3256
3257/*
3258 * Remove the lock from the list of currently held locks - this gets
3259 * called on mutex_unlock()/spin_unlock*() (or on a failed
3260 * mutex_lock_interruptible()). This is done for unlocks that nest
3261 * perfectly. (i.e. the current top of the lock-stack is unlocked)
3262 */
3263static int lock_release_nested(struct task_struct *curr,
3264 struct lockdep_map *lock, unsigned long ip)
3265{
3266 struct held_lock *hlock;
3267 unsigned int depth;
3268
3269 /*
3270 * Pop off the top of the lock stack:
3271 */
3272 depth = curr->lockdep_depth - 1;
3273 hlock = curr->held_locks + depth;
3274
3275 /*
3276 * Is the unlock non-nested:
3277 */
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003278 if (hlock->instance != lock || hlock->references)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003279 return lock_release_non_nested(curr, lock, ip);
3280 curr->lockdep_depth--;
3281
3282 if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
3283 return 0;
3284
3285 curr->curr_chain_key = hlock->prev_chain_key;
3286
Peter Zijlstraf20786f2007-07-19 01:48:56 -07003287 lock_release_holdtime(hlock);
3288
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003289#ifdef CONFIG_DEBUG_LOCKDEP
3290 hlock->prev_chain_key = 0;
Dave Jonesf82b2172008-08-11 09:30:23 +02003291 hlock->class_idx = 0;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003292 hlock->acquire_ip = 0;
3293 hlock->irq_context = 0;
3294#endif
3295 return 1;
3296}
3297
3298/*
3299 * Remove the lock from the list of currently held locks - this gets
3300 * called on mutex_unlock()/spin_unlock*() (or on a failed
3301 * mutex_lock_interruptible()). The 'nested' argument picks the fast
3302 * path for perfectly nested unlocks, or the out-of-order variant.
3303 */
3304static void
3305__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
3306{
3307 struct task_struct *curr = current;
3308
3309 if (!check_unlock(curr, lock, ip))
3310 return;
3311
3312 if (nested) {
3313 if (!lock_release_nested(curr, lock, ip))
3314 return;
3315 } else {
3316 if (!lock_release_non_nested(curr, lock, ip))
3317 return;
3318 }
3319
3320 check_chain_key(curr);
3321}
3322
Peter Zijlstraf607c662009-07-20 19:16:29 +02003323static int __lock_is_held(struct lockdep_map *lock)
3324{
3325 struct task_struct *curr = current;
3326 int i;
3327
3328 for (i = 0; i < curr->lockdep_depth; i++) {
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003329 struct held_lock *hlock = curr->held_locks + i;
3330
3331 if (match_held_lock(hlock, lock))
Peter Zijlstraf607c662009-07-20 19:16:29 +02003332 return 1;
3333 }
3334
3335 return 0;
3336}
3337
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003338/*
3339 * Check whether we follow the irq-flags state precisely:
3340 */
Steven Rostedt1d09daa2008-05-12 21:20:55 +02003341static void check_flags(unsigned long flags)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003342{
Ingo Molnar992860e2008-07-14 10:28:38 +02003343#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
3344 defined(CONFIG_TRACE_IRQFLAGS)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003345 if (!debug_locks)
3346 return;
3347
Ingo Molnar5f9fa8a2007-12-07 19:02:47 +01003348 if (irqs_disabled_flags(flags)) {
3349 if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
3350 printk("possible reason: unannotated irqs-off.\n");
3351 }
3352 } else {
3353 if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
3354 printk("possible reason: unannotated irqs-on.\n");
3355 }
3356 }
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003357
3358 /*
3359 * We don't accurately track softirq state in e.g.
3360 * hardirq contexts (such as on 4KSTACKS), so only
3361 * check if not in hardirq contexts:
3362 */
3363 if (!hardirq_count()) {
3364 if (softirq_count())
3365 DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
3366 else
3367 DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
3368 }
3369
3370 if (!debug_locks)
3371 print_irqtrace_events(current);
3372#endif
3373}
3374
Peter Zijlstra00ef9f72008-12-04 09:00:17 +01003375void lock_set_class(struct lockdep_map *lock, const char *name,
3376 struct lock_class_key *key, unsigned int subclass,
3377 unsigned long ip)
Peter Zijlstra64aa3482008-08-11 09:30:21 +02003378{
3379 unsigned long flags;
3380
3381 if (unlikely(current->lockdep_recursion))
3382 return;
3383
3384 raw_local_irq_save(flags);
3385 current->lockdep_recursion = 1;
3386 check_flags(flags);
Peter Zijlstra00ef9f72008-12-04 09:00:17 +01003387 if (__lock_set_class(lock, name, key, subclass, ip))
Peter Zijlstra64aa3482008-08-11 09:30:21 +02003388 check_chain_key(current);
3389 current->lockdep_recursion = 0;
3390 raw_local_irq_restore(flags);
3391}
Peter Zijlstra00ef9f72008-12-04 09:00:17 +01003392EXPORT_SYMBOL_GPL(lock_set_class);
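/*
 * Usage sketch (names are illustrative): lock_set_class() re-annotates a
 * lock the caller already holds, typically to give it a different name
 * and key, or to drop back to subclass 0 after a nested acquisition; the
 * lock_set_subclass() helper in <linux/lockdep.h> is a thin wrapper that
 * keeps the current name and key:
 *
 *	lock_set_class(&foo->lock.dep_map, "foo->lock", &foo_key, 0,
 *		       _THIS_IP_);
 */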
Peter Zijlstra64aa3482008-08-11 09:30:21 +02003393
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003394/*
3395 * We are not always called with irqs disabled - do that here,
3396 * and also avoid lockdep recursion:
3397 */
Steven Rostedt1d09daa2008-05-12 21:20:55 +02003398void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
Peter Zijlstra7531e2f2008-08-11 09:30:24 +02003399 int trylock, int read, int check,
3400 struct lockdep_map *nest_lock, unsigned long ip)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003401{
3402 unsigned long flags;
3403
3404 if (unlikely(current->lockdep_recursion))
3405 return;
3406
3407 raw_local_irq_save(flags);
3408 check_flags(flags);
3409
3410 current->lockdep_recursion = 1;
Frederic Weisbeckerdb2c4c72010-02-02 23:34:40 +01003411 trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003412 __lock_acquire(lock, subclass, trylock, read, check,
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003413 irqs_disabled_flags(flags), nest_lock, ip, 0);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003414 current->lockdep_recursion = 0;
3415 raw_local_irq_restore(flags);
3416}
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003417EXPORT_SYMBOL_GPL(lock_acquire);
3418
Steven Rostedt1d09daa2008-05-12 21:20:55 +02003419void lock_release(struct lockdep_map *lock, int nested,
Steven Rostedt0764d232008-05-12 21:20:44 +02003420 unsigned long ip)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003421{
3422 unsigned long flags;
3423
3424 if (unlikely(current->lockdep_recursion))
3425 return;
3426
3427 raw_local_irq_save(flags);
3428 check_flags(flags);
3429 current->lockdep_recursion = 1;
Frederic Weisbecker93135432010-05-08 06:24:25 +02003430 trace_lock_release(lock, ip);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003431 __lock_release(lock, nested, ip);
3432 current->lockdep_recursion = 0;
3433 raw_local_irq_restore(flags);
3434}
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003435EXPORT_SYMBOL_GPL(lock_release);
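/*
 * Annotation sketch for a hypothetical locking primitive (foo_lock and
 * arch_foo_* are made-up names): a primitive lockdep knows nothing about
 * can take part in dependency tracking by calling lock_acquire() before
 * it takes the real lock and lock_release() when it drops it:
 *
 *	void foo_lock(struct foo_lock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 2, NULL, _RET_IP_);
 *		arch_foo_lock(&l->raw);
 *	}
 *
 *	void foo_unlock(struct foo_lock *l)
 *	{
 *		lock_release(&l->dep_map, 1, _RET_IP_);
 *		arch_foo_unlock(&l->raw);
 *	}
 *
 * (subclass 0, no trylock, write mode, check == 2 for full validation,
 * no nest_lock; l->dep_map must have been set up with lockdep_init_map())
 */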
3436
Peter Zijlstraf607c662009-07-20 19:16:29 +02003437int lock_is_held(struct lockdep_map *lock)
3438{
3439 unsigned long flags;
3440 int ret = 0;
3441
3442 if (unlikely(current->lockdep_recursion))
Peter Zijlstraf2513cd2011-06-06 12:32:43 +02003443 return 1; /* avoid false negative lockdep_assert_held() */
Peter Zijlstraf607c662009-07-20 19:16:29 +02003444
3445 raw_local_irq_save(flags);
3446 check_flags(flags);
3447
3448 current->lockdep_recursion = 1;
3449 ret = __lock_is_held(lock);
3450 current->lockdep_recursion = 0;
3451 raw_local_irq_restore(flags);
3452
3453 return ret;
3454}
3455EXPORT_SYMBOL_GPL(lock_is_held);
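/*
 * Typical use (sketch): lock_is_held() backs assertions like
 * lockdep_assert_held(), i.e. roughly:
 *
 *	WARN_ON(debug_locks && !lock_is_held(&foo->lock.dep_map));
 *
 * which is why the recursion check above returns 1 instead of 0: while
 * lockdep is temporarily disabled for this task, an assertion must not
 * fire a false negative.
 */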
3456
Nick Piggincf40bd12009-01-21 08:12:39 +01003457void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
3458{
3459 current->lockdep_reclaim_gfp = gfp_mask;
3460}
3461
3462void lockdep_clear_current_reclaim_state(void)
3463{
3464 current->lockdep_reclaim_gfp = 0;
3465}
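/*
 * Usage sketch (simplified from the direct-reclaim path; exact call
 * sites may differ): the page allocator brackets direct reclaim with
 * these helpers so that lock usage during reclaim gets marked:
 *
 *	lockdep_set_current_reclaim_state(gfp_mask);
 *	progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
 *	lockdep_clear_current_reclaim_state();
 *
 * With current->lockdep_reclaim_gfp set, locks acquired during reclaim
 * can be cross-checked against locks held over __GFP_FS allocations.
 */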
3466
Peter Zijlstraf20786f2007-07-19 01:48:56 -07003467#ifdef CONFIG_LOCK_STAT
3468static int
3469print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
3470 unsigned long ip)
3471{
3472 if (!debug_locks_off())
3473 return 0;
3474 if (debug_locks_silent)
3475 return 0;
3476
3477 printk("\n=================================\n");
3478 printk( "[ BUG: bad contention detected! ]\n");
3479 printk( "---------------------------------\n");
3480 printk("%s/%d is trying to contend lock (",
Pavel Emelyanovba25f9d2007-10-18 23:40:40 -07003481 curr->comm, task_pid_nr(curr));
Peter Zijlstraf20786f2007-07-19 01:48:56 -07003482 print_lockdep_cache(lock);
3483 printk(") at:\n");
3484 print_ip_sym(ip);
3485 printk("but there are no locks held!\n");
3486 printk("\nother info that might help us debug this:\n");
3487 lockdep_print_held_locks(curr);
3488
3489 printk("\nstack backtrace:\n");
3490 dump_stack();
3491
3492 return 0;
3493}
3494
3495static void
3496__lock_contended(struct lockdep_map *lock, unsigned long ip)
3497{
3498 struct task_struct *curr = current;
3499 struct held_lock *hlock, *prev_hlock;
3500 struct lock_class_stats *stats;
3501 unsigned int depth;
Peter Zijlstrac7e78cf2008-10-16 23:17:09 +02003502 int i, contention_point, contending_point;
Peter Zijlstraf20786f2007-07-19 01:48:56 -07003503
3504 depth = curr->lockdep_depth;
3505 if (DEBUG_LOCKS_WARN_ON(!depth))
3506 return;
3507
3508 prev_hlock = NULL;
3509 for (i = depth-1; i >= 0; i--) {
3510 hlock = curr->held_locks + i;
3511 /*
3512 * We must not cross into another context:
3513 */
3514 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3515 break;
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003516 if (match_held_lock(hlock, lock))
Peter Zijlstraf20786f2007-07-19 01:48:56 -07003517 goto found_it;
3518 prev_hlock = hlock;
3519 }
3520 print_lock_contention_bug(curr, lock, ip);
3521 return;
3522
3523found_it:
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003524 if (hlock->instance != lock)
3525 return;
3526
Peter Zijlstra3365e7792009-10-09 10:12:41 +02003527 hlock->waittime_stamp = lockstat_clock();
Peter Zijlstraf20786f2007-07-19 01:48:56 -07003528
Peter Zijlstrac7e78cf2008-10-16 23:17:09 +02003529 contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
3530 contending_point = lock_point(hlock_class(hlock)->contending_point,
3531 lock->ip);
Peter Zijlstraf20786f2007-07-19 01:48:56 -07003532
Dave Jonesf82b2172008-08-11 09:30:23 +02003533 stats = get_lock_stats(hlock_class(hlock));
Peter Zijlstrac7e78cf2008-10-16 23:17:09 +02003534 if (contention_point < LOCKSTAT_POINTS)
3535 stats->contention_point[contention_point]++;
3536 if (contending_point < LOCKSTAT_POINTS)
3537 stats->contending_point[contending_point]++;
Peter Zijlstra96645672007-07-19 01:49:00 -07003538 if (lock->cpu != smp_processor_id())
3539 stats->bounces[bounce_contended + !!hlock->read]++;
Peter Zijlstraf20786f2007-07-19 01:48:56 -07003540 put_lock_stats(stats);
3541}
3542
3543static void
Peter Zijlstrac7e78cf2008-10-16 23:17:09 +02003544__lock_acquired(struct lockdep_map *lock, unsigned long ip)
Peter Zijlstraf20786f2007-07-19 01:48:56 -07003545{
3546 struct task_struct *curr = current;
3547 struct held_lock *hlock, *prev_hlock;
3548 struct lock_class_stats *stats;
3549 unsigned int depth;
Peter Zijlstra3365e7792009-10-09 10:12:41 +02003550 u64 now, waittime = 0;
Peter Zijlstra96645672007-07-19 01:49:00 -07003551 int i, cpu;
Peter Zijlstraf20786f2007-07-19 01:48:56 -07003552
3553 depth = curr->lockdep_depth;
3554 if (DEBUG_LOCKS_WARN_ON(!depth))
3555 return;
3556
3557 prev_hlock = NULL;
3558 for (i = depth-1; i >= 0; i--) {
3559 hlock = curr->held_locks + i;
3560 /*
3561 * We must not cross into another context:
3562 */
3563 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3564 break;
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003565 if (match_held_lock(hlock, lock))
Peter Zijlstraf20786f2007-07-19 01:48:56 -07003566 goto found_it;
3567 prev_hlock = hlock;
3568 }
3569 print_lock_contention_bug(curr, lock, _RET_IP_);
3570 return;
3571
3572found_it:
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003573 if (hlock->instance != lock)
3574 return;
3575
Peter Zijlstra96645672007-07-19 01:49:00 -07003576 cpu = smp_processor_id();
3577 if (hlock->waittime_stamp) {
Peter Zijlstra3365e7792009-10-09 10:12:41 +02003578 now = lockstat_clock();
Peter Zijlstra96645672007-07-19 01:49:00 -07003579 waittime = now - hlock->waittime_stamp;
3580 hlock->holdtime_stamp = now;
3581 }
Peter Zijlstraf20786f2007-07-19 01:48:56 -07003582
Frederic Weisbecker883a2a32010-05-08 06:16:11 +02003583 trace_lock_acquired(lock, ip);
Frederic Weisbecker20625012009-04-06 01:49:33 +02003584
Dave Jonesf82b2172008-08-11 09:30:23 +02003585 stats = get_lock_stats(hlock_class(hlock));
Peter Zijlstra96645672007-07-19 01:49:00 -07003586 if (waittime) {
3587 if (hlock->read)
3588 lock_time_inc(&stats->read_waittime, waittime);
3589 else
3590 lock_time_inc(&stats->write_waittime, waittime);
3591 }
3592 if (lock->cpu != cpu)
3593 stats->bounces[bounce_acquired + !!hlock->read]++;
Peter Zijlstraf20786f2007-07-19 01:48:56 -07003594 put_lock_stats(stats);
Peter Zijlstra96645672007-07-19 01:49:00 -07003595
3596 lock->cpu = cpu;
Peter Zijlstrac7e78cf2008-10-16 23:17:09 +02003597 lock->ip = ip;
Peter Zijlstraf20786f2007-07-19 01:48:56 -07003598}
3599
3600void lock_contended(struct lockdep_map *lock, unsigned long ip)
3601{
3602 unsigned long flags;
3603
3604 if (unlikely(!lock_stat))
3605 return;
3606
3607 if (unlikely(current->lockdep_recursion))
3608 return;
3609
3610 raw_local_irq_save(flags);
3611 check_flags(flags);
3612 current->lockdep_recursion = 1;
Frederic Weisbeckerdb2c4c72010-02-02 23:34:40 +01003613 trace_lock_contended(lock, ip);
Peter Zijlstraf20786f2007-07-19 01:48:56 -07003614 __lock_contended(lock, ip);
3615 current->lockdep_recursion = 0;
3616 raw_local_irq_restore(flags);
3617}
3618EXPORT_SYMBOL_GPL(lock_contended);
3619
Peter Zijlstrac7e78cf2008-10-16 23:17:09 +02003620void lock_acquired(struct lockdep_map *lock, unsigned long ip)
Peter Zijlstraf20786f2007-07-19 01:48:56 -07003621{
3622 unsigned long flags;
3623
3624 if (unlikely(!lock_stat))
3625 return;
3626
3627 if (unlikely(current->lockdep_recursion))
3628 return;
3629
3630 raw_local_irq_save(flags);
3631 check_flags(flags);
3632 current->lockdep_recursion = 1;
Peter Zijlstrac7e78cf2008-10-16 23:17:09 +02003633 __lock_acquired(lock, ip);
Peter Zijlstraf20786f2007-07-19 01:48:56 -07003634 current->lockdep_recursion = 0;
3635 raw_local_irq_restore(flags);
3636}
3637EXPORT_SYMBOL_GPL(lock_acquired);
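/*
 * Sketch of how a sleeping-lock slowpath feeds these two hooks
 * (simplified; try_fast_path()/wait_for_lock() are made-up names, the
 * real mutex code uses its own helpers):
 *
 *	mutex_acquire(&lock->dep_map, subclass, 0, ip);
 *	if (!try_fast_path(lock)) {
 *		lock_contended(&lock->dep_map, ip);
 *		wait_for_lock(lock);
 *	}
 *	lock_acquired(&lock->dep_map, ip);
 *
 * lock_contended() timestamps the start of the wait and records the
 * contention point; lock_acquired() turns that into wait time and
 * bounce statistics once the lock is finally taken.
 */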
3638#endif
3639
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003640/*
3641 * Used by the testsuite, sanitize the validator state
3642 * after a simulated failure:
3643 */
3644
3645void lockdep_reset(void)
3646{
3647 unsigned long flags;
Ingo Molnar23d95a02006-12-13 00:34:40 -08003648 int i;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003649
3650 raw_local_irq_save(flags);
3651 current->curr_chain_key = 0;
3652 current->lockdep_depth = 0;
3653 current->lockdep_recursion = 0;
3654 memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
3655 nr_hardirq_chains = 0;
3656 nr_softirq_chains = 0;
3657 nr_process_chains = 0;
3658 debug_locks = 1;
Ingo Molnar23d95a02006-12-13 00:34:40 -08003659 for (i = 0; i < CHAINHASH_SIZE; i++)
3660 INIT_LIST_HEAD(chainhash_table + i);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003661 raw_local_irq_restore(flags);
3662}
3663
3664static void zap_class(struct lock_class *class)
3665{
3666 int i;
3667
3668 /*
3669 * Remove all dependencies this lock is
3670 * involved in:
3671 */
3672 for (i = 0; i < nr_list_entries; i++) {
3673 if (list_entries[i].class == class)
3674 list_del_rcu(&list_entries[i].entry);
3675 }
3676 /*
3677 * Unhash the class and remove it from the all_lock_classes list:
3678 */
3679 list_del_rcu(&class->hash_entry);
3680 list_del_rcu(&class->lock_entry);
3681
Rabin Vincent8bfe0292008-08-11 09:30:26 +02003682 class->key = NULL;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003683}
3684
Arjan van de Venfabe8742008-01-24 07:00:45 +01003685static inline int within(const void *addr, void *start, unsigned long size)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003686{
3687 return addr >= start && addr < start + size;
3688}
3689
3690void lockdep_free_key_range(void *start, unsigned long size)
3691{
3692 struct lock_class *class, *next;
3693 struct list_head *head;
3694 unsigned long flags;
3695 int i;
Nick Piggin5a26db52008-01-16 09:51:58 +01003696 int locked;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003697
3698 raw_local_irq_save(flags);
Nick Piggin5a26db52008-01-16 09:51:58 +01003699 locked = graph_lock();
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003700
3701 /*
3702 * Unhash all classes that were created by this module:
3703 */
3704 for (i = 0; i < CLASSHASH_SIZE; i++) {
3705 head = classhash_table + i;
3706 if (list_empty(head))
3707 continue;
Arjan van de Venfabe8742008-01-24 07:00:45 +01003708 list_for_each_entry_safe(class, next, head, hash_entry) {
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003709 if (within(class->key, start, size))
3710 zap_class(class);
Arjan van de Venfabe8742008-01-24 07:00:45 +01003711 else if (within(class->name, start, size))
3712 zap_class(class);
3713 }
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003714 }
3715
Nick Piggin5a26db52008-01-16 09:51:58 +01003716 if (locked)
3717 graph_unlock();
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003718 raw_local_irq_restore(flags);
3719}
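/*
 * Main caller (sketch, hedged): the module loader invokes this when it
 * frees module memory, so that class keys and names pointing into the
 * unloaded image are dropped from the hashes, roughly:
 *
 *	lockdep_free_key_range(mod->module_core, mod->core_size);
 *
 * Note that a class is zapped if either its key or its name lies in the
 * freed range.
 */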
3720
3721void lockdep_reset_lock(struct lockdep_map *lock)
3722{
Ingo Molnard6d897c2006-07-10 04:44:04 -07003723 struct lock_class *class, *next;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003724 struct list_head *head;
3725 unsigned long flags;
3726 int i, j;
Nick Piggin5a26db52008-01-16 09:51:58 +01003727 int locked;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003728
3729 raw_local_irq_save(flags);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003730
3731 /*
Ingo Molnard6d897c2006-07-10 04:44:04 -07003732 * Remove all classes this lock might have:
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003733 */
Ingo Molnard6d897c2006-07-10 04:44:04 -07003734 for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
3735 /*
3736 * If the class exists we look it up and zap it:
3737 */
3738 class = look_up_lock_class(lock, j);
3739 if (class)
3740 zap_class(class);
3741 }
3742 /*
3743 * Debug check: in the end all mapped classes should
3744 * be gone.
3745 */
Nick Piggin5a26db52008-01-16 09:51:58 +01003746 locked = graph_lock();
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003747 for (i = 0; i < CLASSHASH_SIZE; i++) {
3748 head = classhash_table + i;
3749 if (list_empty(head))
3750 continue;
3751 list_for_each_entry_safe(class, next, head, hash_entry) {
Hitoshi Mitake62016252010-10-05 18:01:51 +09003752 int match = 0;
3753
3754 for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
3755 match |= class == lock->class_cache[j];
3756
3757 if (unlikely(match)) {
Ingo Molnar74c383f2006-12-13 00:34:43 -08003758 if (debug_locks_off_graph_unlock())
3759 WARN_ON(1);
Ingo Molnard6d897c2006-07-10 04:44:04 -07003760 goto out_restore;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003761 }
3762 }
3763 }
Nick Piggin5a26db52008-01-16 09:51:58 +01003764 if (locked)
3765 graph_unlock();
Ingo Molnard6d897c2006-07-10 04:44:04 -07003766
3767out_restore:
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003768 raw_local_irq_restore(flags);
3769}
3770
Sam Ravnborg14999932007-02-28 20:12:31 -08003771void lockdep_init(void)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003772{
3773 int i;
3774
3775 /*
3776 * Some architectures have their own start_kernel()
3777 * code which calls lockdep_init(), while we also
3778 * call lockdep_init() from the start_kernel() itself,
3779 * and we want to initialize the hashes only once:
3780 */
3781 if (lockdep_initialized)
3782 return;
3783
3784 for (i = 0; i < CLASSHASH_SIZE; i++)
3785 INIT_LIST_HEAD(classhash_table + i);
3786
3787 for (i = 0; i < CHAINHASH_SIZE; i++)
3788 INIT_LIST_HEAD(chainhash_table + i);
3789
3790 lockdep_initialized = 1;
3791}
3792
3793void __init lockdep_info(void)
3794{
3795 printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
3796
Li Zefanb0788ca2008-11-21 15:57:32 +08003797 printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003798 printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH);
3799 printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS);
Li Zefanb0788ca2008-11-21 15:57:32 +08003800 printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003801 printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES);
3802 printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
3803 printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);
3804
3805 printk(" memory used by lock dependency info: %lu kB\n",
3806 (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
3807 sizeof(struct list_head) * CLASSHASH_SIZE +
3808 sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
3809 sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
Ming Lei90629202009-08-02 21:43:36 +08003810 sizeof(struct list_head) * CHAINHASH_SIZE
Ming Lei4dd861d2009-07-16 15:44:29 +02003811#ifdef CONFIG_PROVE_LOCKING
Ming Leie351b662009-07-22 22:48:09 +08003812 + sizeof(struct circular_queue)
Ming Lei4dd861d2009-07-16 15:44:29 +02003813#endif
Ming Lei90629202009-08-02 21:43:36 +08003814 ) / 1024
Ming Lei4dd861d2009-07-16 15:44:29 +02003815 );
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003816
3817 printk(" per task-struct memory footprint: %lu bytes\n",
3818 sizeof(struct held_lock) * MAX_LOCK_DEPTH);
3819
3820#ifdef CONFIG_DEBUG_LOCKDEP
Johannes Bergc71063c2007-07-19 01:49:02 -07003821 if (lockdep_init_error) {
3822 printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
3823 printk("Call stack leading to lockdep invocation was:\n");
3824 print_stack_trace(&lockdep_init_trace, 0);
3825 }
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003826#endif
3827}
3828
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003829static void
3830print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
Arjan van de Ven55794a42006-07-10 04:44:03 -07003831 const void *mem_to, struct held_lock *hlock)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003832{
3833 if (!debug_locks_off())
3834 return;
3835 if (debug_locks_silent)
3836 return;
3837
3838 printk("\n=========================\n");
3839 printk( "[ BUG: held lock freed! ]\n");
3840 printk( "-------------------------\n");
3841 printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
Pavel Emelyanovba25f9d2007-10-18 23:40:40 -07003842 curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
Arjan van de Ven55794a42006-07-10 04:44:03 -07003843 print_lock(hlock);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003844 lockdep_print_held_locks(curr);
3845
3846 printk("\nstack backtrace:\n");
3847 dump_stack();
3848}
3849
Oleg Nesterov54561782007-12-05 15:46:09 +01003850static inline int not_in_range(const void* mem_from, unsigned long mem_len,
3851 const void* lock_from, unsigned long lock_len)
3852{
3853 return lock_from + lock_len <= mem_from ||
3854 mem_from + mem_len <= lock_from;
3855}
3856
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003857/*
3858 * Called when kernel memory is freed (or unmapped), or if a lock
3859 * is destroyed or reinitialized - this code checks whether there is
3860 * any held lock in the memory range of <from> to <to>:
3861 */
3862void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
3863{
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003864 struct task_struct *curr = current;
3865 struct held_lock *hlock;
3866 unsigned long flags;
3867 int i;
3868
3869 if (unlikely(!debug_locks))
3870 return;
3871
3872 local_irq_save(flags);
3873 for (i = 0; i < curr->lockdep_depth; i++) {
3874 hlock = curr->held_locks + i;
3875
Oleg Nesterov54561782007-12-05 15:46:09 +01003876 if (not_in_range(mem_from, mem_len, hlock->instance,
3877 sizeof(*hlock->instance)))
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003878 continue;
3879
Oleg Nesterov54561782007-12-05 15:46:09 +01003880 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003881 break;
3882 }
3883 local_irq_restore(flags);
3884}
Peter Zijlstraed075362006-12-06 20:35:24 -08003885EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
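/*
 * Caller sketch (illustrative): the allocators run this on every free,
 * e.g. a slab free path does something like
 *
 *	debug_check_no_locks_freed(objp, size);
 *
 * so freeing an object that still contains a held lock is reported
 * immediately rather than showing up later as corruption.
 */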
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003886
3887static void print_held_locks_bug(struct task_struct *curr)
3888{
3889 if (!debug_locks_off())
3890 return;
3891 if (debug_locks_silent)
3892 return;
3893
3894 printk("\n=====================================\n");
3895 printk( "[ BUG: lock held at task exit time! ]\n");
3896 printk( "-------------------------------------\n");
3897 printk("%s/%d is exiting with locks still held!\n",
Pavel Emelyanovba25f9d2007-10-18 23:40:40 -07003898 curr->comm, task_pid_nr(curr));
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003899 lockdep_print_held_locks(curr);
3900
3901 printk("\nstack backtrace:\n");
3902 dump_stack();
3903}
3904
3905void debug_check_no_locks_held(struct task_struct *task)
3906{
3907 if (unlikely(task->lockdep_depth > 0))
3908 print_held_locks_bug(task);
3909}
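/*
 * Called from the task-exit path (sketch), roughly:
 *
 *	debug_check_no_locks_held(tsk);
 *
 * so a task exiting with locks still held produces the report above
 * instead of silently leaking those locks.
 */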
3910
3911void debug_show_all_locks(void)
3912{
3913 struct task_struct *g, *p;
3914 int count = 10;
3915 int unlock = 1;
3916
Jarek Poplawski9c35dd72007-03-22 00:11:28 -08003917 if (unlikely(!debug_locks)) {
3918 printk("INFO: lockdep is turned off.\n");
3919 return;
3920 }
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003921 printk("\nShowing all locks held in the system:\n");
3922
3923 /*
3924 * Here we try to get the tasklist_lock as hard as possible,
3925 * if not successful after 2 seconds we ignore it (but keep
3926 * trying). This is to enable a debug printout even if a
3927 * tasklist_lock-holding task deadlocks or crashes.
3928 */
3929retry:
3930 if (!read_trylock(&tasklist_lock)) {
3931 if (count == 10)
3932 printk("hm, tasklist_lock locked, retrying... ");
3933 if (count) {
3934 count--;
3935 printk(" #%d", 10-count);
3936 mdelay(200);
3937 goto retry;
3938 }
3939 printk(" ignoring it.\n");
3940 unlock = 0;
qinghuang feng46fec7a2008-10-28 17:24:28 +08003941 } else {
3942 if (count != 10)
3943 printk(KERN_CONT " locked it.\n");
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003944 }
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003945
3946 do_each_thread(g, p) {
Ingo Molnar85684872007-12-05 15:46:09 +01003947 /*
3948 * It's not reliable to print a task's held locks
3949 * if it's not sleeping (or if it's not the current
3950 * task):
3951 */
3952 if (p->state == TASK_RUNNING && p != current)
3953 continue;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003954 if (p->lockdep_depth)
3955 lockdep_print_held_locks(p);
3956 if (!unlock)
3957 if (read_trylock(&tasklist_lock))
3958 unlock = 1;
3959 } while_each_thread(g, p);
3960
3961 printk("\n");
3962 printk("=============================================\n\n");
3963
3964 if (unlock)
3965 read_unlock(&tasklist_lock);
3966}
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003967EXPORT_SYMBOL_GPL(debug_show_all_locks);
3968
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01003969/*
3970 * Careful: only use this function if you are sure that
3971 * the task cannot run in parallel!
3972 */
John Kacurf1b499f2010-08-05 17:10:53 +02003973void debug_show_held_locks(struct task_struct *task)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003974{
Jarek Poplawski9c35dd72007-03-22 00:11:28 -08003975 if (unlikely(!debug_locks)) {
3976 printk("INFO: lockdep is turned off.\n");
3977 return;
3978 }
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003979 lockdep_print_held_locks(task);
3980}
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003981EXPORT_SYMBOL_GPL(debug_show_held_locks);
Peter Zijlstrab351d162007-10-11 22:11:12 +02003982
3983void lockdep_sys_exit(void)
3984{
3985 struct task_struct *curr = current;
3986
3987 if (unlikely(curr->lockdep_depth)) {
3988 if (!debug_locks_off())
3989 return;
3990 printk("\n================================================\n");
3991 printk( "[ BUG: lock held when returning to user space! ]\n");
3992 printk( "------------------------------------------------\n");
3993 printk("%s/%d is leaving the kernel with locks still held!\n",
3994 curr->comm, curr->pid);
3995 lockdep_print_held_locks(curr);
3996 }
3997}
Paul E. McKenney0632eb32010-02-22 17:04:47 -08003998
3999void lockdep_rcu_dereference(const char *file, const int line)
4000{
4001 struct task_struct *curr = current;
4002
Lai Jiangshan2b3fc352010-04-20 16:23:07 +08004003#ifndef CONFIG_PROVE_RCU_REPEATEDLY
Paul E. McKenney0632eb32010-02-22 17:04:47 -08004004 if (!debug_locks_off())
4005 return;
Lai Jiangshan2b3fc352010-04-20 16:23:07 +08004006#endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */
4007 /* Note: the following can be executed concurrently, so be careful. */
Paul E. McKenney056ba4a2010-02-25 14:06:46 -08004008 printk("\n===================================================\n");
4009 printk( "[ INFO: suspicious rcu_dereference_check() usage. ]\n");
4010 printk( "---------------------------------------------------\n");
Paul E. McKenney0632eb32010-02-22 17:04:47 -08004011 printk("%s:%d invoked rcu_dereference_check() without protection!\n",
4012 file, line);
4013 printk("\nother info that might help us debug this:\n\n");
Paul E. McKenneycc5b83a2010-03-03 07:46:59 -08004014 printk("\nrcu_scheduler_active = %d, debug_locks = %d\n", rcu_scheduler_active, debug_locks);
Paul E. McKenney0632eb32010-02-22 17:04:47 -08004015 lockdep_print_held_locks(curr);
4016 printk("\nstack backtrace:\n");
4017 dump_stack();
4018}
4019EXPORT_SYMBOL_GPL(lockdep_rcu_dereference);
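/*
 * Triggering sketch (gp/gp_lock are illustrative names): this is what
 * rcu_dereference_check() ends up calling when its protection condition
 * evaluates to false. For example,
 *
 *	p = rcu_dereference_check(gp, lockdep_is_held(&gp_lock));
 *
 * executed outside both an RCU read-side critical section and gp_lock
 * prints the report above (once only, unless CONFIG_PROVE_RCU_REPEATEDLY).
 */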