/*
 * Provide a default dump_stack() function for architectures
 * which don't implement their own.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/atomic.h>

static void __dump_stack(void)
{
	dump_stack_print_info(KERN_DEFAULT);
	show_stack(NULL, NULL);
}

/**
 * dump_stack - dump the current task information and its stack trace
 *
 * Architectures can override this implementation by implementing their own.
 */
#ifdef CONFIG_SMP
static atomic_t dump_lock = ATOMIC_INIT(-1);

asmlinkage void dump_stack(void)
{
	int was_locked;
	int old;
	int cpu;

	/*
	 * Permit this cpu to perform nested stack dumps while serialising
	 * against other CPUs
	 */
	preempt_disable();

retry:
	cpu = smp_processor_id();
	old = atomic_cmpxchg(&dump_lock, -1, cpu);
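	/*
	 * Three possible outcomes of the cmpxchg above: the lock was free
	 * (-1) and this CPU took it, so it must be released below; the lock
	 * is already held by this CPU, meaning a nested dump, so leave it
	 * held; or another CPU holds it, so spin and retry until released.
	 */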
	if (old == -1) {
		was_locked = 0;
	} else if (old == cpu) {
		was_locked = 1;
	} else {
		cpu_relax();
		goto retry;
	}

	__dump_stack();

	if (!was_locked)
		atomic_set(&dump_lock, -1);

	preempt_enable();
}
#else
asmlinkage void dump_stack(void)
{
	__dump_stack();
}
#endif
EXPORT_SYMBOL(dump_stack);
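
/*
 * Example (hypothetical, not part of this file): a typical caller simply
 * invokes dump_stack() from any context where it wants the current call
 * chain printed to the kernel log, e.g.
 *
 *	if (unlikely(!required_resource)) {
 *		pr_err("mydrv: resource missing, dumping stack\n");
 *		dump_stack();
 *	}
 *
 * The names "required_resource" and "mydrv" above are illustrative only.
 * Since the symbol is exported, modular code may call it as well.
 */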