/*
 * Copyright (C) 2009 Matt Fleming
 *
 * Based, in part, on kernel/time/clocksource.c.
 *
 * This file provides arbitration code for stack unwinders.
 *
 * Multiple stack unwinders can be available on a system, usually with
 * the most accurate unwinder being the currently active one.
 */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/unwinder.h>
#include <asm/atomic.h>

/*
 * This is the most basic stack unwinder an architecture can
 * provide. For architectures without reliable frame pointers, e.g.
 * RISC CPUs, it can be implemented by looking through the stack for
 * addresses that lie within the kernel text section.
 *
 * Other CPUs, e.g. x86, can use their frame pointer register to
 * construct more accurate stack traces.
 */

/*
 * Tentative definition of "unwinder_list": stack_reader's list entry
 * needs its address here; the initialized definition follows below,
 * once &stack_reader.list is in scope.
 */
static struct list_head unwinder_list;

static struct unwinder stack_reader = {
	.name = "stack-reader",
	.dump = stack_reader_dump,
	.rating = 50,
	.list = {
		.next = &unwinder_list,
		.prev = &unwinder_list,
	},
};

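/*
 * Illustrative sketch only: the real stack_reader_dump() is defined
 * elsewhere and merely declared in <asm/unwinder.h>. This is roughly
 * what a text-scanning dumper of the kind described above could look
 * like. It assumes kstack_end() from <linux/sched.h>,
 * __kernel_text_address() from <linux/kernel.h>, and the x86-style
 * ops->address(data, addr, reliable) callback convention; the
 * function name is hypothetical.
 */
static void __maybe_unused
example_text_scan_dump(struct task_struct *task, struct pt_regs *regs,
		       unsigned long *sp, const struct stacktrace_ops *ops,
		       void *data)
{
	/*
	 * Walk the stack one word at a time, reporting anything that
	 * points into kernel text. No frame pointers are required,
	 * at the cost of false positives from stale stack slots.
	 */
	while (!kstack_end(sp)) {
		unsigned long addr = *sp++;

		if (__kernel_text_address(addr))
			ops->address(data, addr, 0);
	}
}
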
/*
 * "curr_unwinder" points to the stack unwinder currently in use. This
 * is the unwinder with the highest rating.
 *
 * "unwinder_list" is a linked-list of all available unwinders, sorted
 * by rating.
 *
 * All modifications of "curr_unwinder" and "unwinder_list" must be
 * performed whilst holding "unwinder_lock".
 */
static struct unwinder *curr_unwinder = &stack_reader;

static struct list_head unwinder_list = {
	.next = &stack_reader.list,
	.prev = &stack_reader.list,
};

static DEFINE_SPINLOCK(unwinder_lock);

static atomic_t unwinder_running = ATOMIC_INIT(0);

/**
 * select_unwinder - Select the best registered stack unwinder.
 *
 * Private function. Must hold unwinder_lock when called.
 *
 * Select the stack unwinder with the best rating. This is useful for
 * setting up curr_unwinder. Returns %NULL if the list is empty or if
 * the best-rated unwinder is already current, i.e. no switch is
 * needed.
 */
static struct unwinder *select_unwinder(void)
{
	struct unwinder *best;

	if (list_empty(&unwinder_list))
		return NULL;

	best = list_entry(unwinder_list.next, struct unwinder, list);
	if (best == curr_unwinder)
		return NULL;

	return best;
}

/*
 * Enqueue the stack unwinder, keeping the list sorted by rating,
 * highest first.
 */
static int unwinder_enqueue(struct unwinder *ops)
{
	struct list_head *tmp, *entry = &unwinder_list;

	list_for_each(tmp, &unwinder_list) {
		struct unwinder *o;

		o = list_entry(tmp, struct unwinder, list);
		if (o == ops)
			return -EBUSY;
		/* Keep track of where to insert */
		if (o->rating >= ops->rating)
			entry = tmp;
	}
	list_add(&ops->list, entry);

	return 0;
}

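/*
 * Worked example: starting from the default list ("stack-reader",
 * rating 50), enqueueing hypothetical unwinders rated 150 and then
 * 100 yields 150 -> 100 -> 50, so unwinder_list.next (and therefore
 * select_unwinder()) always sees the highest-rated entry first.
 */
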
/**
 * unwinder_register - Used to install new stack unwinder
 * @u: unwinder to be registered
 *
 * Install the new stack unwinder on the unwinder list, which is sorted
 * by rating.
 *
 * Returns -EBUSY if the unwinder was already registered, zero
 * otherwise.
 */
int unwinder_register(struct unwinder *u)
{
	struct unwinder *best;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&unwinder_lock, flags);
	ret = unwinder_enqueue(u);
	if (!ret) {
		/*
		 * select_unwinder() returns NULL when the highest-rated
		 * unwinder is already current. Only switch when it
		 * reports a better one, so that registering a
		 * lower-rated unwinder cannot clobber curr_unwinder
		 * with NULL.
		 */
		best = select_unwinder();
		if (best)
			curr_unwinder = best;
	}
	spin_unlock_irqrestore(&unwinder_lock, flags);

	return ret;
}

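/*
 * Usage sketch (hypothetical, not part of this file): an architecture
 * would typically register a more capable unwinder from an initcall,
 * much as the SH DWARF unwinder does with a rating of 150. All names
 * below are illustrative.
 */
static void example_dwarf_dump(struct task_struct *task,
			       struct pt_regs *regs, unsigned long *sp,
			       const struct stacktrace_ops *ops, void *data)
{
	/* ... walk DWARF CFI records here ... */
}

static struct unwinder example_unwinder = {
	.name	= "example-dwarf",
	.dump	= example_dwarf_dump,
	.rating	= 150,
};

static int __init example_unwinder_init(void)
{
	/* Rating 150 beats stack_reader's 50, so this becomes current. */
	return unwinder_register(&example_unwinder);
}
early_initcall(example_unwinder_init);
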
/*
 * Unwind the call stack and pass information to the stacktrace_ops
 * functions. Also handle the case where we need to switch to a new
 * stack dumper because the current one faulted unexpectedly.
 */
void unwind_stack(struct task_struct *task, struct pt_regs *regs,
		  unsigned long *sp, const struct stacktrace_ops *ops,
		  void *data)
{
	unsigned long flags;

	/*
	 * The problem with unwinders with high ratings is that they are
	 * inherently more complicated than the simple ones with lower
	 * ratings. We are therefore more likely to fault in the
	 * complicated ones, e.g. hitting BUG()s. If we fault in the
	 * code for the current stack unwinder we try to downgrade to
	 * one with a lower rating.
	 *
	 * Hopefully this will give us a semi-reliable stacktrace so we
	 * can diagnose why curr_unwinder->dump() faulted.
	 */
	if (atomic_inc_return(&unwinder_running) != 1) {
		spin_lock_irqsave(&unwinder_lock, flags);

		if (!list_is_singular(&unwinder_list)) {
			list_del(&curr_unwinder->list);
			curr_unwinder = select_unwinder();
		}

		spin_unlock_irqrestore(&unwinder_lock, flags);
		atomic_dec(&unwinder_running);
	}

	curr_unwinder->dump(task, regs, sp, ops, data);

	atomic_dec(&unwinder_running);
}
EXPORT_SYMBOL_GPL(unwind_stack);
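
/*
 * Caller sketch (hypothetical names): this mirrors how dump-stack code
 * typically drives unwind_stack(), supplying a stacktrace_ops whose
 * ->address() callback prints each frame. The callback signature is
 * assumed to follow the x86-style convention used here.
 */
static void example_print_address(void *data, unsigned long addr,
				  int reliable)
{
	/* Prefix questionable frames with "? ", as dump-stack code does. */
	printk("%s[<%08lx>] %pS\n", reliable ? "" : "? ", addr,
	       (void *)addr);
}

static const struct stacktrace_ops example_print_ops = {
	.address = example_print_address,
};

static void __maybe_unused example_show_trace(struct task_struct *task,
					      unsigned long *sp,
					      struct pt_regs *regs)
{
	unwind_stack(task, regs, sp, &example_print_ops, NULL);
}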