/*
 * Copyright (C) 2009 Matt Fleming
 *
 * Based, in part, on kernel/time/clocksource.c.
 *
 * This file provides arbitration code for stack unwinders.
 *
 * Multiple stack unwinders can be available on a system, usually with
 * the most accurate unwinder being the currently active one.
 */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/unwinder.h>
#include <asm/atomic.h>

/*
 * This is the most basic stack unwinder an architecture can
 * provide. For architectures without reliable frame pointers, e.g.
 * RISC CPUs, it can be implemented by looking through the stack for
 * addresses that lie within the kernel text section.
 *
 * Other CPUs, e.g. x86, can use their frame pointer register to
 * construct more accurate stack traces.
 */
static struct list_head unwinder_list;
static struct unwinder stack_reader = {
	.name = "stack-reader",
	.dump = stack_reader_dump,
	.rating = 50,
	.list = {
		.next = &unwinder_list,
		.prev = &unwinder_list,
	},
};

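/*
 * For illustration only (not part of the original file): a minimal
 * text-scanning dumper of the kind described above might look like
 * the sketch below. It assumes __kernel_text_address(), kstack_end()
 * and the stacktrace_ops ->address() callback; the real
 * stack_reader_dump() lives elsewhere in the architecture code.
 *
 *	static void stack_reader_dump(struct task_struct *task,
 *				      struct pt_regs *regs,
 *				      unsigned long *sp,
 *				      const struct stacktrace_ops *ops,
 *				      void *data)
 *	{
 *		while (!kstack_end(sp)) {
 *			unsigned long addr = *sp++;
 *
 *			// Report anything that looks like a return
 *			// address, marking it as unreliable (0).
 *			if (__kernel_text_address(addr))
 *				ops->address(data, addr, 0);
 *		}
 *	}
 */
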
/*
 * "curr_unwinder" points to the stack unwinder currently in use. This
 * is the unwinder with the highest rating.
 *
 * "unwinder_list" is a linked-list of all available unwinders, sorted
 * by rating.
 *
 * All modifications of "curr_unwinder" and "unwinder_list" must be
 * performed whilst holding "unwinder_lock".
 */
static struct unwinder *curr_unwinder = &stack_reader;

static struct list_head unwinder_list = {
	.next = &stack_reader.list,
	.prev = &stack_reader.list,
};

static DEFINE_SPINLOCK(unwinder_lock);

static atomic_t unwinder_running = ATOMIC_INIT(0);

/**
 * select_unwinder - Select the best registered stack unwinder.
 *
 * Private function. Must hold unwinder_lock when called.
 *
 * Select the stack unwinder with the best rating. This is useful for
 * setting up curr_unwinder. Returns NULL if the list is empty or if
 * the highest-rated unwinder is already the current one.
 */
static struct unwinder *select_unwinder(void)
{
	struct unwinder *best;

	if (list_empty(&unwinder_list))
		return NULL;

	best = list_entry(unwinder_list.next, struct unwinder, list);
	if (best == curr_unwinder)
		return NULL;

	return best;
}

/*
 * Enqueue the stack unwinder sorted by rating, highest first.
 */
static int unwinder_enqueue(struct unwinder *ops)
{
	struct list_head *tmp, *entry = &unwinder_list;

	list_for_each(tmp, &unwinder_list) {
		struct unwinder *o;

		o = list_entry(tmp, struct unwinder, list);
		if (o == ops)
			return -EBUSY;
		/* Keep track of the place where to insert */
		if (o->rating >= ops->rating)
			entry = tmp;
	}
	list_add(&ops->list, entry);

	return 0;
}

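/*
 * Worked example (not in the original file): with the built-in
 * stack-reader (rating 50) already on the list, registering a
 * hypothetical unwinder rated 150 yields
 *
 *	unwinder_list -> [rating 150] -> [rating 50]
 *
 * i.e. unwinder_list.next always points at the highest-rated
 * unwinder, which is what select_unwinder() relies on.
 */
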
/**
 * unwinder_register - Used to install a new stack unwinder
 * @u: unwinder to be registered
 *
 * Install the new stack unwinder on the unwinder list, which is sorted
 * by rating.
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 */
int unwinder_register(struct unwinder *u)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&unwinder_lock, flags);
	ret = unwinder_enqueue(u);
	if (!ret) {
		struct unwinder *best;

		/*
		 * select_unwinder() returns NULL when the highest-rated
		 * unwinder is already current, so only switch when a
		 * better one was just registered.
		 */
		best = select_unwinder();
		if (best)
			curr_unwinder = best;
	}
	spin_unlock_irqrestore(&unwinder_lock, flags);

	return ret;
}

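/*
 * Usage sketch (hypothetical, not in the original file): an
 * architecture that can do better than the stack reader would
 * register its own unwinder from init code, e.g.:
 *
 *	static struct unwinder dwarf_unwinder = {
 *		.name	= "dwarf-unwinder",
 *		.dump	= dwarf_unwinder_dump,
 *		.rating	= 150,
 *	};
 *
 *	unwinder_register(&dwarf_unwinder);
 *
 * Rating 150 beats the stack reader's 50, so subsequent calls to
 * unwind_stack() use the DWARF unwinder.
 */
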
/*
 * Unwind the call stack and pass information to the stacktrace_ops
 * functions. Also handle the case where we need to switch to a new
 * stack dumper because the current one faulted unexpectedly.
 */
void unwind_stack(struct task_struct *task, struct pt_regs *regs,
		  unsigned long *sp, const struct stacktrace_ops *ops,
		  void *data)
{
	unsigned long flags;

	/*
	 * The problem with unwinders with high ratings is that they are
	 * inherently more complicated than the simple ones with lower
	 * ratings. We are therefore more likely to fault in the
	 * complicated ones, e.g. hitting BUG()s. If we fault in the
	 * code for the current stack unwinder we try to downgrade to
	 * one with a lower rating.
	 *
	 * Hopefully this will give us a semi-reliable stacktrace so we
	 * can diagnose why curr_unwinder->dump() faulted.
	 */
	if (atomic_inc_return(&unwinder_running) != 1) {
		spin_lock_irqsave(&unwinder_lock, flags);

		if (!list_is_singular(&unwinder_list)) {
			list_del(&curr_unwinder->list);
			curr_unwinder = select_unwinder();
		}

		spin_unlock_irqrestore(&unwinder_lock, flags);
		atomic_dec(&unwinder_running);
	}

	curr_unwinder->dump(task, regs, sp, ops, data);

	atomic_dec(&unwinder_running);
}
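
/*
 * Caller sketch (hypothetical, not in the original file): a stack
 * trace printer would supply a stacktrace_ops whose ->address()
 * callback consumes each frame, then hand it to unwind_stack():
 *
 *	static void example_address(void *data, unsigned long addr,
 *				    int reliable)
 *	{
 *		printk("%s[<%08lx>] %pS\n",
 *		       reliable ? "" : "? ", addr, (void *)addr);
 *	}
 *
 *	static const struct stacktrace_ops example_ops = {
 *		.address = example_address,
 *	};
 *
 *	unwind_stack(current, NULL, stack_pointer, &example_ops, NULL);
 *
 * where stack_pointer is a placeholder for the stack to walk.
 */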