blob: bf989e063a0cdb8ad27acbf82b6fc4fbf6b3bf39 [file] [log] [blame]
Paul Mundtafbfb522006-12-04 18:17:28 +09001/*
2 * arch/sh/kernel/stacktrace.c
3 *
4 * Stack trace management functions
5 *
Paul Mundt5a89f1a2008-09-13 01:44:03 +09006 * Copyright (C) 2006 - 2008 Paul Mundt
Paul Mundtafbfb522006-12-04 18:17:28 +09007 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/sched.h>
13#include <linux/stacktrace.h>
14#include <linux/thread_info.h>
Heiko Carstens8b95d912008-07-14 23:32:32 +020015#include <linux/module.h>
Matt Fleming0eff9f62009-08-11 22:43:20 +010016#include <asm/unwinder.h>
Paul Mundtafbfb522006-12-04 18:17:28 +090017#include <asm/ptrace.h>
Matt Fleming4e14dfc2009-08-07 16:11:19 +010018#include <asm/stacktrace.h>
19
/*
 * Unwinder "new stack section" callback.  We record addresses only,
 * so stack-boundary notifications are deliberately ignored.
 */
static int save_stack_stack(void *data, char *name)
{
	return 0;
}
Paul Mundtafbfb522006-12-04 18:17:28 +090024
25/*
26 * Save stack-backtrace addresses into a stack_trace buffer.
27 */
Matt Fleming4e14dfc2009-08-07 16:11:19 +010028static void save_stack_address(void *data, unsigned long addr, int reliable)
29{
30 struct stack_trace *trace = data;
31
Paul Mundt48e4d462009-08-15 01:05:46 +090032 if (!reliable)
33 return;
34
Matt Fleming4e14dfc2009-08-07 16:11:19 +010035 if (trace->skip > 0) {
36 trace->skip--;
37 return;
38 }
39
40 if (trace->nr_entries < trace->max_entries)
41 trace->entries[trace->nr_entries++] = addr;
42}
43
/*
 * Callbacks for save_stack_trace(): keep every reliable address,
 * ignore stack-section boundaries.
 */
static const struct stacktrace_ops save_stack_ops = {
	.stack = save_stack_stack,
	.address = save_stack_address,
};
48
Paul Mundta3cf4ea82007-05-09 18:55:14 +090049void save_stack_trace(struct stack_trace *trace)
Paul Mundtafbfb522006-12-04 18:17:28 +090050{
Christoph Hellwigab1b6f02007-05-08 00:23:29 -070051 unsigned long *sp = (unsigned long *)current_stack_pointer;
Paul Mundtafbfb522006-12-04 18:17:28 +090052
Matt Fleming0eff9f62009-08-11 22:43:20 +010053 unwind_stack(current, NULL, sp, &save_stack_ops, trace);
Paul Mundt606b4c92009-08-15 01:11:37 +090054 if (trace->nr_entries < trace->max_entries)
55 trace->entries[trace->nr_entries++] = ULONG_MAX;
Paul Mundtafbfb522006-12-04 18:17:28 +090056}
Ingo Molnar7b4c9502008-07-03 09:17:55 +020057EXPORT_SYMBOL_GPL(save_stack_trace);
Paul Mundt5a89f1a2008-09-13 01:44:03 +090058
Matt Fleming4e14dfc2009-08-07 16:11:19 +010059static void
60save_stack_address_nosched(void *data, unsigned long addr, int reliable)
61{
62 struct stack_trace *trace = (struct stack_trace *)data;
63
Paul Mundt48e4d462009-08-15 01:05:46 +090064 if (!reliable)
65 return;
66
Matt Fleming4e14dfc2009-08-07 16:11:19 +010067 if (in_sched_functions(addr))
68 return;
69
70 if (trace->skip > 0) {
71 trace->skip--;
72 return;
73 }
74
75 if (trace->nr_entries < trace->max_entries)
76 trace->entries[trace->nr_entries++] = addr;
77}
78
/*
 * Callbacks for save_stack_trace_tsk(): as save_stack_ops, but the
 * address callback filters out scheduler functions.
 */
static const struct stacktrace_ops save_stack_ops_nosched = {
	.stack = save_stack_stack,
	.address = save_stack_address_nosched,
};
83
Paul Mundt5a89f1a2008-09-13 01:44:03 +090084void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
85{
86 unsigned long *sp = (unsigned long *)tsk->thread.sp;
87
Matt Fleming0eff9f62009-08-11 22:43:20 +010088 unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace);
Paul Mundt606b4c92009-08-15 01:11:37 +090089 if (trace->nr_entries < trace->max_entries)
90 trace->entries[trace->nr_entries++] = ULONG_MAX;
Paul Mundt5a89f1a2008-09-13 01:44:03 +090091}
92EXPORT_SYMBOL_GPL(save_stack_trace_tsk);