/*
 * Stack trace management functions
 *
 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

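/*
 * Record one text address into the trace buffer.  Honors trace->skip to
 * drop the innermost entries, and optionally skips addresses inside
 * scheduler functions so a blocked task's trace starts where it went to
 * sleep.  Returns -1 once the buffer is full so the caller can stop
 * unwinding.
 */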
static int save_stack_address(struct stack_trace *trace, unsigned long addr,
			      bool nosched)
{
	if (nosched && in_sched_functions(addr))
		return 0;

	if (trace->skip > 0) {
		trace->skip--;
		return 0;
	}

	if (trace->nr_entries >= trace->max_entries)
		return -1;

	trace->entries[trace->nr_entries++] = addr;
	return 0;
}

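/*
 * Core kernel-stack walker.  When regs are supplied, regs->ip is
 * recorded first, since the interrupted instruction is not a return
 * address stored on any stack frame.  The remaining entries come from
 * the frame-by-frame unwind API, and the trace is terminated with a
 * ULONG_MAX sentinel if there is room left.
 */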
static void __save_stack_trace(struct stack_trace *trace,
			       struct task_struct *task, struct pt_regs *regs,
			       bool nosched)
{
	struct unwind_state state;
	unsigned long addr;

	if (regs)
		save_stack_address(trace, regs->ip, nosched);

	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || save_stack_address(trace, addr, nosched))
			break;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
	__save_stack_trace(trace, current, NULL, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

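/*
 * Illustrative usage sketch for save_stack_trace(): the caller supplies
 * the entry buffer and may use .skip to drop its own innermost frames:
 *
 *	static unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.max_entries	= ARRAY_SIZE(entries),
 *		.entries	= entries,
 *	};
 *
 *	save_stack_trace(&trace);
 *	print_stack_trace(&trace, 0);
 */
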
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	__save_stack_trace(trace, current, regs, false);
}

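/*
 * Tracing another task requires its stack to stay around:
 * try_get_task_stack() pins it (and fails if the task has exited and
 * its stack is already freed), and put_task_stack() drops the pin.
 * nosched filters out scheduler internals, so a sleeping task's trace
 * starts where it actually went to sleep.
 */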
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	if (!try_get_task_stack(tsk))
		return;

	__save_stack_trace(trace, tsk, NULL, true);

	put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

struct stack_frame_user {
	const void __user	*next_fp;
	unsigned long		ret_addr;
};

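/*
 * Copy one frame-pointer record from the user stack.  Page faults are
 * disabled so a missing page makes the copy fail instead of sleeping,
 * which keeps this safe to call from atomic tracing contexts.  Returns
 * 1 on success, 0 on failure.
 */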
static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

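/*
 * Walk the user stack by chasing the saved frame-pointer chain from
 * regs->bp, which only works if userspace was built with frame
 * pointers.  The loop stops on a failed copy, on a frame pointer that
 * points below the current stack pointer, or when the chain stops
 * making progress, to guard against corrupt or looping frames.
 */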
static inline void __save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	const void __user *fp = (const void __user *)regs->bp;

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->ip;

	while (trace->nr_entries < trace->max_entries) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;
		if (frame.ret_addr) {
			trace->entries[trace->nr_entries++] =
				frame.ret_addr;
		}
		if (fp == frame.next_fp)
			break;
		fp = frame.next_fp;
	}
}

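/*
 * Like save_stack_trace(), but for the current task's user-space stack.
 * Kernel threads have no mm, so they get only the terminating entry.
 */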
void save_stack_trace_user(struct stack_trace *trace)
{
	/*
	 * Trace user stack if we are not a kernel thread
	 */
	if (current->mm) {
		__save_stack_trace_user(trace);
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}