Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /** |
| 2 | * @file backtrace.c |
| 3 | * |
| 4 | * @remark Copyright 2002 OProfile authors |
| 5 | * @remark Read the file COPYING |
| 6 | * |
| 7 | * @author John Levon |
| 8 | * @author David Smith |
| 9 | */ |
| 10 | |
| 11 | #include <linux/oprofile.h> |
| 12 | #include <linux/sched.h> |
| 13 | #include <linux/mm.h> |
Robert Richter | a0e3e70 | 2011-06-03 16:37:47 +0200 | [diff] [blame] | 14 | #include <linux/compat.h> |
Robert Richter | 1ac2e6c | 2011-06-07 11:49:55 +0200 | [diff] [blame] | 15 | #include <linux/uaccess.h> |
Robert Richter | a0e3e70 | 2011-06-03 16:37:47 +0200 | [diff] [blame] | 16 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 17 | #include <asm/ptrace.h> |
Jan Blunck | 574a604 | 2007-10-19 20:35:03 +0200 | [diff] [blame] | 18 | #include <asm/stacktrace.h> |
| 19 | |
/*
 * dump_trace() per-stack callback.  Returning 0 tells the stack walker
 * to keep going: we want to record addresses from every stack it finds.
 */
static int backtrace_stack(void *data, char *name)
{
	return 0;
}
| 25 | |
/*
 * dump_trace() per-address callback: record @addr in the oprofile
 * buffer until the caller-supplied depth budget (passed via @data)
 * is used up.
 */
static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	unsigned int *depth = data;

	/*
	 * Decrement only while there is budget left.  The previous
	 * "if ((*depth)--)" also decremented when *depth was already 0,
	 * wrapping the unsigned counter to UINT_MAX and re-enabling
	 * tracing for every later frame on the same stack walk.
	 */
	if (*depth) {
		(*depth)--;
		oprofile_add_trace(addr);
	}
}
| 33 | |
/*
 * Callback table handed to dump_trace() when sampling a kernel-mode
 * stack (see x86_backtrace() below): .stack accepts every stack,
 * .address records each found address, and .walk_stack uses the
 * generic print_context_stack helper to do the frame walk.
 */
static struct stacktrace_ops backtrace_ops = {
	.stack		= backtrace_stack,
	.address	= backtrace_address,
	.walk_stack	= print_context_stack,
};
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 39 | |
Jiri Olsa | f6dedec | 2010-09-29 10:46:47 -0400 | [diff] [blame] | 40 | #ifdef CONFIG_COMPAT |
| 41 | static struct stack_frame_ia32 * |
| 42 | dump_user_backtrace_32(struct stack_frame_ia32 *head) |
| 43 | { |
Robert Richter | a0e3e70 | 2011-06-03 16:37:47 +0200 | [diff] [blame] | 44 | /* Also check accessibility of one struct frame_head beyond: */ |
Jiri Olsa | f6dedec | 2010-09-29 10:46:47 -0400 | [diff] [blame] | 45 | struct stack_frame_ia32 bufhead[2]; |
| 46 | struct stack_frame_ia32 *fp; |
Robert Richter | a0e3e70 | 2011-06-03 16:37:47 +0200 | [diff] [blame] | 47 | unsigned long bytes; |
Jiri Olsa | f6dedec | 2010-09-29 10:46:47 -0400 | [diff] [blame] | 48 | |
Robert Richter | a0e3e70 | 2011-06-03 16:37:47 +0200 | [diff] [blame] | 49 | bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); |
Peter Zijlstra | 0a19684 | 2013-10-30 21:16:22 +0100 | [diff] [blame] | 50 | if (bytes != 0) |
Jiri Olsa | f6dedec | 2010-09-29 10:46:47 -0400 | [diff] [blame] | 51 | return NULL; |
| 52 | |
| 53 | fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame); |
| 54 | |
| 55 | oprofile_add_trace(bufhead[0].return_address); |
| 56 | |
| 57 | /* frame pointers should strictly progress back up the stack |
| 58 | * (towards higher addresses) */ |
| 59 | if (head >= fp) |
| 60 | return NULL; |
| 61 | |
| 62 | return fp; |
| 63 | } |
| 64 | |
| 65 | static inline int |
| 66 | x86_backtrace_32(struct pt_regs * const regs, unsigned int depth) |
| 67 | { |
| 68 | struct stack_frame_ia32 *head; |
| 69 | |
H. Peter Anvin | 6bd3300 | 2012-02-06 13:03:09 -0800 | [diff] [blame] | 70 | /* User process is IA32 */ |
Jiri Olsa | f6dedec | 2010-09-29 10:46:47 -0400 | [diff] [blame] | 71 | if (!current || !test_thread_flag(TIF_IA32)) |
| 72 | return 0; |
| 73 | |
| 74 | head = (struct stack_frame_ia32 *) regs->bp; |
| 75 | while (depth-- && head) |
| 76 | head = dump_user_backtrace_32(head); |
| 77 | |
| 78 | return 1; |
| 79 | } |
| 80 | |
| 81 | #else |
/*
 * Stub for !CONFIG_COMPAT kernels: there can be no IA32 user stacks,
 * so report "not handled" and let the caller do the native walk.
 */
static inline int
x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
{
	return 0;
}
| 87 | #endif /* CONFIG_COMPAT */ |
| 88 | |
Jiri Olsa | 40c6b3c | 2010-09-29 10:46:46 -0400 | [diff] [blame] | 89 | static struct stack_frame *dump_user_backtrace(struct stack_frame *head) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 90 | { |
Robert Richter | a0e3e70 | 2011-06-03 16:37:47 +0200 | [diff] [blame] | 91 | /* Also check accessibility of one struct frame_head beyond: */ |
Jiri Olsa | 40c6b3c | 2010-09-29 10:46:46 -0400 | [diff] [blame] | 92 | struct stack_frame bufhead[2]; |
Robert Richter | a0e3e70 | 2011-06-03 16:37:47 +0200 | [diff] [blame] | 93 | unsigned long bytes; |
Hugh Dickins | c34d1b4 | 2005-10-29 18:16:32 -0700 | [diff] [blame] | 94 | |
Robert Richter | a0e3e70 | 2011-06-03 16:37:47 +0200 | [diff] [blame] | 95 | bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); |
Peter Zijlstra | 0a19684 | 2013-10-30 21:16:22 +0100 | [diff] [blame] | 96 | if (bytes != 0) |
Hugh Dickins | c34d1b4 | 2005-10-29 18:16:32 -0700 | [diff] [blame] | 97 | return NULL; |
| 98 | |
Jiri Olsa | 40c6b3c | 2010-09-29 10:46:46 -0400 | [diff] [blame] | 99 | oprofile_add_trace(bufhead[0].return_address); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 100 | |
| 101 | /* frame pointers should strictly progress back up the stack |
| 102 | * (towards higher addresses) */ |
Jiri Olsa | 40c6b3c | 2010-09-29 10:46:46 -0400 | [diff] [blame] | 103 | if (head >= bufhead[0].next_frame) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 104 | return NULL; |
| 105 | |
Jiri Olsa | 40c6b3c | 2010-09-29 10:46:46 -0400 | [diff] [blame] | 106 | return bufhead[0].next_frame; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 107 | } |
| 108 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 109 | void |
| 110 | x86_backtrace(struct pt_regs * const regs, unsigned int depth) |
| 111 | { |
Jiri Olsa | 40c6b3c | 2010-09-29 10:46:46 -0400 | [diff] [blame] | 112 | struct stack_frame *head = (struct stack_frame *)frame_pointer(regs); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 113 | |
Vincent Hanquez | fa1e1bd | 2005-06-23 00:08:44 -0700 | [diff] [blame] | 114 | if (!user_mode_vm(regs)) { |
Masami Hiramatsu | 7b6c6c7 | 2009-05-11 17:03:00 -0400 | [diff] [blame] | 115 | unsigned long stack = kernel_stack_pointer(regs); |
Jan Blunck | 574a604 | 2007-10-19 20:35:03 +0200 | [diff] [blame] | 116 | if (depth) |
Namhyung Kim | e8e999cf | 2011-03-18 11:40:06 +0900 | [diff] [blame] | 117 | dump_trace(NULL, regs, (unsigned long *)stack, 0, |
Jan Blunck | 574a604 | 2007-10-19 20:35:03 +0200 | [diff] [blame] | 118 | &backtrace_ops, &depth); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 119 | return; |
| 120 | } |
| 121 | |
Jiri Olsa | f6dedec | 2010-09-29 10:46:47 -0400 | [diff] [blame] | 122 | if (x86_backtrace_32(regs, depth)) |
| 123 | return; |
| 124 | |
Hugh Dickins | c34d1b4 | 2005-10-29 18:16:32 -0700 | [diff] [blame] | 125 | while (depth-- && head) |
Gerald Britton | 3037944 | 2006-02-14 10:19:04 -0500 | [diff] [blame] | 126 | head = dump_user_backtrace(head); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 127 | } |