Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 1 | /* |
| 2 | * Code for replacing ftrace calls with jumps. |
| 3 | * |
| 4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> |
| 5 | * |
| 6 | * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box. |
| 7 | * |
Steven Rostedt | 6794c78 | 2009-02-09 21:10:27 -0800 | [diff] [blame] | 8 | * Added function graph tracer code, taken from x86 that was written |
| 9 | * by Frederic Weisbecker, and ported to PPC by Steven Rostedt. |
| 10 | * |
Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 11 | */ |
| 12 | |
| 13 | #include <linux/spinlock.h> |
| 14 | #include <linux/hardirq.h> |
Steven Rostedt | e4486fe | 2008-11-14 16:21:20 -0800 | [diff] [blame] | 15 | #include <linux/uaccess.h> |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 16 | #include <linux/module.h> |
Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 17 | #include <linux/ftrace.h> |
| 18 | #include <linux/percpu.h> |
| 19 | #include <linux/init.h> |
| 20 | #include <linux/list.h> |
| 21 | |
| 22 | #include <asm/cacheflush.h> |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 23 | #include <asm/code-patching.h> |
Abhishek Sagar | 395a59d | 2008-06-21 23:47:27 +0530 | [diff] [blame] | 24 | #include <asm/ftrace.h> |
Ian Munsie | 02424d8 | 2011-02-02 17:27:24 +0000 | [diff] [blame] | 25 | #include <asm/syscall.h> |
Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 26 | |
Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 27 | |
Steven Rostedt | 6794c78 | 2009-02-09 21:10:27 -0800 | [diff] [blame] | 28 | #ifdef CONFIG_DYNAMIC_FTRACE |
/*
 * Build the branch instruction that would live at @ip and jump to the
 * actual entry point of the function at @addr.  When @link is set the
 * result is a 'bl' (branch with link), otherwise a plain 'b'.
 */
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	unsigned long dest = ppc_function_entry((void *)addr);

	/* if (link) set op to 'bl' else 'b' */
	return create_branch((unsigned int *)ip, dest, link ? 1 : 0);
}
| 41 | |
Steven Rostedt | 8fd6e5a | 2008-11-14 16:21:19 -0800 | [diff] [blame] | 42 | static int |
Steven Rostedt | b54dcfe | 2009-02-13 06:31:39 -0800 | [diff] [blame] | 43 | ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new) |
Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 44 | { |
Steven Rostedt | b54dcfe | 2009-02-13 06:31:39 -0800 | [diff] [blame] | 45 | unsigned int replaced; |
Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 46 | |
Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 47 | /* |
| 48 | * Note: Due to modules and __init, code can |
| 49 | * disappear and change, we need to protect against faulting |
Steven Rostedt | e4486fe | 2008-11-14 16:21:20 -0800 | [diff] [blame] | 50 | * as well as code changing. We do this by using the |
| 51 | * probe_kernel_* functions. |
Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 52 | * |
| 53 | * No real locking needed, this code is run through |
Steven Rostedt | e4486fe | 2008-11-14 16:21:20 -0800 | [diff] [blame] | 54 | * kstop_machine, or before SMP starts. |
Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 55 | */ |
Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 56 | |
Steven Rostedt | e4486fe | 2008-11-14 16:21:20 -0800 | [diff] [blame] | 57 | /* read the text we want to modify */ |
Steven Rostedt | b54dcfe | 2009-02-13 06:31:39 -0800 | [diff] [blame] | 58 | if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE)) |
Steven Rostedt | e4486fe | 2008-11-14 16:21:20 -0800 | [diff] [blame] | 59 | return -EFAULT; |
Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 60 | |
Steven Rostedt | e4486fe | 2008-11-14 16:21:20 -0800 | [diff] [blame] | 61 | /* Make sure it is what we expect it to be */ |
Steven Rostedt | b54dcfe | 2009-02-13 06:31:39 -0800 | [diff] [blame] | 62 | if (replaced != old) |
Steven Rostedt | e4486fe | 2008-11-14 16:21:20 -0800 | [diff] [blame] | 63 | return -EINVAL; |
Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 64 | |
Steven Rostedt | e4486fe | 2008-11-14 16:21:20 -0800 | [diff] [blame] | 65 | /* replace the text with the new text */ |
Steven Rostedt | 65b8c72 | 2012-04-26 08:31:19 +0000 | [diff] [blame] | 66 | if (patch_instruction((unsigned int *)ip, new)) |
Steven Rostedt | e4486fe | 2008-11-14 16:21:20 -0800 | [diff] [blame] | 67 | return -EPERM; |
| 68 | |
Steven Rostedt | e4486fe | 2008-11-14 16:21:20 -0800 | [diff] [blame] | 69 | return 0; |
Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 70 | } |
| 71 | |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 72 | /* |
| 73 | * Helper functions that are the same for both PPC64 and PPC32. |
| 74 | */ |
/*
 * Return non-zero if @addr's function entry can be reached from @ip with
 * a single 24-bit relative branch (i.e. no trampoline needed).
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	/* resolve to the real code entry (handles descriptors on PPC64) */
	addr = ppc_function_entry((void *)addr);

	/* use the create_branch to verify that this offset can be branched */
	return create_branch((unsigned int *)ip, addr, 0);
}
| 82 | |
Steven Rostedt | 17be5b3 | 2009-02-05 21:33:09 -0800 | [diff] [blame] | 83 | #ifdef CONFIG_MODULES |
| 84 | |
/* Return 1 if @op encodes a 'bl' instruction (opcode 18, AA=0, LK=1). */
static int is_bl_op(unsigned int op)
{
	unsigned int masked = op & 0xfc000003;

	return masked == 0x48000001;
}
| 89 | |
/*
 * Given a 'b'/'bl' instruction @op located at @ip, return the absolute
 * address it branches to.
 *
 * Fix: @offset was declared 'static', giving a pure scratch local
 * function-lifetime storage and making the helper needlessly
 * non-reentrant; it must be a plain automatic variable.
 */
static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
	int offset;

	/* LI field: 24-bit signed word offset in bits 6-29 */
	offset = (op & 0x03fffffc);
	/* make it signed */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}
| 101 | |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 102 | #ifdef CONFIG_PPC64 |
/*
 * PPC64: turn the 'bl <module trampoline>' at a traced call site back
 * into a no-op (actually a 'b +8', see below).
 *
 * Returns 0 on success, -EFAULT on read/resolve failure, -EINVAL if the
 * site doesn't look like the sequence we expect, -EPERM if patching fails.
 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long ptr;
	unsigned long ip = rec->ip;
	void *tramp;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
		return -EFAULT;

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = (void *)find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %p", ip, tramp);

	/*
	 * NOTE(review): is_module_trampoline()/module_trampoline_target()
	 * are defined elsewhere (module code); presumably they validate the
	 * trampoline text and recover its final destination -- confirm.
	 */
	if (!is_module_trampoline(tramp)) {
		printk(KERN_ERR "Not a trampoline\n");
		return -EINVAL;
	}

	if (module_trampoline_target(mod, tramp, &ptr)) {
		printk(KERN_ERR "Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	/* This should match what was called */
	if (ptr != ppc_function_entry((void *)addr)) {
		printk(KERN_ERR "addr %lx does not match expected %lx\n",
			ptr, ppc_function_entry((void *)addr));
		return -EINVAL;
	}

	/*
	 * Our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we can not simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 */
	op = 0x48000008;	/* b +8 */

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
| 166 | |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 167 | #else /* !PPC64 */ |
/*
 * PPC32: turn the 'bl <module trampoline>' at a traced call site back
 * into a plain nop.  @mod is unused here (no TOC juggling on PPC32).
 *
 * Returns 0 on success, -EFAULT on read failure, -EINVAL if the site or
 * trampoline doesn't match what we expect, -EPERM if patching fails.
 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[4];
	unsigned long ip = rec->ip;
	unsigned long tramp;

	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/*
	 * On PPC32 the trampoline looks like:
	 *  0x3d, 0x80, 0x00, 0x00  lis r12,sym@ha
	 *  0x39, 0x8c, 0x00, 0x00  addi r12,r12,sym@l
	 *  0x7d, 0x89, 0x03, 0xa6  mtctr r12
	 *  0x4e, 0x80, 0x04, 0x20  bctr
	 */

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x ", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
	    (jmp[2] != 0x7d8903a6) ||
	    (jmp[3] != 0x4e800420)) {
		printk(KERN_ERR "Not a trampoline\n");
		return -EINVAL;
	}

	/* reassemble the sym@ha / sym@l halves into the destination */
	tramp = (jmp[1] & 0xffff) |
		((jmp[0] & 0xffff) << 16);
	/* addi sign-extends its immediate, so sym@ha compensated; undo it */
	if (tramp & 0x8000)
		tramp -= 0x10000;

	pr_devel(" %lx ", tramp);

	if (tramp != addr) {
		printk(KERN_ERR
		       "Trampoline location %08lx does not match addr\n",
		       tramp);
		return -EINVAL;
	}

	op = PPC_INST_NOP;

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
| 237 | #endif /* PPC64 */ |
Steven Rostedt | 17be5b3 | 2009-02-05 21:33:09 -0800 | [diff] [blame] | 238 | #endif /* CONFIG_MODULES */ |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 239 | |
| 240 | int ftrace_make_nop(struct module *mod, |
| 241 | struct dyn_ftrace *rec, unsigned long addr) |
Steven Rostedt | 8fd6e5a | 2008-11-14 16:21:19 -0800 | [diff] [blame] | 242 | { |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 243 | unsigned long ip = rec->ip; |
Steven Rostedt | b54dcfe | 2009-02-13 06:31:39 -0800 | [diff] [blame] | 244 | unsigned int old, new; |
Steven Rostedt | 8fd6e5a | 2008-11-14 16:21:19 -0800 | [diff] [blame] | 245 | |
| 246 | /* |
| 247 | * If the calling address is more that 24 bits away, |
| 248 | * then we had to use a trampoline to make the call. |
| 249 | * Otherwise just update the call site. |
| 250 | */ |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 251 | if (test_24bit_addr(ip, addr)) { |
Steven Rostedt | 8fd6e5a | 2008-11-14 16:21:19 -0800 | [diff] [blame] | 252 | /* within range */ |
Steven Rostedt | 4654288 | 2009-02-10 22:19:54 -0800 | [diff] [blame] | 253 | old = ftrace_call_replace(ip, addr, 1); |
Michael Ellerman | 92e02a5 | 2009-05-28 19:33:36 +0000 | [diff] [blame] | 254 | new = PPC_INST_NOP; |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 255 | return ftrace_modify_code(ip, old, new); |
Steven Rostedt | 8fd6e5a | 2008-11-14 16:21:19 -0800 | [diff] [blame] | 256 | } |
| 257 | |
Steven Rostedt | 17be5b3 | 2009-02-05 21:33:09 -0800 | [diff] [blame] | 258 | #ifdef CONFIG_MODULES |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 259 | /* |
| 260 | * Out of range jumps are called from modules. |
| 261 | * We should either already have a pointer to the module |
| 262 | * or it has been passed in. |
| 263 | */ |
| 264 | if (!rec->arch.mod) { |
| 265 | if (!mod) { |
| 266 | printk(KERN_ERR "No module loaded addr=%lx\n", |
| 267 | addr); |
| 268 | return -EFAULT; |
| 269 | } |
| 270 | rec->arch.mod = mod; |
| 271 | } else if (mod) { |
| 272 | if (mod != rec->arch.mod) { |
| 273 | printk(KERN_ERR |
| 274 | "Record mod %p not equal to passed in mod %p\n", |
| 275 | rec->arch.mod, mod); |
| 276 | return -EINVAL; |
| 277 | } |
| 278 | /* nothing to do if mod == rec->arch.mod */ |
| 279 | } else |
| 280 | mod = rec->arch.mod; |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 281 | |
| 282 | return __ftrace_make_nop(mod, rec, addr); |
Steven Rostedt | 17be5b3 | 2009-02-05 21:33:09 -0800 | [diff] [blame] | 283 | #else |
| 284 | /* We should not get here without modules */ |
| 285 | return -EINVAL; |
| 286 | #endif /* CONFIG_MODULES */ |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 287 | } |
| 288 | |
Steven Rostedt | 17be5b3 | 2009-02-05 21:33:09 -0800 | [diff] [blame] | 289 | #ifdef CONFIG_MODULES |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 290 | #ifdef CONFIG_PPC64 |
| 291 | static int |
| 292 | __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) |
| 293 | { |
Steven Rostedt | d9af12b7 | 2008-11-25 06:39:18 -0800 | [diff] [blame] | 294 | unsigned int op[2]; |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 295 | unsigned long ip = rec->ip; |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 296 | |
| 297 | /* read where this goes */ |
Steven Rostedt | d9af12b7 | 2008-11-25 06:39:18 -0800 | [diff] [blame] | 298 | if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2)) |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 299 | return -EFAULT; |
| 300 | |
| 301 | /* |
| 302 | * It should be pointing to two nops or |
| 303 | * b +8; ld r2,40(r1) |
| 304 | */ |
| 305 | if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) && |
Kumar Gala | 16c57b3 | 2009-02-10 20:10:44 +0000 | [diff] [blame] | 306 | ((op[0] != PPC_INST_NOP) || (op[1] != PPC_INST_NOP))) { |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 307 | printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]); |
| 308 | return -EINVAL; |
| 309 | } |
| 310 | |
| 311 | /* If we never set up a trampoline to ftrace_caller, then bail */ |
| 312 | if (!rec->arch.mod->arch.tramp) { |
| 313 | printk(KERN_ERR "No ftrace trampoline\n"); |
| 314 | return -EINVAL; |
| 315 | } |
| 316 | |
Steven Rostedt | 0029ff8 | 2008-11-25 14:06:19 -0800 | [diff] [blame] | 317 | /* create the branch to the trampoline */ |
| 318 | op[0] = create_branch((unsigned int *)ip, |
| 319 | rec->arch.mod->arch.tramp, BRANCH_SET_LINK); |
| 320 | if (!op[0]) { |
| 321 | printk(KERN_ERR "REL24 out of range!\n"); |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 322 | return -EINVAL; |
| 323 | } |
| 324 | |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 325 | /* ld r2,40(r1) */ |
| 326 | op[1] = 0xe8410028; |
| 327 | |
Michael Ellerman | 021376a | 2009-05-13 20:30:24 +0000 | [diff] [blame] | 328 | pr_devel("write to %lx\n", rec->ip); |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 329 | |
Steven Rostedt | d9af12b7 | 2008-11-25 06:39:18 -0800 | [diff] [blame] | 330 | if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2)) |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 331 | return -EPERM; |
| 332 | |
Steven Rostedt | ec682ce | 2008-11-25 10:22:48 -0800 | [diff] [blame] | 333 | flush_icache_range(ip, ip + 8); |
| 334 | |
Steven Rostedt | 8fd6e5a | 2008-11-14 16:21:19 -0800 | [diff] [blame] | 335 | return 0; |
| 336 | } |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 337 | #else |
| 338 | static int |
| 339 | __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) |
| 340 | { |
Steven Rostedt | d9af12b7 | 2008-11-25 06:39:18 -0800 | [diff] [blame] | 341 | unsigned int op; |
Steven Rostedt | 7cc45e6 | 2008-11-15 02:39:05 -0500 | [diff] [blame] | 342 | unsigned long ip = rec->ip; |
Steven Rostedt | 7cc45e6 | 2008-11-15 02:39:05 -0500 | [diff] [blame] | 343 | |
| 344 | /* read where this goes */ |
Steven Rostedt | d9af12b7 | 2008-11-25 06:39:18 -0800 | [diff] [blame] | 345 | if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE)) |
Steven Rostedt | 7cc45e6 | 2008-11-15 02:39:05 -0500 | [diff] [blame] | 346 | return -EFAULT; |
| 347 | |
| 348 | /* It should be pointing to a nop */ |
Kumar Gala | 16c57b3 | 2009-02-10 20:10:44 +0000 | [diff] [blame] | 349 | if (op != PPC_INST_NOP) { |
Steven Rostedt | d9af12b7 | 2008-11-25 06:39:18 -0800 | [diff] [blame] | 350 | printk(KERN_ERR "Expected NOP but have %x\n", op); |
Steven Rostedt | 7cc45e6 | 2008-11-15 02:39:05 -0500 | [diff] [blame] | 351 | return -EINVAL; |
| 352 | } |
| 353 | |
| 354 | /* If we never set up a trampoline to ftrace_caller, then bail */ |
| 355 | if (!rec->arch.mod->arch.tramp) { |
| 356 | printk(KERN_ERR "No ftrace trampoline\n"); |
| 357 | return -EINVAL; |
| 358 | } |
| 359 | |
Steven Rostedt | 0029ff8 | 2008-11-25 14:06:19 -0800 | [diff] [blame] | 360 | /* create the branch to the trampoline */ |
| 361 | op = create_branch((unsigned int *)ip, |
| 362 | rec->arch.mod->arch.tramp, BRANCH_SET_LINK); |
| 363 | if (!op) { |
| 364 | printk(KERN_ERR "REL24 out of range!\n"); |
Steven Rostedt | 7cc45e6 | 2008-11-15 02:39:05 -0500 | [diff] [blame] | 365 | return -EINVAL; |
| 366 | } |
| 367 | |
Michael Ellerman | 021376a | 2009-05-13 20:30:24 +0000 | [diff] [blame] | 368 | pr_devel("write to %lx\n", rec->ip); |
Steven Rostedt | 7cc45e6 | 2008-11-15 02:39:05 -0500 | [diff] [blame] | 369 | |
Steven Rostedt | 65b8c72 | 2012-04-26 08:31:19 +0000 | [diff] [blame] | 370 | if (patch_instruction((unsigned int *)ip, op)) |
Steven Rostedt | 7cc45e6 | 2008-11-15 02:39:05 -0500 | [diff] [blame] | 371 | return -EPERM; |
| 372 | |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 373 | return 0; |
| 374 | } |
| 375 | #endif /* CONFIG_PPC64 */ |
Steven Rostedt | 17be5b3 | 2009-02-05 21:33:09 -0800 | [diff] [blame] | 376 | #endif /* CONFIG_MODULES */ |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 377 | |
| 378 | int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) |
| 379 | { |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 380 | unsigned long ip = rec->ip; |
Steven Rostedt | b54dcfe | 2009-02-13 06:31:39 -0800 | [diff] [blame] | 381 | unsigned int old, new; |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 382 | |
| 383 | /* |
| 384 | * If the calling address is more that 24 bits away, |
| 385 | * then we had to use a trampoline to make the call. |
| 386 | * Otherwise just update the call site. |
| 387 | */ |
| 388 | if (test_24bit_addr(ip, addr)) { |
| 389 | /* within range */ |
Michael Ellerman | 92e02a5 | 2009-05-28 19:33:36 +0000 | [diff] [blame] | 390 | old = PPC_INST_NOP; |
Steven Rostedt | 4654288 | 2009-02-10 22:19:54 -0800 | [diff] [blame] | 391 | new = ftrace_call_replace(ip, addr, 1); |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 392 | return ftrace_modify_code(ip, old, new); |
| 393 | } |
| 394 | |
Steven Rostedt | 17be5b3 | 2009-02-05 21:33:09 -0800 | [diff] [blame] | 395 | #ifdef CONFIG_MODULES |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 396 | /* |
| 397 | * Out of range jumps are called from modules. |
| 398 | * Being that we are converting from nop, it had better |
| 399 | * already have a module defined. |
| 400 | */ |
| 401 | if (!rec->arch.mod) { |
| 402 | printk(KERN_ERR "No module loaded\n"); |
| 403 | return -EINVAL; |
| 404 | } |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 405 | |
| 406 | return __ftrace_make_call(rec, addr); |
Steven Rostedt | 17be5b3 | 2009-02-05 21:33:09 -0800 | [diff] [blame] | 407 | #else |
| 408 | /* We should not get here without modules */ |
| 409 | return -EINVAL; |
| 410 | #endif /* CONFIG_MODULES */ |
Steven Rostedt | f48cb8b | 2008-11-14 20:47:03 -0800 | [diff] [blame] | 411 | } |
Steven Rostedt | 8fd6e5a | 2008-11-14 16:21:19 -0800 | [diff] [blame] | 412 | |
Steven Rostedt | 15adc04 | 2008-10-23 09:33:08 -0400 | [diff] [blame] | 413 | int ftrace_update_ftrace_func(ftrace_func_t func) |
Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 414 | { |
| 415 | unsigned long ip = (unsigned long)(&ftrace_call); |
Steven Rostedt | b54dcfe | 2009-02-13 06:31:39 -0800 | [diff] [blame] | 416 | unsigned int old, new; |
Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 417 | int ret; |
| 418 | |
Steven Rostedt | b54dcfe | 2009-02-13 06:31:39 -0800 | [diff] [blame] | 419 | old = *(unsigned int *)&ftrace_call; |
Steven Rostedt | 4654288 | 2009-02-10 22:19:54 -0800 | [diff] [blame] | 420 | new = ftrace_call_replace(ip, (unsigned long)func, 1); |
Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 421 | ret = ftrace_modify_code(ip, old, new); |
| 422 | |
| 423 | return ret; |
| 424 | } |
| 425 | |
Steven Rostedt | ee456bb | 2012-04-26 08:31:17 +0000 | [diff] [blame] | 426 | static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable) |
| 427 | { |
| 428 | unsigned long ftrace_addr = (unsigned long)FTRACE_ADDR; |
| 429 | int ret; |
| 430 | |
| 431 | ret = ftrace_update_record(rec, enable); |
| 432 | |
| 433 | switch (ret) { |
| 434 | case FTRACE_UPDATE_IGNORE: |
| 435 | return 0; |
| 436 | case FTRACE_UPDATE_MAKE_CALL: |
| 437 | return ftrace_make_call(rec, ftrace_addr); |
| 438 | case FTRACE_UPDATE_MAKE_NOP: |
| 439 | return ftrace_make_nop(NULL, rec, ftrace_addr); |
| 440 | } |
| 441 | |
| 442 | return 0; |
| 443 | } |
| 444 | |
| 445 | void ftrace_replace_code(int enable) |
| 446 | { |
| 447 | struct ftrace_rec_iter *iter; |
| 448 | struct dyn_ftrace *rec; |
| 449 | int ret; |
| 450 | |
| 451 | for (iter = ftrace_rec_iter_start(); iter; |
| 452 | iter = ftrace_rec_iter_next(iter)) { |
| 453 | rec = ftrace_rec_iter_record(iter); |
| 454 | ret = __ftrace_replace_code(rec, enable); |
| 455 | if (ret) { |
| 456 | ftrace_bug(ret, rec->ip); |
| 457 | return; |
| 458 | } |
| 459 | } |
| 460 | } |
| 461 | |
| 462 | void arch_ftrace_update_code(int command) |
| 463 | { |
| 464 | if (command & FTRACE_UPDATE_CALLS) |
| 465 | ftrace_replace_code(1); |
| 466 | else if (command & FTRACE_DISABLE_CALLS) |
| 467 | ftrace_replace_code(0); |
| 468 | |
| 469 | if (command & FTRACE_UPDATE_TRACE_FUNC) |
| 470 | ftrace_update_ftrace_func(ftrace_trace_function); |
| 471 | |
| 472 | if (command & FTRACE_START_FUNC_RET) |
| 473 | ftrace_enable_ftrace_graph_caller(); |
| 474 | else if (command & FTRACE_STOP_FUNC_RET) |
| 475 | ftrace_disable_ftrace_graph_caller(); |
| 476 | } |
| 477 | |
/* No arch-specific setup is needed for dynamic ftrace on powerpc. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
Steven Rostedt | 6794c78 | 2009-02-09 21:10:27 -0800 | [diff] [blame] | 482 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
| 483 | |
| 484 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 485 | |
Steven Rostedt | 4654288 | 2009-02-10 22:19:54 -0800 | [diff] [blame] | 486 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 487 | extern void ftrace_graph_call(void); |
| 488 | extern void ftrace_graph_stub(void); |
| 489 | |
| 490 | int ftrace_enable_ftrace_graph_caller(void) |
| 491 | { |
| 492 | unsigned long ip = (unsigned long)(&ftrace_graph_call); |
| 493 | unsigned long addr = (unsigned long)(&ftrace_graph_caller); |
| 494 | unsigned long stub = (unsigned long)(&ftrace_graph_stub); |
Steven Rostedt | b54dcfe | 2009-02-13 06:31:39 -0800 | [diff] [blame] | 495 | unsigned int old, new; |
Steven Rostedt | 4654288 | 2009-02-10 22:19:54 -0800 | [diff] [blame] | 496 | |
Steven Rostedt | b54dcfe | 2009-02-13 06:31:39 -0800 | [diff] [blame] | 497 | old = ftrace_call_replace(ip, stub, 0); |
Steven Rostedt | 4654288 | 2009-02-10 22:19:54 -0800 | [diff] [blame] | 498 | new = ftrace_call_replace(ip, addr, 0); |
| 499 | |
| 500 | return ftrace_modify_code(ip, old, new); |
| 501 | } |
| 502 | |
| 503 | int ftrace_disable_ftrace_graph_caller(void) |
| 504 | { |
| 505 | unsigned long ip = (unsigned long)(&ftrace_graph_call); |
| 506 | unsigned long addr = (unsigned long)(&ftrace_graph_caller); |
| 507 | unsigned long stub = (unsigned long)(&ftrace_graph_stub); |
Steven Rostedt | b54dcfe | 2009-02-13 06:31:39 -0800 | [diff] [blame] | 508 | unsigned int old, new; |
Steven Rostedt | 4654288 | 2009-02-10 22:19:54 -0800 | [diff] [blame] | 509 | |
Steven Rostedt | b54dcfe | 2009-02-13 06:31:39 -0800 | [diff] [blame] | 510 | old = ftrace_call_replace(ip, addr, 0); |
Steven Rostedt | 4654288 | 2009-02-10 22:19:54 -0800 | [diff] [blame] | 511 | new = ftrace_call_replace(ip, stub, 0); |
| 512 | |
| 513 | return ftrace_modify_code(ip, old, new); |
| 514 | } |
| 515 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
| 516 | |
Steven Rostedt | bb72534 | 2009-02-11 12:45:49 -0800 | [diff] [blame] | 517 | #ifdef CONFIG_PPC64 |
| 518 | extern void mod_return_to_handler(void); |
| 519 | #endif |
| 520 | |
Steven Rostedt | 6794c78 | 2009-02-09 21:10:27 -0800 | [diff] [blame] | 521 | /* |
| 522 | * Hook the return address and push it in the stack of return addrs |
| 523 | * in current thread info. |
| 524 | */ |
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	/* graph tracing is paused for this task; leave the site alone */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

#ifdef CONFIG_PPC64
	/* non core kernel code needs to save and restore the TOC */
	if (REGION_ID(self_addr) != KERNEL_REGION_ID)
		return_hooker = (unsigned long)&mod_return_to_handler;
#endif

	/* resolve to the hooker's actual code entry (PPC64 descriptors) */
	return_hooker = ppc_function_entry((void *)return_hooker);

	/*
	 * Protect against fault, even if it shouldn't
	 * happen. This tool is too much intrusive to
	 * ignore such a protection.
	 *
	 * Loads the caller's return address from *parent (label 1) and
	 * overwrites it with return_hooker (label 2); a fault at either
	 * access lands in the .fixup stub (label 4) via __ex_table and
	 * sets 'faulted'.
	 */
	asm volatile(
		"1: " PPC_LL "%[old], 0(%[parent])\n"
		"2: " PPC_STL "%[return_hooker], 0(%[parent])\n"
		"   li %[faulted], 0\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: li %[faulted], 1\n"
		"   b 3b\n"
		".previous\n"

		".section __ex_table,\"a\"\n"
			PPC_LONG_ALIGN "\n"
			PPC_LONG "1b,4b\n"
			PPC_LONG "2b,4b\n"
		".previous"

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	/* couldn't touch the stack slot: stop graph tracing entirely */
	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	/* record the hijacked return address; restore it if the stack is full */
	if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY)
		*parent = old;
}
| 588 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
Ian Munsie | 02424d8 | 2011-02-02 17:27:24 +0000 | [diff] [blame] | 589 | |
| 590 | #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) |
/* Return the handler address for syscall number @nr. */
unsigned long __init arch_syscall_addr(int nr)
{
	/*
	 * NOTE(review): the stride-2 index presumably skips the compat
	 * (32-bit) entry interleaved with each native entry in the PPC64
	 * sys_call_table -- confirm against the table layout in entry_64.S.
	 */
	return sys_call_table[nr*2];
}
| 595 | #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */ |