/*
 * Kernel Probes (KProbes)
 * arch/mips/kernel/kprobes.c
 *
 * Copyright 2006 Sony Corp.
 * Copyright 2010 Cavium Networks
 *
 * Some portions copied from the powerpc version.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kprobes.h>
#include <linux/preempt.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/slab.h>

#include <asm/ptrace.h>
#include <asm/break.h>
#include <asm/inst.h>

static const union mips_instruction breakpoint_insn = {
	.b_format = {
		.opcode = spec_op,
		.code = BRK_KPROBE_BP,
		.func = break_op
	}
};

static const union mips_instruction breakpoint2_insn = {
	.b_format = {
		.opcode = spec_op,
		.code = BRK_KPROBE_SSTEPBP,
		.func = break_op
	}
};

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

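/*
 * insn_has_delayslot() returns non-zero if @insn is a branch or jump,
 * i.e. an instruction that is followed by a delay slot on MIPS.  Such
 * instructions cannot simply be copied out and single-stepped, so they
 * are rejected when a probe is prepared.
 */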
static int __kprobes insn_has_delayslot(union mips_instruction insn)
{
	switch (insn.i_format.opcode) {

	/*
	 * This group contains:
	 * jr and jalr, which are in r_format.
	 */
	case spec_op:
		switch (insn.r_format.func) {
		case jr_op:
		case jalr_op:
			break;
		default:
			goto insn_ok;
		}
		/* Fall through: jr and jalr have delay slots. */

	/*
	 * This group contains:
	 * bltz_op, bgez_op, bltzl_op, bgezl_op,
	 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
	 */
	case bcond_op:

	/*
	 * These are unconditional and in j_format.
	 */
	case jal_op:
	case j_op:

	/*
	 * These are conditional and in i_format.
	 */
	case beq_op:
	case beql_op:
	case bne_op:
	case bnel_op:
	case blez_op:
	case blezl_op:
	case bgtz_op:
	case bgtzl_op:

	/*
	 * These are the FPA/cp1 branch instructions.
	 */
	case cop1_op:

#ifdef CONFIG_CPU_CAVIUM_OCTEON
	case lwc2_op: /* This is bbit0 on Octeon */
	case ldc2_op: /* This is bbit032 on Octeon */
	case swc2_op: /* This is bbit1 on Octeon */
	case sdc2_op: /* This is bbit132 on Octeon */
#endif
		return 1;
	default:
		break;
	}
insn_ok:
	/* This instruction does not have a delay slot. */
	return 0;
}

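/*
 * arch_prepare_kprobe() validates the probed instruction and sets up
 * the out-of-line single-step slot.  Branches, jumps, and instructions
 * that sit in a branch delay slot are rejected, because they cannot be
 * single-stepped from a copied location.
 *
 * For context, a rough sketch of a module that would exercise this
 * path via the generic kprobes API; the symbol name and handler below
 * are illustrative only, not part of this file:
 *
 *	static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		pr_info("probe hit at %p\n", kp->addr);
 *		return 0;	// continue with single-stepping
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name = "do_fork",
 *		.pre_handler = my_pre,
 *	};
 *
 *	// register_kprobe() ends up calling arch_prepare_kprobe().
 *	int err = register_kprobe(&my_kp);
 */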
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	union mips_instruction insn;
	union mips_instruction prev_insn;
	int ret = 0;

	insn = p->addr[0];

	if (insn_has_delayslot(insn)) {
		pr_notice("Kprobes for branch and jump instructions are not supported\n");
		ret = -EINVAL;
		goto out;
	}

	if ((probe_kernel_read(&prev_insn, p->addr - 1,
				sizeof(prev_insn)) == 0) &&
			insn_has_delayslot(prev_insn)) {
		pr_notice("Kprobes for branch delay slots are not supported\n");
		ret = -EINVAL;
		goto out;
	}

	/* insn: must be on special executable page on mips. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * In the kprobe->ainsn.insn[] array we store the original
	 * instruction at index zero and a break trap instruction at
	 * index one.
	 */

	memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
	p->ainsn.insn[1] = breakpoint2_insn;
	p->opcode = *p->addr;

out:
	return ret;
}

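/*
 * Arm the probe: overwrite the probed instruction with the kprobe
 * breakpoint and flush so the new instruction becomes visible.
 */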
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = breakpoint_insn;
	flush_insn_slot(p);
}

/* Disarm the probe: put the original instruction back. */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_insn_slot(p);
}

/* Release the out-of-line single-step slot. */
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	free_insn_slot(p->ainsn.insn, 0);
}

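/*
 * On re-entry (a probe hit from within a kprobe handler), the state of
 * the interrupted probe is saved here and restored once the nested
 * probe has been single-stepped.
 */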
static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR;
	kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR;
	kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc;
}

static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
	kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
	kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc;
}

static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
			       struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE);
	kcb->kprobe_saved_epc = regs->cp0_epc;
}

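/*
 * Point EPC at the copied instruction in the single-step slot (or at
 * the breakpoint itself if the probed instruction is a break), with
 * interrupts disabled.  The break that follows the copy raises
 * DIE_SSTEPBP, which lets post_kprobe_handler() finish the step.
 */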
static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	regs->cp0_status &= ~ST0_IE;

	/* single step inline if the instruction is a break */
	if (p->opcode.word == breakpoint_insn.word ||
	    p->opcode.word == breakpoint2_insn.word)
		regs->cp0_epc = (unsigned long)p->addr;
	else
		regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
}

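/*
 * Entry point for the kprobe breakpoint (DIE_BREAK).  Returns 1 if the
 * break was consumed by kprobes, 0 to let the kernel handle it.
 */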
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *) regs->cp0_epc;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    p->ainsn.insn->word == breakpoint_insn.word) {
				regs->cp0_status &= ~ST0_IE;
				regs->cp0_status |= kcb->kprobe_saved_SR;
				goto no_kprobe;
			}
			/*
			 * We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (addr->word != breakpoint_insn.word) {
				/*
				 * The breakpoint instruction was removed by
				 * another cpu right after we hit it, so no
				 * further handling of this interrupt is
				 * appropriate.
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (addr->word != breakpoint_insn.word) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let the kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs)) {
		/* handler has already set things up, so skip ss setup */
		return 1;
	}

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction that has been replaced by the breakpoint instruction.
 * To avoid the SMP problems that can occur when we temporarily put
 * back the original opcode to single-step, we single-stepped a copy
 * of the instruction.  The address of this copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap.
 */
static void __kprobes resume_execution(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb)
{
	unsigned long orig_epc = kcb->kprobe_saved_epc;

	/*
	 * Branches were rejected in arch_prepare_kprobe(), so the
	 * stepped instruction is always the single word at orig_epc.
	 */
	regs->cp0_epc = orig_epc + 4;
}

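/*
 * Handles the DIE_SSTEPBP break that follows the out-of-line copy:
 * run the user post handler, fix up EPC, restore the saved interrupt
 * state, and pop any saved re-entrant probe state.
 */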
static inline int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);

	regs->cp0_status |= kcb->kprobe_saved_SR;

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	return 1;
}

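/*
 * A fault occurred while single-stepping or inside a user handler.
 * Give the user fault handler first refusal, then unwind the
 * single-step state if we faulted mid-step.
 */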
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
		return 1;

	if (kcb->kprobe_status & KPROBE_HIT_SS) {
		resume_execution(cur, regs, kcb);
		regs->cp0_status |= kcb->kprobe_old_SR;

		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_BREAK:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEPBP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();

		if (kprobe_running()
		    && kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}

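/*
 * jprobes support: divert execution to the jprobe entry function with
 * the probed function's arguments still live in the argument
 * registers.  The registers and the top of the stack are saved so
 * longjmp_break_handler() can restore them afterwards.
 */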
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = regs->regs[29];

	memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
	       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));

	regs->cp0_epc = (unsigned long)(jp->entry);

	return 1;
}

/* Defined in the inline asm below. */
void jprobe_return_end(void);

void __kprobes jprobe_return(void)
{
	/* Assembler quirk necessitates this '0,code' business. */
	asm volatile(
		"break 0,%0\n\t"
		".globl jprobe_return_end\n"
		"jprobe_return_end:\n"
		: : "n" (BRK_KPROBE_BP) : "memory");
}

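/*
 * The break in jprobe_return() lands here: restore the saved registers
 * and stack, ending the detour made by setjmp_pre_handler().
 */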
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (regs->cp0_epc >= (unsigned long)jprobe_return &&
	    regs->cp0_epc <= (unsigned long)jprobe_return_end) {
		*regs = kcb->jprobe_saved_regs;
		memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
		       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
		preempt_enable_no_resched();

		return 1;
	}
	return 0;
}

/*
 * Function return probe trampoline:
 *	- arch_init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe causes the
 *	  handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(
		".set push\n\t"
		/* Keep the assembler from reordering and placing JR here. */
		".set noreorder\n\t"
		"nop\n\t"
		".global kretprobe_trampoline\n"
		"kretprobe_trampoline:\n\t"
		"nop\n\t"
		".set pop"
		: : : "memory");
}

void kretprobe_trampoline(void);

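/*
 * Hijack the return address: the real one is saved in the kretprobe
 * instance, and $ra (regs[31]) is pointed at the trampoline above, so
 * the probed function "returns" into trampoline_probe_handler().
 */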
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];

	/* Replace the return addr with trampoline addr */
	regs->regs[31] = (unsigned long)kretprobe_trampoline;
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always inserted at the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the first instance's ret_addr will point to the
	 *	  real return address, and all the rest will point to
	 *	  kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	instruction_pointer(regs) = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling kprobe_handler()
	 * that we don't want the post_handler to run (and have
	 * re-enabled preemption).
	 */
	return 1;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
		return 1;

	return 0;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}