Ralf Baechle | 40e084a | 2015-07-29 22:44:53 +0200 | [diff] [blame] | 1 | #include <linux/highmem.h> |
| 2 | #include <linux/kdebug.h> |
| 3 | #include <linux/types.h> |
| 4 | #include <linux/notifier.h> |
| 5 | #include <linux/sched.h> |
| 6 | #include <linux/uprobes.h> |
| 7 | |
| 8 | #include <asm/branch.h> |
| 9 | #include <asm/cpu-features.h> |
| 10 | #include <asm/ptrace.h> |
| 11 | #include <asm/inst.h> |
| 12 | |
/*
 * insn_has_delay_slot() - does @insn have an architectural delay slot?
 *
 * Returns 1 if @insn is a (non-compact) branch or jump, i.e. the next
 * instruction executes in its delay slot; 0 otherwise.  Callers use this
 * to pick which instruction goes into the XOL slot and whether the branch
 * itself must be emulated rather than single-stepped.
 */
static inline int insn_has_delay_slot(const union mips_instruction insn)
{
	switch (insn.i_format.opcode) {
	/*
	 * jr and jalr are in r_format format.
	 */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
		case jr_op:
			return 1;
		}
		break;

	/*
	 * This group contains:
	 * bltz_op, bgez_op, bltzl_op, bgezl_op,
	 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
	 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
		case bgez_op:
		case bgezl_op:
		case bltzal_op:
		case bltzall_op:
		case bgezal_op:
		case bgezall_op:
		case bposge32_op:
			return 1;
		}
		break;

	/*
	 * j and jal are unconditional jumps in j_format; the remaining
	 * opcodes in this group are conditional i_format branches.  All
	 * of them have a delay slot whether or not the branch is taken.
	 */
	case jal_op:
	case j_op:
	case beq_op:
	case beql_op:
	case bne_op:
	case bnel_op:
	case blez_op: /* not really i_format */
	case blezl_op:
	case bgtz_op:
	case bgtzl_op:
		return 1;

	/*
	 * And now the FPA/cp1 branch instructions.
	 */
	case cop1_op:
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	case lwc2_op: /* This is bbit0 on Octeon */
	case ldc2_op: /* This is bbit032 on Octeon */
	case swc2_op: /* This is bbit1 on Octeon */
	case sdc2_op: /* This is bbit132 on Octeon */
#endif
		return 1;
	}

	return 0;
}
| 77 | |
/**
 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
 * @aup: the probepoint information.
 * @mm: the probed address space.
 * @addr: virtual address at which to install the probepoint
 * Return 0 on success or a -ve number on error.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *aup,
	struct mm_struct *mm, unsigned long addr)
{
	union mips_instruction inst;

	/*
	 * Probes must sit on a 32-bit instruction boundary.
	 * For the time being this also blocks attempts to use uprobes with
	 * MIPS16 and microMIPS.
	 */
	if (addr & 0x03)
		return -EINVAL;

	inst.word = aup->insn[0];

	/*
	 * If the probed instruction is a branch it has a delay slot, so the
	 * instruction single-stepped out of line is the delay-slot
	 * instruction (insn[1]) while the branch itself is emulated by
	 * arch_uprobe_pre_xol(); otherwise the probed instruction (insn[0])
	 * is stepped directly.  The slot is terminated with a break that
	 * traps back into the kernel.
	 */
	aup->ixol[0] = aup->insn[insn_has_delay_slot(inst)];
	aup->ixol[1] = UPROBE_BRK_UPROBE_XOL;		/* NOP  */

	return 0;
}
| 103 | |
| 104 | /** |
| 105 | * is_trap_insn - check if the instruction is a trap variant |
| 106 | * @insn: instruction to be checked. |
| 107 | * Returns true if @insn is a trap variant. |
| 108 | * |
| 109 | * This definition overrides the weak definition in kernel/events/uprobes.c. |
| 110 | * and is needed for the case where an architecture has multiple trap |
| 111 | * instructions (like PowerPC or MIPS). We treat BREAK just like the more |
| 112 | * modern conditional trap instructions. |
| 113 | */ |
| 114 | bool is_trap_insn(uprobe_opcode_t *insn) |
| 115 | { |
| 116 | union mips_instruction inst; |
| 117 | |
| 118 | inst.word = *insn; |
| 119 | |
| 120 | switch (inst.i_format.opcode) { |
| 121 | case spec_op: |
| 122 | switch (inst.r_format.func) { |
| 123 | case break_op: |
| 124 | case teq_op: |
| 125 | case tge_op: |
| 126 | case tgeu_op: |
| 127 | case tlt_op: |
| 128 | case tltu_op: |
| 129 | case tne_op: |
| 130 | return 1; |
| 131 | } |
| 132 | break; |
| 133 | |
| 134 | case bcond_op: /* Yes, really ... */ |
| 135 | switch (inst.u_format.rt) { |
| 136 | case teqi_op: |
| 137 | case tgei_op: |
| 138 | case tgeiu_op: |
| 139 | case tlti_op: |
| 140 | case tltiu_op: |
| 141 | case tnei_op: |
| 142 | return 1; |
| 143 | } |
| 144 | break; |
| 145 | } |
| 146 | |
| 147 | return 0; |
| 148 | } |
| 149 | |
| 150 | #define UPROBE_TRAP_NR ULONG_MAX |
| 151 | |
| 152 | /* |
| 153 | * arch_uprobe_pre_xol - prepare to execute out of line. |
| 154 | * @auprobe: the probepoint information. |
| 155 | * @regs: reflects the saved user state of current task. |
| 156 | */ |
| 157 | int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs) |
| 158 | { |
| 159 | struct uprobe_task *utask = current->utask; |
Ralf Baechle | 40e084a | 2015-07-29 22:44:53 +0200 | [diff] [blame] | 160 | |
| 161 | /* |
| 162 | * Now find the EPC where to resume after the breakpoint has been |
| 163 | * dealt with. This may require emulation of a branch. |
| 164 | */ |
| 165 | aup->resume_epc = regs->cp0_epc + 4; |
| 166 | if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) { |
| 167 | unsigned long epc; |
| 168 | |
| 169 | epc = regs->cp0_epc; |
Marcin Nowakowski | ca86c9e | 2016-09-22 15:38:33 +0200 | [diff] [blame] | 170 | __compute_return_epc_for_insn(regs, |
| 171 | (union mips_instruction) aup->insn[0]); |
Ralf Baechle | 40e084a | 2015-07-29 22:44:53 +0200 | [diff] [blame] | 172 | aup->resume_epc = regs->cp0_epc; |
| 173 | } |
Ralf Baechle | 40e084a | 2015-07-29 22:44:53 +0200 | [diff] [blame] | 174 | utask->autask.saved_trap_nr = current->thread.trap_nr; |
| 175 | current->thread.trap_nr = UPROBE_TRAP_NR; |
| 176 | regs->cp0_epc = current->utask->xol_vaddr; |
| 177 | |
| 178 | return 0; |
| 179 | } |
| 180 | |
| 181 | int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs) |
| 182 | { |
| 183 | struct uprobe_task *utask = current->utask; |
| 184 | |
| 185 | current->thread.trap_nr = utask->autask.saved_trap_nr; |
| 186 | regs->cp0_epc = aup->resume_epc; |
| 187 | |
| 188 | return 0; |
| 189 | } |
| 190 | |
| 191 | /* |
| 192 | * If xol insn itself traps and generates a signal(Say, |
| 193 | * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped |
| 194 | * instruction jumps back to its own address. It is assumed that anything |
| 195 | * like do_page_fault/do_trap/etc sets thread.trap_nr != -1. |
| 196 | * |
| 197 | * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr, |
| 198 | * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to |
| 199 | * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol(). |
| 200 | */ |
| 201 | bool arch_uprobe_xol_was_trapped(struct task_struct *tsk) |
| 202 | { |
| 203 | if (tsk->thread.trap_nr != UPROBE_TRAP_NR) |
| 204 | return true; |
| 205 | |
| 206 | return false; |
| 207 | } |
| 208 | |
| 209 | int arch_uprobe_exception_notify(struct notifier_block *self, |
| 210 | unsigned long val, void *data) |
| 211 | { |
| 212 | struct die_args *args = data; |
| 213 | struct pt_regs *regs = args->regs; |
| 214 | |
| 215 | /* regs == NULL is a kernel bug */ |
| 216 | if (WARN_ON(!regs)) |
| 217 | return NOTIFY_DONE; |
| 218 | |
| 219 | /* We are only interested in userspace traps */ |
| 220 | if (!user_mode(regs)) |
| 221 | return NOTIFY_DONE; |
| 222 | |
| 223 | switch (val) { |
Marcin Nowakowski | 2809328 | 2016-08-11 09:02:30 +0200 | [diff] [blame] | 224 | case DIE_UPROBE: |
Ralf Baechle | 40e084a | 2015-07-29 22:44:53 +0200 | [diff] [blame] | 225 | if (uprobe_pre_sstep_notifier(regs)) |
| 226 | return NOTIFY_STOP; |
| 227 | break; |
| 228 | case DIE_UPROBE_XOL: |
| 229 | if (uprobe_post_sstep_notifier(regs)) |
| 230 | return NOTIFY_STOP; |
| 231 | default: |
| 232 | break; |
| 233 | } |
| 234 | |
| 235 | return 0; |
| 236 | } |
| 237 | |
| 238 | /* |
| 239 | * This function gets called when XOL instruction either gets trapped or |
| 240 | * the thread has a fatal signal. Reset the instruction pointer to its |
| 241 | * probed address for the potential restart or for post mortem analysis. |
| 242 | */ |
| 243 | void arch_uprobe_abort_xol(struct arch_uprobe *aup, |
| 244 | struct pt_regs *regs) |
| 245 | { |
| 246 | struct uprobe_task *utask = current->utask; |
| 247 | |
| 248 | instruction_pointer_set(regs, utask->vaddr); |
| 249 | } |
| 250 | |
| 251 | unsigned long arch_uretprobe_hijack_return_addr( |
| 252 | unsigned long trampoline_vaddr, struct pt_regs *regs) |
| 253 | { |
| 254 | unsigned long ra; |
| 255 | |
| 256 | ra = regs->regs[31]; |
| 257 | |
| 258 | /* Replace the return address with the trampoline address */ |
Marcin Nowakowski | db06068 | 2016-09-22 15:38:31 +0200 | [diff] [blame] | 259 | regs->regs[31] = trampoline_vaddr; |
Ralf Baechle | 40e084a | 2015-07-29 22:44:53 +0200 | [diff] [blame] | 260 | |
| 261 | return ra; |
| 262 | } |
| 263 | |
/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 *
 * This version overrides the weak version in kernel/events/uprobes.c.
 * It is required to handle MIPS16 and microMIPS.
 *
 * NOTE(review): this definition is itself declared __weak and its body is
 * identical to the generic fallback — it does not actually do any MIPS16
 * or microMIPS handling yet, despite the comment above.  Confirm intent.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
	unsigned long vaddr)
{
	return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
}
| 281 | |
Ralf Baechle | 40e084a | 2015-07-29 22:44:53 +0200 | [diff] [blame] | 282 | void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, |
| 283 | void *src, unsigned long len) |
| 284 | { |
| 285 | void *kaddr; |
| 286 | |
| 287 | /* Initialize the slot */ |
| 288 | kaddr = kmap_atomic(page); |
| 289 | memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len); |
| 290 | kunmap_atomic(kaddr); |
| 291 | |
| 292 | /* |
| 293 | * The MIPS version of flush_icache_range will operate safely on |
| 294 | * user space addresses and more importantly, it doesn't require a |
| 295 | * VMA argument. |
| 296 | */ |
| 297 | flush_icache_range(vaddr, vaddr + len); |
| 298 | } |
| 299 | |
/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 *
 * This overrides the weak version in kernel/events/uprobes.c.  On MIPS
 * no adjustment of the saved instruction pointer is needed.
 */
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}
| 312 | |
| 313 | /* |
| 314 | * See if the instruction can be emulated. |
| 315 | * Returns true if instruction was emulated, false otherwise. |
| 316 | * |
| 317 | * For now we always emulate so this function just returns 0. |
| 318 | */ |
| 319 | bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) |
| 320 | { |
| 321 | return 0; |
| 322 | } |