Paul Mundt | ab6e570 | 2008-12-11 18:46:46 +0900 | [diff] [blame] | 1 | /* |
| 2 | * SuperH KGDB support |
| 3 | * |
Paul Mundt | fd03e81 | 2012-04-10 13:42:56 +0900 | [diff] [blame^] | 4 | * Copyright (C) 2008 - 2012 Paul Mundt |
Paul Mundt | ab6e570 | 2008-12-11 18:46:46 +0900 | [diff] [blame] | 5 | * |
| 6 | * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel. |
| 7 | * |
| 8 | * This file is subject to the terms and conditions of the GNU General Public |
| 9 | * License. See the file "COPYING" in the main directory of this archive |
| 10 | * for more details. |
| 11 | */ |
| 12 | #include <linux/kgdb.h> |
| 13 | #include <linux/kdebug.h> |
| 14 | #include <linux/irq.h> |
| 15 | #include <linux/io.h> |
| 16 | #include <asm/cacheflush.h> |
Paul Mundt | f03c486 | 2012-03-30 19:29:57 +0900 | [diff] [blame] | 17 | #include <asm/traps.h> |
Paul Mundt | ab6e570 | 2008-12-11 18:46:46 +0900 | [diff] [blame] | 18 | |
/*
 * Macros for single step instruction identification.
 *
 * Each OPCODE_x(op) tests the fixed bits of a 16-bit SuperH instruction
 * word for the corresponding branch instruction; the _DISP() helpers
 * sign-extend the encoded displacement and scale it to bytes (<< 1),
 * the _REG() helpers extract the source register number.
 *
 * NOTE(review): the sign extensions left-shift a value with the sign
 * bit set, which is undefined in ISO C but well-defined under the
 * kernel's gcc dialect -- confirm if ever built with another compiler.
 */
/* BT/BF: conditional branches, 8-bit displacement */
#define OPCODE_BT(op) (((op) & 0xff00) == 0x8900)
#define OPCODE_BF(op) (((op) & 0xff00) == 0x8b00)
#define OPCODE_BTF_DISP(op) (((op) & 0x80) ? (((op) | 0xffffff80) << 1) : \
(((op) & 0x7f ) << 1))
/* BFS/BTS: conditional branches with delay slot */
#define OPCODE_BFS(op) (((op) & 0xff00) == 0x8f00)
#define OPCODE_BTS(op) (((op) & 0xff00) == 0x8d00)
/* BRA: unconditional branch, 12-bit displacement */
#define OPCODE_BRA(op) (((op) & 0xf000) == 0xa000)
#define OPCODE_BRA_DISP(op) (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
(((op) & 0x7ff) << 1))
/* BRAF: unconditional branch far, register relative */
#define OPCODE_BRAF(op) (((op) & 0xf0ff) == 0x0023)
#define OPCODE_BRAF_REG(op) (((op) & 0x0f00) >> 8)
/* BSR/BSRF: branch to subroutine, displacement / register relative */
#define OPCODE_BSR(op) (((op) & 0xf000) == 0xb000)
#define OPCODE_BSR_DISP(op) (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
(((op) & 0x7ff) << 1))
#define OPCODE_BSRF(op) (((op) & 0xf0ff) == 0x0003)
#define OPCODE_BSRF_REG(op) (((op) >> 8) & 0xf)
/* JMP/JSR: absolute jumps through a register */
#define OPCODE_JMP(op) (((op) & 0xf0ff) == 0x402b)
#define OPCODE_JMP_REG(op) (((op) >> 8) & 0xf)
#define OPCODE_JSR(op) (((op) & 0xf0ff) == 0x400b)
#define OPCODE_JSR_REG(op) (((op) >> 8) & 0xf)
/* RTS/RTE: return from subroutine / exception */
#define OPCODE_RTS(op) ((op) == 0xb)
#define OPCODE_RTE(op) ((op) == 0x2b)

/* Mask for the T (condition result) bit in the status register */
#define SR_T_BIT_MASK 0x1
/* Opcode planted at the step target (trapa #0x3d, per undo/do_single_step) */
#define STEP_OPCODE 0xc33d
| 45 | |
/*
 * Calculate the new address for after a step.
 *
 * Decodes the instruction at linux_regs->pc and returns the address of
 * the instruction that will execute next, taking conditional branches
 * (via the T bit in SR), delay slots, register-relative jumps and
 * returns into account.  Anything that is not a branch simply advances
 * pc by the instruction size.
 */
static short *get_step_address(struct pt_regs *linux_regs)
{
	insn_size_t op = __raw_readw(linux_regs->pc);
	long addr;

	/* BT: branch if T set (no delay slot) */
	if (OPCODE_BT(op)) {
		if (linux_regs->sr & SR_T_BIT_MASK)
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 2;
	}

	/* BTS: branch if T set, with delay slot */
	else if (OPCODE_BTS(op)) {
		if (linux_regs->sr & SR_T_BIT_MASK)
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 4; /* Not in delay slot */
	}

	/* BF: branch if T clear (no delay slot) */
	else if (OPCODE_BF(op)) {
		if (!(linux_regs->sr & SR_T_BIT_MASK))
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 2;
	}

	/* BFS: branch if T clear, with delay slot */
	else if (OPCODE_BFS(op)) {
		if (!(linux_regs->sr & SR_T_BIT_MASK))
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 4; /* Not in delay slot */
	}

	/* BRA: pc-relative unconditional branch */
	else if (OPCODE_BRA(op))
		addr = linux_regs->pc + 4 + OPCODE_BRA_DISP(op);

	/* BRAF: register-relative unconditional branch */
	else if (OPCODE_BRAF(op))
		addr = linux_regs->pc + 4
		+ linux_regs->regs[OPCODE_BRAF_REG(op)];

	/* BSR: pc-relative subroutine call */
	else if (OPCODE_BSR(op))
		addr = linux_regs->pc + 4 + OPCODE_BSR_DISP(op);

	/* BSRF: register-relative subroutine call */
	else if (OPCODE_BSRF(op))
		addr = linux_regs->pc + 4
		+ linux_regs->regs[OPCODE_BSRF_REG(op)];

	/* JMP: absolute jump through register */
	else if (OPCODE_JMP(op))
		addr = linux_regs->regs[OPCODE_JMP_REG(op)];

	/* JSR: absolute subroutine call through register */
	else if (OPCODE_JSR(op))
		addr = linux_regs->regs[OPCODE_JSR_REG(op)];

	/* RTS: return from subroutine, target is pr */
	else if (OPCODE_RTS(op))
		addr = linux_regs->pr;

	/*
	 * RTE: return from exception.  NOTE(review): this reads the
	 * return pc from the top of the stack (r15) -- confirm this
	 * matches the exception frame layout used by the entry code.
	 */
	else if (OPCODE_RTE(op))
		addr = linux_regs->regs[15];

	/* Other: fall straight through to the next instruction */
	else
		addr = linux_regs->pc + instruction_size(op);

	flush_icache_range(addr, addr + instruction_size(op));
	return (short *)addr;
}
| 125 | |
/*
 * Replace the instruction immediately after the current instruction
 * (i.e. next in the expected flow of control) with a trap instruction,
 * so that returning will cause only a single instruction to be executed.
 * Note that this model is slightly broken for instructions with delay
 * slots (e.g. B[TF]S, BSR, BRA etc), where both the branch and the
 * instruction in the delay slot will be executed.
 */

/* Address of the planted step trap; 0-initialised, valid while stepping */
static unsigned long stepped_address;
/* Original opcode at stepped_address; 0 means no step is armed */
static insn_size_t stepped_opcode;
Paul Mundt | ab6e570 | 2008-12-11 18:46:46 +0900 | [diff] [blame] | 137 | |
| 138 | static void do_single_step(struct pt_regs *linux_regs) |
| 139 | { |
| 140 | /* Determine where the target instruction will send us to */ |
| 141 | unsigned short *addr = get_step_address(linux_regs); |
| 142 | |
| 143 | stepped_address = (int)addr; |
| 144 | |
| 145 | /* Replace it */ |
| 146 | stepped_opcode = __raw_readw((long)addr); |
| 147 | *addr = STEP_OPCODE; |
| 148 | |
| 149 | /* Flush and return */ |
| 150 | flush_icache_range((long)addr, (long)addr + |
| 151 | instruction_size(stepped_opcode)); |
| 152 | } |
| 153 | |
| 154 | /* Undo a single step */ |
| 155 | static void undo_single_step(struct pt_regs *linux_regs) |
| 156 | { |
| 157 | /* If we have stepped, put back the old instruction */ |
| 158 | /* Use stepped_address in case we stopped elsewhere */ |
| 159 | if (stepped_opcode != 0) { |
| 160 | __raw_writew(stepped_opcode, stepped_address); |
| 161 | flush_icache_range(stepped_address, stepped_address + 2); |
| 162 | } |
| 163 | |
| 164 | stepped_opcode = 0; |
| 165 | } |
| 166 | |
/*
 * gdb remote-protocol register layout.  Each entry maps a gdb register
 * number to its name, size, and offset within struct pt_regs.  An
 * offset of -1 marks a register (vbr) that is not saved in pt_regs and
 * must be fetched specially in dbg_get_reg().
 */
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
	{ "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
	{ "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
	{ "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
	{ "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
	{ "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
	{ "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
	{ "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
	{ "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
	{ "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
	{ "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
	{ "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
	{ "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
	{ "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
	{ "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
	{ "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
	{ "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
	{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, pc) },
	{ "pr", GDB_SIZEOF_REG, offsetof(struct pt_regs, pr) },
	{ "sr", GDB_SIZEOF_REG, offsetof(struct pt_regs, sr) },
	{ "gbr", GDB_SIZEOF_REG, offsetof(struct pt_regs, gbr) },
	{ "mach", GDB_SIZEOF_REG, offsetof(struct pt_regs, mach) },
	{ "macl", GDB_SIZEOF_REG, offsetof(struct pt_regs, macl) },
	{ "vbr", GDB_SIZEOF_REG, -1 },	/* not in pt_regs, read via stc */
};
| 192 | |
| 193 | int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) |
Paul Mundt | ab6e570 | 2008-12-11 18:46:46 +0900 | [diff] [blame] | 194 | { |
Paul Mundt | fd03e81 | 2012-04-10 13:42:56 +0900 | [diff] [blame^] | 195 | if (regno < 0 || regno >= DBG_MAX_REG_NUM) |
| 196 | return -EINVAL; |
Paul Mundt | ab6e570 | 2008-12-11 18:46:46 +0900 | [diff] [blame] | 197 | |
Paul Mundt | fd03e81 | 2012-04-10 13:42:56 +0900 | [diff] [blame^] | 198 | if (dbg_reg_def[regno].offset != -1) |
| 199 | memcpy((void *)regs + dbg_reg_def[regno].offset, mem, |
| 200 | dbg_reg_def[regno].size); |
Paul Mundt | ab6e570 | 2008-12-11 18:46:46 +0900 | [diff] [blame] | 201 | |
Paul Mundt | fd03e81 | 2012-04-10 13:42:56 +0900 | [diff] [blame^] | 202 | return 0; |
Paul Mundt | ab6e570 | 2008-12-11 18:46:46 +0900 | [diff] [blame] | 203 | } |
| 204 | |
Paul Mundt | fd03e81 | 2012-04-10 13:42:56 +0900 | [diff] [blame^] | 205 | char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) |
Paul Mundt | ab6e570 | 2008-12-11 18:46:46 +0900 | [diff] [blame] | 206 | { |
Paul Mundt | fd03e81 | 2012-04-10 13:42:56 +0900 | [diff] [blame^] | 207 | if (regno >= DBG_MAX_REG_NUM || regno < 0) |
| 208 | return NULL; |
Paul Mundt | ab6e570 | 2008-12-11 18:46:46 +0900 | [diff] [blame] | 209 | |
Paul Mundt | fd03e81 | 2012-04-10 13:42:56 +0900 | [diff] [blame^] | 210 | if (dbg_reg_def[regno].size != -1) |
| 211 | memcpy(mem, (void *)regs + dbg_reg_def[regno].offset, |
| 212 | dbg_reg_def[regno].size); |
Paul Mundt | ab6e570 | 2008-12-11 18:46:46 +0900 | [diff] [blame] | 213 | |
Paul Mundt | fd03e81 | 2012-04-10 13:42:56 +0900 | [diff] [blame^] | 214 | switch (regno) { |
| 215 | case GDB_VBR: |
| 216 | __asm__ __volatile__ ("stc vbr, %0" : "=r" (mem)); |
| 217 | break; |
| 218 | } |
| 219 | |
| 220 | return dbg_reg_def[regno].name; |
Paul Mundt | ab6e570 | 2008-12-11 18:46:46 +0900 | [diff] [blame] | 221 | } |
| 222 | |
| 223 | void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) |
| 224 | { |
| 225 | gdb_regs[GDB_R15] = p->thread.sp; |
| 226 | gdb_regs[GDB_PC] = p->thread.pc; |
| 227 | } |
| 228 | |
/*
 * Handle a continue ('c'), step ('s'), detach ('D') or kill ('k')
 * packet from the remote gdb.
 *
 * Returns 0 when the packet was handled (resume execution), -1 for any
 * other packet so the generic code stays in the exception handler.
 */
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
			       char *remcomInBuffer, char *remcomOutBuffer,
			       struct pt_regs *linux_regs)
{
	unsigned long addr;
	char *ptr;

	/* Undo any stepping we may have done */
	undo_single_step(linux_regs);

	switch (remcomInBuffer[0]) {
	case 'c':
	case 's':
		/* try to read optional parameter, pc unchanged if no parm */
		ptr = &remcomInBuffer[1];
		if (kgdb_hex2long(&ptr, &addr))
			linux_regs->pc = addr;
		/* fallthrough -- resume handling is shared with D/k */
	case 'D':
	case 'k':
		atomic_set(&kgdb_cpu_doing_single_step, -1);

		if (remcomInBuffer[0] == 's') {
			/* Arm the step trap and record the stepping CPU */
			do_single_step(linux_regs);
			kgdb_single_step = 1;

			atomic_set(&kgdb_cpu_doing_single_step,
				   raw_smp_processor_id());
		}

		return 0;
	}

	/* this means that we do not want to exit from the handler: */
	return -1;
}
| 264 | |
/*
 * Report the pc of the trapped instruction to the debugger core.
 *
 * Vector 60 (0x3c) is the breakpoint trapa (see arch_kgdb_ops), so pc
 * is rewound over the 2-byte trap instruction in that case.
 */
unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	return (exception == 60) ? pc - 2 : pc;
}
| 271 | |
/* Set the resume address requested by the debugger core. */
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
	regs->pc = ip;
}
| 276 | |
/*
 * The primary entry points for the kgdb debug trap table entries.
 */
BUILD_TRAP_HANDLER(singlestep)
{
	unsigned long flags;
	TRAP_HANDLER_DECL;

	local_irq_save(flags);
	/*
	 * Rewind pc over the step trap so gdb reports the address of
	 * the stepped instruction.  NOTE(review): the "- 4" assumes pc
	 * points two instructions past the opcode being sized --
	 * confirm against the trap entry code.
	 */
	regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
	kgdb_handle_exception(0, SIGTRAP, 0, regs);
	local_irq_restore(flags);
}
| 290 | |
/* IPI callback: park this CPU in the debugger until resumed. */
static void kgdb_call_nmi_hook(void *ignored)
{
	kgdb_nmicallback(raw_smp_processor_id(), NULL);
}
| 295 | |
/*
 * Round up the other CPUs by sending them an IPI that enters
 * kgdb_call_nmi_hook().
 */
void kgdb_roundup_cpus(unsigned long flags)
{
	/*
	 * NOTE(review): interrupts are briefly enabled here, presumably
	 * because smp_call_function() cannot be invoked with IRQs off
	 * -- confirm against the smp_call_function() contract.
	 */
	local_irq_enable();
	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
	local_irq_disable();
}
| 302 | |
/*
 * Core die-notifier logic (called with IRQs disabled by kgdb_notify).
 *
 * Returns NOTIFY_STOP when kgdb consumed the event, NOTIFY_DONE to let
 * other handlers see it.
 */
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
	int ret;

	switch (cmd) {
	case DIE_BREAKPOINT:
		/*
		 * This means a user thread is single stepping
		 * a system call which should be ignored
		 */
		if (test_thread_flag(TIF_SINGLESTEP))
			return NOTIFY_DONE;

		/* Low byte of trapnr is the trap vector */
		ret = kgdb_handle_exception(args->trapnr & 0xff, args->signr,
					    args->err, args->regs);
		if (ret)
			return NOTIFY_DONE;

		break;
	}

	return NOTIFY_STOP;
}
| 326 | |
/*
 * Die-notifier entry point: wrap __kgdb_notify() with interrupts
 * disabled so the debugger is entered atomically on this CPU.
 */
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
	unsigned long flags;
	int rc;

	local_irq_save(flags);
	rc = __kgdb_notify(ptr, cmd);
	local_irq_restore(flags);

	return rc;
}
| 339 | |
/* Die-chain registration for kgdb_notify(). */
static struct notifier_block kgdb_notifier = {
	.notifier_call = kgdb_notify,

	/*
	 * Lowest-prio notifier priority, we want to be notified last:
	 */
	.priority = -INT_MAX,
};
| 348 | |
/* Hook kgdb into the die-notifier chain; 0 on success. */
int kgdb_arch_init(void)
{
	return register_die_notifier(&kgdb_notifier);
}
| 353 | |
/* Unhook kgdb from the die-notifier chain. */
void kgdb_arch_exit(void)
{
	unregister_die_notifier(&kgdb_notifier);
}
| 358 | |
/* Arch hooks for the kgdb core. */
struct kgdb_arch arch_kgdb_ops = {
	/* Breakpoint instruction: trapa #0x3c (byte order follows CPU endianness) */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	.gdb_bpt_instr = { 0x3c, 0xc3 },
#else
	.gdb_bpt_instr = { 0xc3, 0x3c },
#endif
};