#include <gelf.h>
#include <sys/ptrace.h>
#include <errno.h>
#include <error.h>
#include <inttypes.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>	/* malloc, free, abort */

#include "proc.h"
#include "common.h"
#include "library.h"
#include "breakpoint.h"
#include "linux-gnu/trace.h"

/* There are two PLT types on 32-bit PPC: old-style BSS PLT, and
 * new-style "secure" PLT. We can tell one from the other by the
 * flags on the .plt section. If it's +X (executable), it's BSS PLT,
 * otherwise it's secure.
 *
 * BSS PLT works the same way as on most architectures: the .plt
 * section contains trampolines and we put breakpoints on those. If
 * the binary is not prelinked, .plt contains zeroes, and the dynamic
 * linker fills in the initial set of trampolines, which means that
 * we need to delay enabling breakpoints until after the binary's
 * entry point is hit. Additionally, after the first call, the
 * dynamic linker updates the .plt entry with a branch to the
 * resolved address. That means that on the first hit, we must do
 * something similar to the PPC64 gambit described below.
 *
 * With secure PLT, the .plt section doesn't contain instructions but
 * addresses. The real PLT table is stored in .text. Addresses of
 * those PLT entries can be computed, and apart from the fact that
 * they are in .text, they are ordinary PLT entries.
 *
 * 64-bit PPC is more involved. The program linker creates for each
 * library call a _stub_ symbol named xxxxxxxx.plt_call.<callee>
 * (where xxxxxxxx is a hexadecimal number). That stub does the call
 * dispatch: it loads the address of the function to call from the
 * .plt section, and branches there. PLT entries themselves are
 * essentially a curried call to the resolver. When the symbol is
 * resolved, the resolver updates the value stored in .plt, and from
 * then on the stub calls the library function directly. So we make
 * at most one trip (none if the binary is prelinked) through each
 * PLT entry, which makes the PLT entry itself useless as a
 * breakpoint site.
 *
 * Note the three confusingly similar terms: stubs (which play the
 * role of PLT entries), PLT entries, and the .plt section.
 *
 * We first check the symbol tables and see whether we happen to have
 * stub symbols available. If so, we just put breakpoints on those,
 * and treat them as usual breakpoints. The only tricky part is
 * realizing that there can be more than one breakpoint per symbol.
 *
 * The case where we don't have the stub symbols available is harder.
 * The following scheme uses two kinds of PLT breakpoints: unresolved
 * and resolved (to some address). When the process starts (or when
 * we attach), we distribute unresolved PLT breakpoints over the PLT
 * entries (not the stubs). Then we look in .plt, and for each entry
 * whose value differs from the corresponding PLT entry address, we
 * assume it was already resolved, and convert the breakpoint to
 * resolved. We also rewrite the resolved value in .plt back to the
 * PLT address.
 *
 * When a PLT entry hits a resolved breakpoint (which happens because
 * we rewrite .plt with the original unresolved addresses), we move
 * the instruction pointer to the corresponding address and continue
 * the process as if nothing happened.
 *
 * When an unresolved PLT entry is called for the first time, we need
 * to catch the new value that the resolver will write to the .plt
 * slot. We also need to prevent another thread from racing through
 * and taking the branch without ltrace noticing. So when an
 * unresolved PLT breakpoint hits, we have to stop all threads. We
 * then single-step through the resolver until the .plt slot changes.
 * When it does, we treat it the same way as above: convert the PLT
 * breakpoint to resolved, and rewrite the .plt value back to the PLT
 * address. We then start all threads again.
 *
 * As an optimization, we remember the address of the instruction
 * that wrote the resolved value, and put a breakpoint there. The
 * next time around (when the next PLT entry is to be resolved),
 * instead of single-stepping through half the dynamic linker, we
 * just let the thread run and hit this breakpoint. When it hits, we
 * know the PLT entry was resolved.
 *
 * N.B. It's tempting to try to emulate the instruction that updates
 * .plt. We would compute the resolved address, and instead of
 * letting the dynamic linker put it in .plt, we would resolve the
 * breakpoint to that address. This way we wouldn't need to stop
 * other threads. However, that instruction may turn out to be a
 * sync, and in general may be any instruction between the actual
 * write and the following sync. XXX TODO that means that we need to
 * put the post-enable breakpoint at the following sync, not at the
 * instruction itself (unless it's a sync already).
 *
 * XXX TODO If we have a hardware watchpoint, we might put a read
 * watch on the .plt slot and discover the offenders that way. I
 * don't know the details, but I assume at most a handful (like, one
 * or two, if available at all) of addresses may be watched at a
 * time, and thus this would be used as an amendment of the above
 * rather than a full-on solution to PLT tracing on PPC.
 */
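
/* To make the secure-PLT arithmetic below concrete, here is a sketch
 * with made-up numbers: if the glink VMA is 0x10000400 and .rela.plt
 * holds three entries, arch_elf_init places the first stub at
 * 0x10000400 - 3 * PPC_PLT_STUB_SIZE = 0x100003d0, and
 * arch_plt_sym_val then returns 0x100003d0 + 16 * ndx for entry ndx.
 * Only the arithmetic mirrors the code; the addresses are purely
 * illustrative. */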

#define PPC_PLT_STUB_SIZE 16
#define PPC64_PLT_STUB_SIZE 8 //xxx

static inline int
host_powerpc64(void)
{
#ifdef __powerpc64__
	return 1;
#else
	return 0;
#endif
}

static int
read_target_4(struct Process *proc, target_address_t addr, uint32_t *lp)
{
	unsigned long l = ptrace(PTRACE_PEEKTEXT, proc->pid, addr, 0);
	if (l == -1UL && errno)
		return -1;
	if (host_powerpc64())
		/* PEEKTEXT reads a full 64-bit word on a 64-bit
		 * host.  PPC is big-endian, so the 32-bit value at
		 * ADDR sits in the upper half.  */
		l >>= 32;
	*lp = l;
	return 0;
}

static int
read_target_8(struct Process *proc, target_address_t addr, uint64_t *lp)
{
	assert(host_powerpc64());
	unsigned long l = ptrace(PTRACE_PEEKTEXT, proc->pid, addr, 0);
	if (l == -1UL && errno)
		return -1;
	*lp = l;
	return 0;
}

static int
read_target_long(struct Process *proc, target_address_t addr, uint64_t *lp)
{
	if (proc->e_machine == EM_PPC) {
		uint32_t w;
		int ret = read_target_4(proc, addr, &w);
		if (ret >= 0)
			*lp = (uint64_t)w;
		return ret;
	} else {
		return read_target_8(proc, addr, lp);
	}
}

static enum callback_status
reenable_breakpoint(struct Process *proc, struct breakpoint *bp, void *data)
{
	/* We don't need to re-enable non-PLT breakpoints, nor
	 * breakpoints in objects other than unprelinked PPC32
	 * BSS-PLT ones.  */
	if (bp->libsym == NULL
	    || bp->libsym->plt_type == LS_TOPLT_NONE
	    || bp->libsym->lib->arch.bss_plt_prelinked != 0)
		return CBS_CONT;

	debug(DEBUG_PROCESS, "pid=%d reenable_breakpoint %s",
	      proc->pid, breakpoint_name(bp));

	assert(proc->e_machine == EM_PPC);
	uint32_t l;
	if (read_target_4(proc, bp->addr, &l) < 0) {
		error(0, errno, "couldn't read PLT value for %s(%p)",
		      breakpoint_name(bp), bp->addr);
		return CBS_CONT;
	}
	bp->libsym->arch.plt_slot_addr = (GElf_Addr)bp->addr;
	bp->libsym->arch.resolved_value = l;

	/* Re-enable the breakpoint that was overwritten by the
	 * dynamic linker.  XXX unfortunately it's overwritten
	 * again after the first call :-/  */
	enable_breakpoint(proc, bp);

	return CBS_CONT;
}

void
arch_dynlink_done(struct Process *proc)
{
	/* On PPC32, the .plt of objects that use BSS PLT is
	 * overwritten by the dynamic linker (unless the object was
	 * prelinked).  We need to re-enable breakpoints in those
	 * objects.  */
	proc_each_breakpoint(proc, NULL, reenable_breakpoint, NULL);
}

GElf_Addr
arch_plt_sym_val(struct ltelf *lte, size_t ndx, GElf_Rela *rela)
{
	if (lte->ehdr.e_machine == EM_PPC && lte->arch.secure_plt) {
		assert(lte->arch.plt_stub_vma != 0);
		return lte->arch.plt_stub_vma + PPC_PLT_STUB_SIZE * ndx;

	} else if (lte->ehdr.e_machine == EM_PPC) {
		return rela->r_offset;

	} else {
		/* If we get here, we don't have stub symbols.  In
		 * that case we put breakpoints on PLT entries the
		 * same way the PPC32 secure PLT case does.  */
		assert(lte->arch.plt_stub_vma != 0);
		return lte->arch.plt_stub_vma + PPC64_PLT_STUB_SIZE * ndx;
	}
}

int
arch_translate_address(struct Process *proc,
		       target_address_t addr, target_address_t *ret)
{
	if (proc->e_machine == EM_PPC64) {
		assert(host_powerpc64());
		long l = ptrace(PTRACE_PEEKTEXT, proc->pid, addr, 0);
		if (l == -1 && errno) {
			error(0, errno, ".opd translation of %p", addr);
			return -1;
		}
		*ret = (target_address_t)l;
		return 0;
	}

	*ret = addr;
	return 0;
}

void *
sym2addr(struct Process *proc, struct library_symbol *sym)
{
	return sym->enter_addr;
}

static GElf_Addr
get_glink_vma(struct ltelf *lte, GElf_Addr ppcgot, Elf_Data *plt_data)
{
	Elf_Scn *ppcgot_sec = NULL;
	GElf_Shdr ppcgot_shdr;
	if (ppcgot != 0
	    && elf_get_section_covering(lte, ppcgot,
					&ppcgot_sec, &ppcgot_shdr) < 0)
		error(0, 0, "DT_PPC_GOT=%#"PRIx64", but no such section found",
		      ppcgot);

	if (ppcgot_sec != NULL) {
		Elf_Data *data = elf_loaddata(ppcgot_sec, &ppcgot_shdr);
		if (data == NULL || data->d_size < 8) {
			error(0, 0, "couldn't read GOT data");
		} else {
			// where PPCGOT begins in .got
			size_t offset = ppcgot - ppcgot_shdr.sh_addr;
			assert(offset % 4 == 0);
			uint32_t glink_vma;
			if (elf_read_u32(data, offset + 4, &glink_vma) < 0) {
				error(0, 0, "couldn't read glink VMA address"
				      " at %zd@GOT", offset);
				return 0;
			}
			if (glink_vma != 0) {
				debug(1, "PPC GOT glink_vma address: %#" PRIx32,
				      glink_vma);
				return (GElf_Addr)glink_vma;
			}
		}
	}

	if (plt_data != NULL) {
		uint32_t glink_vma;
		if (elf_read_u32(plt_data, 0, &glink_vma) < 0) {
			error(0, 0, "couldn't read glink VMA address");
			return 0;
		}
		debug(1, ".plt glink_vma address: %#" PRIx32, glink_vma);
		return (GElf_Addr)glink_vma;
	}

	return 0;
}

static int
load_dynamic_entry(struct ltelf *lte, int tag, GElf_Addr *valuep)
{
	Elf_Scn *scn;
	GElf_Shdr shdr;
	if (elf_get_section_type(lte, SHT_DYNAMIC, &scn, &shdr) < 0
	    || scn == NULL) {
	fail:
		error(0, 0, "Couldn't get SHT_DYNAMIC: %s",
		      elf_errmsg(-1));
		return -1;
	}

	Elf_Data *data = elf_loaddata(scn, &shdr);
	if (data == NULL)
		goto fail;

	size_t j;
	for (j = 0; j < shdr.sh_size / shdr.sh_entsize; ++j) {
		GElf_Dyn dyn;
		if (gelf_getdyn(data, j, &dyn) == NULL)
			goto fail;

		if (dyn.d_tag == tag) {
			*valuep = dyn.d_un.d_ptr;
			return 0;
		}
	}

	return -1;
}

static int
load_ppcgot(struct ltelf *lte, GElf_Addr *ppcgotp)
{
	return load_dynamic_entry(lte, DT_PPC_GOT, ppcgotp);
}

static int
load_ppc64_glink(struct ltelf *lte, GElf_Addr *glinkp)
{
	return load_dynamic_entry(lte, DT_PPC64_GLINK, glinkp);
}

static int
nonzero_data(Elf_Data *data)
{
	/* We are not supposed to get here if there's no PLT.  */
	assert(data != NULL);

	unsigned char *buf = data->d_buf;
	if (buf == NULL)
		return 0;

	size_t i;
	for (i = 0; i < data->d_size; ++i)
		if (buf[i] != 0)
			return 1;
	return 0;
}

int
arch_elf_init(struct ltelf *lte, struct library *lib)
{
	lte->arch.secure_plt = !(lte->plt_flags & SHF_EXECINSTR);

	/* For PPC32 BSS PLT, it is important whether the binary was
	 * prelinked.  If the .plt section has no data, or if it
	 * contains only zeroes, then this library is not prelinked,
	 * and we need to delay breakpoints.  */
	if (lte->ehdr.e_machine == EM_PPC && !lte->arch.secure_plt)
		lib->arch.bss_plt_prelinked = nonzero_data(lte->plt_data);
	else
		/* For cases where it's irrelevant, initialize the
		 * value to something conspicuous.  */
		lib->arch.bss_plt_prelinked = -1;

	if (lte->ehdr.e_machine == EM_PPC && lte->arch.secure_plt) {
		GElf_Addr ppcgot;
		if (load_ppcgot(lte, &ppcgot) < 0) {
			error(0, 0, "couldn't find DT_PPC_GOT");
			return -1;
		}
		GElf_Addr glink_vma = get_glink_vma(lte, ppcgot, lte->plt_data);

		assert(lte->relplt_size % 12 == 0);
		size_t count = lte->relplt_size / 12; // 12 == sizeof(Elf32_Rela)
		lte->arch.plt_stub_vma = glink_vma
			- (GElf_Addr)count * PPC_PLT_STUB_SIZE;
		debug(1, "stub_vma is %#" PRIx64, lte->arch.plt_stub_vma);

	} else if (lte->ehdr.e_machine == EM_PPC64) {
		GElf_Addr glink_vma;
		if (load_ppc64_glink(lte, &glink_vma) < 0) {
			error(0, 0, "couldn't find DT_PPC64_GLINK");
			return -1;
		}

		/* The first glink stub starts at offset 32.  */
		lte->arch.plt_stub_vma = glink_vma + 32;
	}

	/* On PPC64, look for stub symbols in the symbol table.  These
	 * are called: xxxxxxxx.plt_call.callee_name@version+addend.  */
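	/* For example (hypothetical names), a stub might be called
	 * "00000018.plt_call.memcpy@@GLIBC_2.3", or, with an addend,
	 * "00000018.plt_call.foo+0".  The loop below strips
	 * everything up to and including ".plt_call." and then cuts
	 * the name at '@', or at a trailing "+0".  */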
	if (lte->ehdr.e_machine == EM_PPC64
	    && lte->symtab != NULL && lte->strtab != NULL) {

		/* N.B. We can't simply skip the symbols that we fail
		 * to read or malloc.  There may be more than one stub
		 * per symbol name, and if we failed in one but
		 * succeeded in another, the PLT enabling code would
		 * have no way to tell that something is missing.  We
		 * could work around that, of course, but it doesn't
		 * seem worth the trouble.  So if anything fails, we
		 * just pretend that we don't have stub symbols at
		 * all, as if the binary were stripped.  */

		size_t i;
		for (i = 0; i < lte->symtab_count; ++i) {
			GElf_Sym sym;
			if (gelf_getsym(lte->symtab, i, &sym) == NULL) {
				struct library_symbol *sym, *next;
			fail:
				for (sym = lte->arch.stubs; sym != NULL; ) {
					next = sym->next;
					library_symbol_destroy(sym);
					free(sym);
					sym = next;
				}
				lte->arch.stubs = NULL;
				break;
			}

			const char *name = lte->strtab + sym.st_name;

#define STUBN ".plt_call."
			if ((name = strstr(name, STUBN)) == NULL)
				continue;
			name += sizeof(STUBN) - 1;
#undef STUBN

			size_t len;
			const char *ver = strchr(name, '@');
			if (ver != NULL) {
				len = ver - name;

			} else {
				/* If there is a "+" at all, check
				 * that the symbol name ends in "+0".  */
				const char *add = strrchr(name, '+');
				if (add != NULL) {
					assert(strcmp(add, "+0") == 0);
					len = add - name;
				} else {
					len = strlen(name);
				}
			}

			char *sym_name = strndup(name, len);
			struct library_symbol *libsym = malloc(sizeof(*libsym));
			if (sym_name == NULL || libsym == NULL) {
			fail2:
				free(sym_name);
				free(libsym);
				goto fail;
			}

			/* XXX The double cast should be removed when
			 * target_address_t becomes an integral type.  */
			target_address_t addr = (target_address_t)
				(uintptr_t)sym.st_value + lte->bias;
			if (library_symbol_init(libsym, addr, sym_name, 1,
						LS_TOPLT_EXEC) < 0)
				goto fail2;
			libsym->arch.type = PPC64PLT_STUB;
			libsym->next = lte->arch.stubs;
			lte->arch.stubs = libsym;
		}
	}

	return 0;
}

static int
read_plt_slot_value(struct Process *proc, GElf_Addr addr, GElf_Addr *valp)
{
	/* On PPC64, we read from .plt, which contains 8-byte
	 * addresses.  On PPC32, we read from .plt, which contains
	 * 4-byte instructions.  So read_target_long is appropriate
	 * for both.  */
	uint64_t l;
	if (read_target_long(proc, (target_address_t)addr, &l) < 0) {
		error(0, errno, "ptrace .plt slot value @%#" PRIx64, addr);
		return -1;
	}

	*valp = (GElf_Addr)l;
	return 0;
}

static int
unresolve_plt_slot(struct Process *proc, GElf_Addr addr, GElf_Addr value)
{
	/* We only modify plt_entry[0], which holds the resolved
	 * address of the routine.  We keep the TOC and environment
	 * pointers intact.  Hence the only adjustment that we need to
	 * do is to the IP.  */
	if (ptrace(PTRACE_POKETEXT, proc->pid, addr, value) < 0) {
		error(0, errno, "unresolve .plt slot");
		return -1;
	}
	return 0;
}

static void
mark_as_resolved(struct library_symbol *libsym, GElf_Addr value)
{
	libsym->arch.type = PPC64PLT_RESOLVED;
	libsym->arch.resolved_value = value;
}

enum plt_status
arch_elf_add_plt_entry(struct Process *proc, struct ltelf *lte,
		       const char *a_name, GElf_Rela *rela, size_t ndx,
		       struct library_symbol **ret)
{
	if (lte->ehdr.e_machine == EM_PPC)
		return plt_default;

	/* PPC64.  If we have stubs, we return a chain of breakpoint
	 * sites, one for each stub that corresponds to this PLT
	 * entry.  */
	struct library_symbol *chain = NULL;
	struct library_symbol **symp;
	for (symp = &lte->arch.stubs; *symp != NULL; ) {
		struct library_symbol *sym = *symp;
		if (strcmp(sym->name, a_name) != 0) {
			symp = &(*symp)->next;
			continue;
		}

		/* Re-chain the symbol from stubs to CHAIN.  */
		*symp = sym->next;
		sym->next = chain;
		chain = sym;
	}

	if (chain != NULL) {
		*ret = chain;
		return plt_ok;
	}

	/* We don't have stub symbols.  Find the corresponding .plt
	 * slot, and check whether it contains the corresponding PLT
	 * address (or 0 if the dynamic linker hasn't run yet).
	 * N.B. we don't want to read this from the ELF file, but from
	 * the process image.  That makes a difference if we are
	 * attaching to a running process.  */

	GElf_Addr plt_entry_addr = arch_plt_sym_val(lte, ndx, rela);
	GElf_Addr plt_slot_addr = rela->r_offset;
	assert(plt_slot_addr >= lte->plt_addr
	       && plt_slot_addr < lte->plt_addr + lte->plt_size);

	GElf_Addr plt_slot_value;
	if (read_plt_slot_value(proc, plt_slot_addr, &plt_slot_value) < 0)
		return plt_fail;

	char *name = strdup(a_name);
	struct library_symbol *libsym = malloc(sizeof(*libsym));
	if (name == NULL || libsym == NULL) {
		error(0, errno, "allocation for .plt slot");
	fail:
		free(name);
		free(libsym);
		return plt_fail;
	}

	/* XXX The double cast should be removed when
	 * target_address_t becomes an integral type.  */
	if (library_symbol_init(libsym,
				(target_address_t)(uintptr_t)plt_entry_addr,
				name, 1, LS_TOPLT_EXEC) < 0)
		goto fail;
	libsym->arch.plt_slot_addr = plt_slot_addr;

	if (plt_slot_value == plt_entry_addr || plt_slot_value == 0) {
		libsym->arch.type = PPC64PLT_UNRESOLVED;
		libsym->arch.resolved_value = plt_entry_addr;

	} else {
		/* Unresolve the .plt slot.  If the binary was
		 * prelinked, this makes the code invalid, because in
		 * the case of a prelinked binary, the dynamic linker
		 * doesn't update .plt[0] and .plt[1] with addresses
		 * of the resolver.  But we don't care, we will never
		 * need to enter the resolver.  That just means that
		 * we have to un-un-resolve this back before we
		 * detach.  */

		if (unresolve_plt_slot(proc, plt_slot_addr, plt_entry_addr) < 0) {
			library_symbol_destroy(libsym);
			goto fail;
		}
		mark_as_resolved(libsym, plt_slot_value);
	}

	*ret = libsym;
	return plt_ok;
}

void
arch_elf_destroy(struct ltelf *lte)
{
	struct library_symbol *sym;
	for (sym = lte->arch.stubs; sym != NULL; ) {
		struct library_symbol *next = sym->next;
		library_symbol_destroy(sym);
		free(sym);
		sym = next;
	}
}

static void
dl_plt_update_bp_on_hit(struct breakpoint *bp, struct Process *proc)
{
	struct process_stopping_handler *self = proc->arch.handler;
	assert(self != NULL);

	struct library_symbol *libsym = self->breakpoint_being_enabled->libsym;
	GElf_Addr value;
	if (read_plt_slot_value(proc, libsym->arch.plt_slot_addr, &value) < 0)
		return;

	unresolve_plt_slot(proc, libsym->arch.plt_slot_addr,
			   libsym->arch.resolved_value);
	mark_as_resolved(libsym, value);

	/* cb_on_all_stopped checks whether HANDLER was set back to
	 * NULL as a way to verify that this hook was run.  It's an
	 * error if it wasn't.  */
	breakpoint_turn_off(bp, proc);
	proc->arch.handler = NULL;
}

static void
cb_on_all_stopped(struct process_stopping_handler *self)
{
	/* Put that in for dl_plt_update_bp_on_hit to see.  */
	assert(self->task_enabling_breakpoint->arch.handler == NULL);
	self->task_enabling_breakpoint->arch.handler = self;

	linux_ptrace_disable_and_continue(self);
}

static enum callback_status
cb_keep_stepping_p(struct process_stopping_handler *self)
{
	struct Process *proc = self->task_enabling_breakpoint;
	struct library_symbol *libsym = self->breakpoint_being_enabled->libsym;
	GElf_Addr value;
	if (read_plt_slot_value(proc, libsym->arch.plt_slot_addr, &value) < 0)
		return CBS_FAIL;

	/* In the UNRESOLVED state, RESOLVED_VALUE in fact contains
	 * the PLT entry value.  */
	if (value == libsym->arch.resolved_value)
		return CBS_CONT;

	/* The .plt slot got resolved!  We can migrate the breakpoint
	 * to RESOLVED and stop single-stepping.  */
	if (unresolve_plt_slot(proc, libsym->arch.plt_slot_addr,
			       libsym->arch.resolved_value) < 0)
		return CBS_FAIL;

	/* Install a breakpoint at the address where the change takes
	 * place.  If we fail, then that just means that we'll have to
	 * single-step the next time around as well.  */
	struct Process *leader = proc->leader;
	if (leader == NULL || leader->arch.dl_plt_update_bp != NULL)
		goto done;

	/* We need to install it at the next instruction.  ADDR points
	 * to a store instruction, so moving the breakpoint one
	 * instruction forward is safe.  */
	target_address_t addr = get_instruction_pointer(proc) + 4;
	leader->arch.dl_plt_update_bp = insert_breakpoint(proc, addr, NULL);
	if (leader->arch.dl_plt_update_bp == NULL)
		goto done;

	/* Turn it off for now.  We will turn it on again when we hit
	 * the PLT entry that needs this.  */
	breakpoint_turn_off(leader->arch.dl_plt_update_bp, proc);

	if (leader->arch.dl_plt_update_bp != NULL) {
		static struct bp_callbacks dl_plt_update_cbs = {
			.on_hit = dl_plt_update_bp_on_hit,
		};
		leader->arch.dl_plt_update_bp->cbs = &dl_plt_update_cbs;
	}

done:
	mark_as_resolved(libsym, value);

	return CBS_STOP;
}

static void
ppc_plt_bp_continue(struct breakpoint *bp, struct Process *proc)
{
	switch (bp->libsym->arch.type) {
		target_address_t rv;
		struct Process *leader;
		void (*on_all_stopped)(struct process_stopping_handler *);
		enum callback_status (*keep_stepping_p)
			(struct process_stopping_handler *);

	case PPC_DEFAULT:
		assert(proc->e_machine == EM_PPC);
		assert(bp->libsym != NULL);
		assert(bp->libsym->lib->arch.bss_plt_prelinked == 0);
		/* fall-through */

	case PPC64PLT_UNRESOLVED:
		on_all_stopped = NULL;
		keep_stepping_p = NULL;
		leader = proc->leader;

		if (leader != NULL && leader->arch.dl_plt_update_bp != NULL
		    && breakpoint_turn_on(leader->arch.dl_plt_update_bp,
					  proc) >= 0)
			on_all_stopped = cb_on_all_stopped;
		else
			keep_stepping_p = cb_keep_stepping_p;

		if (process_install_stopping_handler
		    (proc, bp, on_all_stopped, keep_stepping_p, NULL) < 0) {
			error(0, 0, "ppc_plt_bp_continue: couldn't install"
			      " event handler");
			continue_after_breakpoint(proc, bp);
		}
		return;

	case PPC64PLT_RESOLVED:
		/* XXX The double cast should be removed when
		 * target_address_t becomes an integral type.  */
		rv = (target_address_t)
			(uintptr_t)bp->libsym->arch.resolved_value;
		set_instruction_pointer(proc, rv);
		continue_process(proc->pid);
		return;

	case PPC64PLT_STUB:
		/* These should never hit here.  */
		break;
	}

	/* Fail loudly if we get here with a type we don't handle.  */
	assert(bp->libsym->arch.type != bp->libsym->arch.type);
	abort();
}

void
arch_library_init(struct library *lib)
{
}

void
arch_library_destroy(struct library *lib)
{
}

void
arch_library_clone(struct library *retp, struct library *lib)
{
}

int
arch_library_symbol_init(struct library_symbol *libsym)
{
	/* We set the type explicitly in the code above, where we have
	 * the necessary context.  This is for calls from ltrace-elf.c
	 * and such.  */
	libsym->arch.type = PPC_DEFAULT;
	return 0;
}

void
arch_library_symbol_destroy(struct library_symbol *libsym)
{
}

int
arch_library_symbol_clone(struct library_symbol *retp,
			  struct library_symbol *libsym)
{
	retp->arch = libsym->arch;
	return 0;
}

/* For some symbol types, we need to set up custom callbacks.  XXX we
 * don't need PROC here, we can store the data in BP if it is of
 * interest to us.  */
int
arch_breakpoint_init(struct Process *proc, struct breakpoint *bp)
{
	/* Artificial and entry-point breakpoints are plain.  */
	if (bp->libsym == NULL || bp->libsym->plt_type != LS_TOPLT_EXEC)
		return 0;

	/* On PPC, secure PLT and prelinked BSS PLT are plain.  */
	if (proc->e_machine == EM_PPC
	    && bp->libsym->lib->arch.bss_plt_prelinked != 0)
		return 0;

	/* On PPC64, stub PLT breakpoints are plain.  */
	if (proc->e_machine == EM_PPC64
	    && bp->libsym->arch.type == PPC64PLT_STUB)
		return 0;

	static struct bp_callbacks cbs = {
		.on_continue = ppc_plt_bp_continue,
	};
	breakpoint_set_callbacks(bp, &cbs);
	return 0;
}

void
arch_breakpoint_destroy(struct breakpoint *bp)
{
}

int
arch_breakpoint_clone(struct breakpoint *retp, struct breakpoint *sbp)
{
	retp->arch = sbp->arch;
	return 0;
}

int
arch_process_init(struct Process *proc)
{
	proc->arch.dl_plt_update_bp = NULL;
	proc->arch.handler = NULL;
	return 0;
}

void
arch_process_destroy(struct Process *proc)
{
}

int
arch_process_clone(struct Process *retp, struct Process *proc)
{
	retp->arch = proc->arch;
	return 0;
}

int
arch_process_exec(struct Process *proc)
{
	return arch_process_init(proc);
}