#include <gelf.h>
#include <sys/ptrace.h>
#include <errno.h>
#include <error.h>
#include <inttypes.h>
#include <assert.h>
#include <string.h>

#include "proc.h"
#include "common.h"
#include "library.h"
#include "breakpoint.h"
#include "linux-gnu/trace.h"

/* There are two PLT types on 32-bit PPC: old-style, BSS PLT, and
 * new-style "secure" PLT.  We can tell one from the other by the
 * flags on the .plt section.  If it's +X (executable), it's BSS PLT,
 * otherwise it's secure.
 *
 * BSS PLT works the same way as most architectures: the .plt section
 * contains trampolines and we put breakpoints on those.  With secure
 * PLT, the .plt section doesn't contain instructions but addresses.
 * The real PLT table is stored in .text.  Addresses of those PLT
 * entries can be computed, and in fact that's what the glink handling
 * below does.
 *
 * If not prelinked, BSS PLT entries in the .plt section contain
 * zeroes that are overwritten by the dynamic linker during start-up.
 * For that reason, ltrace realizes those breakpoints only after
 * .start is hit.
 *
 * 64-bit PPC is more involved.  The program linker creates for each
 * library call a _stub_ symbol named xxxxxxxx.plt_call.<callee>
 * (where xxxxxxxx is a hexadecimal number).  That stub does the call
 * dispatch: it loads the address of the function to call from the
 * .plt section, and branches.  PLT entries themselves are essentially
 * a curried call to the resolver.  When the symbol is resolved, the
 * resolver updates the value stored in .plt, and the next time
 * around, the stub calls the library function directly.  So we make
 * at most one trip (none if the binary is prelinked) through each PLT
 * entry, which makes the entry itself useless as a breakpoint site.
 *
 * Note the three confusing terms: stubs (which play the role of PLT
 * entries), PLT entries, and the .plt section.
 *
 * We first check the symbol tables and see if we happen to have stub
 * symbols available.  If yes, we just put breakpoints on those, and
 * treat them as usual breakpoints.  The only tricky part is realizing
 * that there can be more than one breakpoint per symbol.
 *
 * The case where we don't have the stub symbols available is harder.
 * The following scheme uses two kinds of PLT breakpoints: unresolved
 * and resolved (to some address).  When the process starts (or when
 * we attach), we distribute unresolved PLT breakpoints to the PLT
 * entries (not stubs).  Then we look in .plt, and for each entry
 * whose value is different from the corresponding PLT entry address,
 * we assume it was already resolved, and convert the breakpoint to
 * resolved.  We also rewrite the resolved value in .plt back to the
 * PLT address.
 *
 * When a PLT entry hits a resolved breakpoint (which happens because
 * we rewrite .plt with the original unresolved addresses), we move
 * the instruction pointer to the resolved address and continue the
 * process as if nothing happened.
 *
 * When an unresolved PLT entry is called for the first time, we need
 * to catch the new value that the resolver will write to the .plt
 * slot.  We also need to prevent another thread from racing through
 * and taking the branch without ltrace noticing.  So when an
 * unresolved PLT breakpoint hits, we have to stop all threads.  We
 * then single-step through the resolver, until the .plt slot changes.
 * When it does, we treat it the same way as above: convert the PLT
 * breakpoint to resolved, and rewrite the .plt value back to the PLT
 * address.  We then start all threads again.
 *
 * As an optimization, we remember the address where the .plt slot was
 * updated, and put a breakpoint there.  The next time around (when
 * the next PLT entry is to be resolved), instead of single-stepping
 * through half the dynamic linker, we just let the thread run and hit
 * this breakpoint.  When it hits, we know the PLT entry was resolved.
 *
 * XXX TODO As an additional optimization, after the above is done, we
 * might emulate the instruction that updates .plt.  We would compute
 * the resolved address, and instead of letting the dynamic linker put
 * it in .plt, we would resolve the breakpoint to that address.  This
 * way we wouldn't need to stop other threads.  Otherwise there's no
 * way around that.  Unless we know where the stubs are, we don't have
 * a way to catch a thread that would use the window of opportunity
 * between updating .plt and notifying ltrace that it happened.
 *
 * XXX TODO If we have hardware watchpoints, we might put a read watch
 * on the .plt slot, and discover the offenders this way.  I don't
 * know the details, but I assume at most a handful (like, one or two,
 * if available at all) of addresses may be watched at a time, and
 * thus this would be used as an amendment of the above rather than a
 * full-on solution to PLT tracing on PPC.
 */

#define PPC_PLT_STUB_SIZE 16
#define PPC64_PLT_STUB_SIZE 8 //xxx

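/* Return non-zero if ltrace itself was compiled for 64-bit PowerPC,
 * zero otherwise. */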
static inline int
host_powerpc64()
{
#ifdef __powerpc64__
	return 1;
#else
	return 0;
#endif
}

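/* Compute the breakpoint site for the NDX-th PLT relocation: a glink
 * stub address for PPC32 secure PLT and for PPC64, or the .plt
 * trampoline recorded in the relocation (rela->r_offset) for PPC32
 * BSS PLT. */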
GElf_Addr
arch_plt_sym_val(struct ltelf *lte, size_t ndx, GElf_Rela *rela)
{
	if (lte->ehdr.e_machine == EM_PPC && lte->arch.secure_plt) {
		assert(lte->arch.plt_stub_vma != 0);
		return lte->arch.plt_stub_vma + PPC_PLT_STUB_SIZE * ndx;

	} else if (lte->ehdr.e_machine == EM_PPC) {
		return rela->r_offset;

	} else {
		/* If we get here, we don't have stub symbols.  In
		 * that case we put breakpoints to PLT entries the
		 * same as the PPC32 secure PLT case does. */
		assert(lte->arch.plt_stub_vma != 0);
		return lte->arch.plt_stub_vma + PPC64_PLT_STUB_SIZE * ndx;
	}
}

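/* On PPC64, function symbols refer to function descriptors stored in
 * .opd rather than to code.  Translate ADDR by reading the first
 * doubleword of the descriptor from the process image; on 32-bit PPC
 * the address is passed through unchanged. */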
int
arch_translate_address(struct Process *proc,
		       target_address_t addr, target_address_t *ret)
{
	if (proc->e_machine == EM_PPC64) {
		assert(host_powerpc64());
		long l = ptrace(PTRACE_PEEKTEXT, proc->pid, addr, 0);
		if (l == -1 && errno) {
			error(0, errno, ".opd translation of %p", addr);
			return -1;
		}
		*ret = (target_address_t)l;
		return 0;
	}

	*ret = addr;
	return 0;
}

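/* No translation is needed here; the symbol already carries the
 * address where the breakpoint should go. */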
void *
sym2addr(struct Process *proc, struct library_symbol *sym)
{
	return sym->enter_addr;
}

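/* Find the run-time address of the glink code on PPC32 secure PLT:
 * read the word at PPCGOT+4, and if that is unavailable, fall back to
 * the first word of the .plt section. */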
static GElf_Addr
get_glink_vma(struct ltelf *lte, GElf_Addr ppcgot, Elf_Data *plt_data)
{
	Elf_Scn *ppcgot_sec = NULL;
	GElf_Shdr ppcgot_shdr;
	if (ppcgot != 0
	    && elf_get_section_covering(lte, ppcgot,
					&ppcgot_sec, &ppcgot_shdr) < 0)
		error(0, 0, "DT_PPC_GOT=%#"PRIx64", but no such section found",
		      ppcgot);

	if (ppcgot_sec != NULL) {
		Elf_Data *data = elf_loaddata(ppcgot_sec, &ppcgot_shdr);
		if (data == NULL || data->d_size < 8) {
			error(0, 0, "couldn't read GOT data");
		} else {
			// where PPCGOT begins in .got
			size_t offset = ppcgot - ppcgot_shdr.sh_addr;
			assert(offset % 4 == 0);
			uint32_t glink_vma;
			if (elf_read_u32(data, offset + 4, &glink_vma) < 0) {
				error(0, 0, "couldn't read glink VMA address"
				      " at %zd@GOT", offset);
				return 0;
			}
			if (glink_vma != 0) {
				debug(1, "PPC GOT glink_vma address: %#" PRIx32,
				      glink_vma);
				return (GElf_Addr)glink_vma;
			}
		}
	}

	if (plt_data != NULL) {
		uint32_t glink_vma;
		if (elf_read_u32(plt_data, 0, &glink_vma) < 0) {
			error(0, 0, "couldn't read glink VMA address");
			return 0;
		}
		debug(1, ".plt glink_vma address: %#" PRIx32, glink_vma);
		return (GElf_Addr)glink_vma;
	}

	return 0;
}

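/* Scan the SHT_DYNAMIC section for TAG and store its d_ptr value in
 * *VALUEP.  Returns 0 on success, -1 when the tag is absent or the
 * section can't be read. */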
static int
load_dynamic_entry(struct ltelf *lte, int tag, GElf_Addr *valuep)
{
	Elf_Scn *scn;
	GElf_Shdr shdr;
	if (elf_get_section_type(lte, SHT_DYNAMIC, &scn, &shdr) < 0
	    || scn == NULL) {
	fail:
		error(0, 0, "Couldn't get SHT_DYNAMIC: %s",
		      elf_errmsg(-1));
		return -1;
	}

	Elf_Data *data = elf_loaddata(scn, &shdr);
	if (data == NULL)
		goto fail;

	size_t j;
	for (j = 0; j < shdr.sh_size / shdr.sh_entsize; ++j) {
		GElf_Dyn dyn;
		if (gelf_getdyn(data, j, &dyn) == NULL)
			goto fail;

		if (dyn.d_tag == tag) {
			*valuep = dyn.d_un.d_ptr;
			return 0;
		}
	}

	return -1;
}

static int
load_ppcgot(struct ltelf *lte, GElf_Addr *ppcgotp)
{
	return load_dynamic_entry(lte, DT_PPC_GOT, ppcgotp);
}

static int
load_ppc64_glink(struct ltelf *lte, GElf_Addr *glinkp)
{
	return load_dynamic_entry(lte, DT_PPC64_GLINK, glinkp);
}

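/* Figure out where the PLT stubs live (PPC32 secure PLT and PPC64),
 * and on PPC64 collect any xxxxxxxx.plt_call.<callee> stub symbols
 * from the symbol table into lte->arch.stubs. */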
int
arch_elf_init(struct ltelf *lte)
{
	lte->arch.secure_plt = !(lte->plt_flags & SHF_EXECINSTR);
	if (lte->ehdr.e_machine == EM_PPC && lte->arch.secure_plt) {
		GElf_Addr ppcgot;
		if (load_ppcgot(lte, &ppcgot) < 0) {
			error(0, 0, "couldn't find DT_PPC_GOT");
			return -1;
		}
		GElf_Addr glink_vma = get_glink_vma(lte, ppcgot, lte->plt_data);

		assert(lte->relplt_size % 12 == 0);
		size_t count = lte->relplt_size / 12; // size of RELA entry
		lte->arch.plt_stub_vma = glink_vma
			- (GElf_Addr)count * PPC_PLT_STUB_SIZE;
		debug(1, "stub_vma is %#" PRIx64, lte->arch.plt_stub_vma);

	} else if (lte->ehdr.e_machine == EM_PPC64) {
		GElf_Addr glink_vma;
		if (load_ppc64_glink(lte, &glink_vma) < 0) {
			error(0, 0, "couldn't find DT_PPC64_GLINK");
			return -1;
		}

		/* The first glink stub starts at offset 32. */
		lte->arch.plt_stub_vma = glink_vma + 32;
	}

	/* On PPC64, look for stub symbols in the symbol table.  These
	 * are called: xxxxxxxx.plt_call.callee_name@version+addend. */
	if (lte->ehdr.e_machine == EM_PPC64
	    && lte->symtab != NULL && lte->strtab != NULL) {

		/* N.B. We can't simply skip the symbols that we fail
		 * to read or malloc.  There may be more than one stub
		 * per symbol name, and if we failed in one but
		 * succeeded in another, the PLT enabling code would
		 * have no way to tell that something is missing.  We
		 * could work around that, of course, but it doesn't
		 * seem worth the trouble.  So if anything fails, we
		 * just pretend that we don't have stub symbols at
		 * all, as if the binary is stripped. */

		size_t i;
		for (i = 0; i < lte->symtab_count; ++i) {
			GElf_Sym sym;
			if (gelf_getsym(lte->symtab, i, &sym) == NULL) {
				struct library_symbol *sym, *next;
			fail:
				for (sym = lte->arch.stubs; sym != NULL; ) {
					next = sym->next;
					library_symbol_destroy(sym);
					free(sym);
					sym = next;
				}
				lte->arch.stubs = NULL;
				break;
			}

			const char *name = lte->strtab + sym.st_name;

#define STUBN ".plt_call."
			if ((name = strstr(name, STUBN)) == NULL)
				continue;
			name += sizeof(STUBN) - 1;
#undef STUBN

			size_t len;
			const char *ver = strchr(name, '@');
			if (ver != NULL) {
				len = ver - name;

			} else {
				/* If there is "+" at all, check that
				 * the symbol name ends in "+0". */
				const char *add = strrchr(name, '+');
				if (add != NULL) {
					assert(strcmp(add, "+0") == 0);
					len = add - name;
				} else {
					len = strlen(name);
				}
			}

			char *sym_name = strndup(name, len);
			struct library_symbol *libsym = malloc(sizeof(*libsym));
			if (sym_name == NULL || libsym == NULL) {
			fail2:
				free(sym_name);
				free(libsym);
				goto fail;
			}

			/* XXX The double cast should be removed when
			 * target_address_t becomes an integral type. */
			target_address_t addr = (target_address_t)
				(uintptr_t)sym.st_value + lte->bias;
			if (library_symbol_init(libsym, addr, sym_name, 1,
						LS_TOPLT_EXEC) < 0)
				goto fail2;
			libsym->arch.type = PPC64PLT_STUB;
			libsym->next = lte->arch.stubs;
			lte->arch.stubs = libsym;
		}
	}

	return 0;
}

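/* Read the current value of the .plt slot at ADDR from the process
 * image into *VALP.  Returns 0 on success, -1 on ptrace failure. */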
static int
read_plt_slot_value(struct Process *proc, GElf_Addr addr, GElf_Addr *valp)
{
	/* On PPC32 we need to do things differently, but PPC64/PPC32
	 * is currently not supported anyway. */
	assert(host_powerpc64());

	long l = ptrace(PTRACE_PEEKTEXT, proc->pid, addr, 0);
	if (l == -1 && errno != 0) {
		error(0, errno, "ptrace .plt slot value @%#" PRIx64, addr);
		return -1;
	}

	*valp = (GElf_Addr)l;
	return 0;
}

static int
unresolve_plt_slot(struct Process *proc, GElf_Addr addr, GElf_Addr value)
{
	/* We only modify plt_entry[0], which holds the resolved
	 * address of the routine.  We keep the TOC and environment
	 * pointers intact.  Hence the only adjustment that we need to
	 * do is to IP. */
	if (ptrace(PTRACE_POKETEXT, proc->pid, addr, value) < 0) {
		error(0, errno, "unresolve .plt slot");
		return -1;
	}
	return 0;
}

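/* Called for each PLT relocation.  On PPC64, either hand back the
 * chain of matching stub symbols, or, lacking stubs, synthesize a
 * breakpoint on the PLT entry itself and classify it as resolved or
 * unresolved by inspecting the live .plt slot. */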
enum plt_status
arch_elf_add_plt_entry(struct Process *proc, struct ltelf *lte,
		       const char *a_name, GElf_Rela *rela, size_t ndx,
		       struct library_symbol **ret)
{
	if (lte->ehdr.e_machine == EM_PPC)
		return plt_default;

	/* PPC64.  If we have stubs, we return a chain of breakpoint
	 * sites, one for each stub that corresponds to this PLT
	 * entry. */
	struct library_symbol *chain = NULL;
	struct library_symbol **symp;
	for (symp = &lte->arch.stubs; *symp != NULL; ) {
		struct library_symbol *sym = *symp;
		if (strcmp(sym->name, a_name) != 0) {
			symp = &(*symp)->next;
			continue;
		}

		/* Re-chain the symbol from stubs to CHAIN. */
		*symp = sym->next;
		sym->next = chain;
		chain = sym;
	}

	if (chain != NULL) {
		*ret = chain;
		return plt_ok;
	}

	/* We don't have stub symbols.  Find the corresponding .plt
	 * slot, and check whether it contains the corresponding PLT
	 * address (or 0 if the dynamic linker hasn't run yet).
	 * N.B. we don't want to read this from the ELF file, but from
	 * the process image.  That makes a difference if we are
	 * attaching to a running process. */

	GElf_Addr plt_entry_addr = arch_plt_sym_val(lte, ndx, rela);
	GElf_Addr plt_slot_addr = rela->r_offset;
	assert(plt_slot_addr >= lte->plt_addr
	       && plt_slot_addr < lte->plt_addr + lte->plt_size);
429
Petr Machata58b2d0f2012-03-28 02:19:20 +0200430 GElf_Addr plt_slot_value;
431 if (read_plt_slot_value(proc, plt_slot_addr, &plt_slot_value) < 0)
Petr Machatab64b5c72012-03-27 03:19:42 +0200432 return plt_fail;
Petr Machatab64b5c72012-03-27 03:19:42 +0200433
434 char *name = strdup(a_name);
435 struct library_symbol *libsym = malloc(sizeof(*libsym));
436 if (name == NULL || libsym == NULL) {
437 error(0, errno, "allocation for .plt slot");
438 fail:
439 free(name);
440 free(libsym);
441 return plt_fail;
442 }
443
Petr Machataea8eb9a2012-04-17 01:32:07 +0200444 /* XXX The double cast should be removed when
445 * target_address_t becomes integral type. */
446 if (library_symbol_init(libsym,
447 (target_address_t)(uintptr_t)plt_entry_addr,
Petr Machatae8d90762012-04-15 04:28:31 +0200448 name, 1, LS_TOPLT_EXEC) < 0)
449 goto fail;
Petr Machata58b2d0f2012-03-28 02:19:20 +0200450 libsym->arch.plt_slot_addr = plt_slot_addr;
451
452 if (plt_slot_value == plt_entry_addr || plt_slot_value == 0) {
Petr Machatab64b5c72012-03-27 03:19:42 +0200453 libsym->arch.type = PPC64PLT_UNRESOLVED;
Petr Machata58b2d0f2012-03-28 02:19:20 +0200454 libsym->arch.resolved_value = plt_entry_addr;
455
Petr Machatab64b5c72012-03-27 03:19:42 +0200456 } else {
457 /* Unresolve the .plt slot. If the binary was
458 * prelinked, this makes the code invalid, because in
459 * case of prelinked binary, the dynamic linker
460 * doesn't update .plt[0] and .plt[1] with addresses
461 * of the resover. But we don't care, we will never
462 * need to enter the resolver. That just means that
463 * we have to un-un-resolve this back before we
Petr Machata19c0f292012-04-15 19:09:02 +0200464 * detach. */
Petr Machata58b2d0f2012-03-28 02:19:20 +0200465
Petr Machatae5ebe212012-04-15 04:41:13 +0200466 if (unresolve_plt_slot(proc, plt_slot_addr, plt_entry_addr) < 0) {
467 library_symbol_destroy(libsym);
Petr Machatab64b5c72012-03-27 03:19:42 +0200468 goto fail;
Petr Machatae5ebe212012-04-15 04:41:13 +0200469 }
Petr Machatab64b5c72012-03-27 03:19:42 +0200470 libsym->arch.type = PPC64PLT_RESOLVED;
Petr Machata58b2d0f2012-03-28 02:19:20 +0200471 libsym->arch.resolved_value = plt_slot_value;
Petr Machatab64b5c72012-03-27 03:19:42 +0200472 }
473
474 *ret = libsym;
475 return plt_ok;
Petr Machata37d368e2012-03-24 04:58:08 +0100476}

void
arch_elf_destroy(struct ltelf *lte)
{
	struct library_symbol *sym;
	for (sym = lte->arch.stubs; sym != NULL; ) {
		struct library_symbol *next = sym->next;
		library_symbol_destroy(sym);
		free(sym);
		sym = next;
	}
}

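/* on_hit callback for the breakpoint planted in the dynamic linker
 * just past the instruction that updates a .plt slot (see
 * cb_keep_stepping_p).  Read the slot back, turn the breakpoint off
 * again, and clear PROC->arch.handler to signal that this ran. */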
static void
dl_plt_update_bp_on_hit(struct breakpoint *bp, struct Process *proc)
{
	struct process_stopping_handler *self = proc->arch.handler;
	assert(self != NULL);

	struct library_symbol *libsym = self->breakpoint_being_enabled->libsym;
	GElf_Addr value;
	if (read_plt_slot_value(proc, libsym->arch.plt_slot_addr, &value) < 0)
		return;

	/* cb_on_all_stopped checks whether HANDLER is set to NULL as
	 * a way to verify that this was run.  It's an error if it
	 * wasn't. */
	breakpoint_turn_off(bp, proc);
	proc->arch.handler = NULL;
}

static void
cb_on_all_stopped(struct process_stopping_handler *self)
{
	/* Put that in for dl_plt_update_bp_on_hit to see. */
	assert(self->task_enabling_breakpoint->arch.handler == NULL);
	self->task_enabling_breakpoint->arch.handler = self;

	linux_ptrace_disable_and_continue(self);
}

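/* Callback for single-stepping through the resolver: keep stepping
 * while the .plt slot still holds the PLT entry address.  Once it
 * changes, un-resolve the slot again, try to plant the
 * dl_plt_update_bp breakpoint right after the updating instruction,
 * and mark the symbol as resolved. */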
static enum callback_status
cb_keep_stepping_p(struct process_stopping_handler *self)
{
	struct Process *proc = self->task_enabling_breakpoint;
	struct library_symbol *libsym = self->breakpoint_being_enabled->libsym;
	GElf_Addr value;
	if (read_plt_slot_value(proc, libsym->arch.plt_slot_addr, &value) < 0)
		return CBS_FAIL;

	/* In UNRESOLVED state, the RESOLVED_VALUE in fact contains
	 * the PLT entry value. */
	if (value == libsym->arch.resolved_value)
		return CBS_CONT;

	/* The .plt slot got resolved!  We can migrate the breakpoint
	 * to RESOLVED and stop single-stepping. */
	if (unresolve_plt_slot(proc, libsym->arch.plt_slot_addr,
			       libsym->arch.resolved_value) < 0)
		return CBS_FAIL;

	/* Install a breakpoint at the address where the change takes
	 * place.  If we fail, then that just means that we'll have to
	 * single-step the next time around as well. */
	struct Process *leader = proc->leader;
	if (leader == NULL || leader->arch.dl_plt_update_bp != NULL)
		goto resolve;

	/* We need to install the breakpoint at the next instruction.
	 * ADDR points to a store instruction, so moving the
	 * breakpoint one instruction forward is safe. */
	target_address_t addr = get_instruction_pointer(proc) + 4;
	leader->arch.dl_plt_update_bp = insert_breakpoint(proc, addr, NULL);
	if (leader->arch.dl_plt_update_bp == NULL)
		goto resolve;

	static struct bp_callbacks dl_plt_update_cbs = {
		.on_hit = dl_plt_update_bp_on_hit,
	};
	leader->arch.dl_plt_update_bp->cbs = &dl_plt_update_cbs;

	/* Turn it off for now.  We will turn it on again when we hit
	 * the PLT entry that needs this. */
	breakpoint_turn_off(leader->arch.dl_plt_update_bp, proc);

resolve:
	libsym->arch.type = PPC64PLT_RESOLVED;
	libsym->arch.resolved_value = value;

	return CBS_STOP;
}

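/* on_continue callback for PPC64 PLT breakpoints.  For an unresolved
 * entry, stop all threads and either wait for the dl_plt_update_bp
 * breakpoint or fall back to single-stepping through the resolver.
 * For a resolved entry, jump straight to the saved resolved address
 * and let the process run. */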
static void
ppc64_plt_bp_continue(struct breakpoint *bp, struct Process *proc)
{
	switch (bp->libsym->arch.type) {
		target_address_t rv;
		struct Process *leader;
		void (*on_all_stopped)(struct process_stopping_handler *);
		enum callback_status (*keep_stepping_p)
			(struct process_stopping_handler *);

	case PPC64PLT_UNRESOLVED:
		on_all_stopped = NULL;
		keep_stepping_p = NULL;
		leader = proc->leader;

		if (leader != NULL && leader->arch.dl_plt_update_bp != NULL) {
			if (breakpoint_turn_on(leader->arch.dl_plt_update_bp,
					       proc) < 0)
				goto stepping;
			on_all_stopped = cb_on_all_stopped;
		} else {
		stepping:
			keep_stepping_p = cb_keep_stepping_p;
		}

		if (process_install_stopping_handler
		    (proc, bp, on_all_stopped, keep_stepping_p, NULL) < 0) {
			perror("ppc64_plt_bp_continue: couldn't install"
			       " event handler");
			continue_after_breakpoint(proc, bp);
		}
		return;

	case PPC64PLT_RESOLVED:
		/* XXX The double cast should be removed when
		 * target_address_t becomes an integral type. */
		rv = (target_address_t)
			(uintptr_t)bp->libsym->arch.resolved_value;
		set_instruction_pointer(proc, rv);
		continue_process(proc->pid);
		return;

	case PPC_DEFAULT:
	case PPC64PLT_STUB:
		/* These types should never reach this code. */
		break;
	}

	assert(bp->libsym->arch.type != bp->libsym->arch.type);
	abort();
}

int
arch_library_symbol_init(struct library_symbol *libsym)
{
	/* We set type explicitly in the code above, where we have the
	 * necessary context.  This is for calls from ltrace-elf.c and
	 * such. */
	libsym->arch.type = PPC_DEFAULT;
	return 0;
}

void
arch_library_symbol_destroy(struct library_symbol *libsym)
{
}

int
arch_library_symbol_clone(struct library_symbol *retp,
			  struct library_symbol *libsym)
{
	retp->arch = libsym->arch;
	return 0;
}

/* For some symbol types, we need to set up custom callbacks.  XXX we
 * don't need PROC here, we can store the data in BP if it is of
 * interest to us. */
int
arch_breakpoint_init(struct Process *proc, struct breakpoint *bp)
{
	if (proc->e_machine == EM_PPC
	    || bp->libsym == NULL)
		return 0;

	/* Entry point breakpoints (LS_TOPLT_NONE) and stub PLT
	 * breakpoints need no special handling. */
	if (bp->libsym->plt_type != LS_TOPLT_EXEC
	    || bp->libsym->arch.type == PPC64PLT_STUB)
		return 0;

	static struct bp_callbacks cbs = {
		.on_continue = ppc64_plt_bp_continue,
	};
	breakpoint_set_callbacks(bp, &cbs);
	return 0;
}

void
arch_breakpoint_destroy(struct breakpoint *bp)
{
}

int
arch_breakpoint_clone(struct breakpoint *retp, struct breakpoint *sbp)
{
	retp->arch = sbp->arch;
	return 0;
}

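/* Per-process architecture state: the shared dl_plt_update_bp
 * breakpoint and the stopping handler currently in flight both start
 * out unset. */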
int
arch_process_init(struct Process *proc)
{
	proc->arch.dl_plt_update_bp = NULL;
	proc->arch.handler = NULL;
	return 0;
}

void
arch_process_destroy(struct Process *proc)
{
}

int
arch_process_clone(struct Process *retp, struct Process *proc)
{
	retp->arch = proc->arch;
	return 0;
}

int
arch_process_exec(struct Process *proc)
{
	return arch_process_init(proc);
}