#include <gelf.h>
#include <sys/ptrace.h>
#include <errno.h>
#include <error.h>
#include <inttypes.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>

#include "proc.h"
#include "common.h"
#include "library.h"
#include "breakpoint.h"
#include "linux-gnu/trace.h"

/* There are two PLT types on 32-bit PPC: old-style, BSS PLT, and
 * new-style "secure" PLT.  We can tell one from the other by the
 * flags on the .plt section.  If it's +X (executable), it's BSS PLT,
 * otherwise it's secure.
 *
 * BSS PLT works the same way as on most architectures: the .plt
 * section contains trampolines and we put breakpoints on those.  With
 * secure PLT, the .plt section doesn't contain instructions but
 * addresses.  The real PLT table is stored in .text.  Addresses of
 * those PLT entries can be computed, and in fact that's what the
 * glink code below does.
 *
 * If not prelinked, BSS PLT entries in the .plt section contain
 * zeroes that are overwritten by the dynamic linker during start-up.
 * For that reason, ltrace realizes those breakpoints only after
 * _start is hit.
 *
 * 64-bit PPC is more involved.  The program linker creates for each
 * library call a _stub_ symbol named xxxxxxxx.plt_call.<callee>
 * (where xxxxxxxx is a hexadecimal number).  That stub does the call
 * dispatch: it loads the address of the function to call from the
 * .plt section, and branches there.  PLT entries themselves are
 * essentially a curried call to the resolver.  When the symbol is
 * resolved, the resolver updates the value stored in .plt, and the
 * next time around, the stub calls the library function directly.  So
 * we make at most one trip (none if the binary is prelinked) through
 * each PLT entry, and correspondingly a PLT entry is useless as a
 * breakpoint site.
 *
 * Note the three confusing terms: stubs (that play the role of PLT
 * entries), PLT entries, .plt section.
 *
 * We first check the symbol tables and see if we happen to have stub
 * symbols available.  If yes, we just put breakpoints on those, and
 * treat them as usual breakpoints.  The only tricky part is realizing
 * that there can be more than one breakpoint per symbol.
 *
 * The case where we don't have the stub symbols available is harder.
 * The following scheme uses two kinds of PLT breakpoints: unresolved
 * and resolved (to some address).  When the process starts (or when
 * we attach), we distribute unresolved PLT breakpoints to the PLT
 * entries (not stubs).  Then we look in .plt, and for each entry
 * whose value is different from the corresponding PLT entry address,
 * we assume it was already resolved, and convert the breakpoint to
 * resolved.  We also rewrite the resolved value in .plt back to the
 * PLT address.
 *
 * When a PLT entry hits a resolved breakpoint (which happens because
 * we rewrite .plt with the original unresolved addresses), we move
 * the instruction pointer to the corresponding address and continue
 * the process as if nothing happened.
 *
 * When an unresolved PLT entry is called for the first time, we need
 * to catch the new value that the resolver will write to the .plt
 * slot.  We also need to prevent another thread from racing through
 * and taking the branch without ltrace noticing.  So when an
 * unresolved PLT entry hits, we have to stop all threads.  We then
 * single-step through the resolver, until the .plt slot changes.
 * When it does, we treat it the same way as above: convert the PLT
 * breakpoint to resolved, and rewrite the .plt value back to the PLT
 * address.  We then start all threads again.
 *
 * As an optimization, we remember the address where the resolution
 * took place, and put a breakpoint there.  The next time around (when
 * the next PLT entry is to be resolved), instead of single-stepping
 * through half the dynamic linker, we just let the thread run and hit
 * this breakpoint.  When it hits, we know the PLT entry was resolved.
 *
 * XXX TODO As an additional optimization, after the above is done, we
 * might emulate the instruction that updates .plt.  We would compute
 * the resolved address, and instead of letting the dynamic linker put
 * it in .plt, we would resolve the breakpoint to that address.  This
 * way we wouldn't need to stop other threads.  Otherwise there's no
 * way around that: unless we know where the stubs are, we don't have
 * a way to catch a thread that would use the window of opportunity
 * between updating .plt and notifying ltrace that it happened.
 *
 * XXX TODO If we have a hardware watchpoint, we might put a read
 * watch on the .plt slot, and discover the offenders this way.  I
 * don't know the details, but I assume at most a handful (like, one
 * or two, if available at all) addresses may be watched at a time,
 * and thus this would be used as an amendment of the above rather
 * than a full-on solution to PLT tracing on PPC.
 */
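
/* Purely for illustration -- none of this is emitted or parsed by
 * ltrace -- a 64-bit plt_call stub generated by the program linker
 * looks roughly like the following (exact registers and offsets vary
 * with the binutils version):
 *
 *   xxxxxxxx.plt_call.foo:
 *     std   r2,40(r1)      # save the caller's TOC pointer
 *     ld    r11,N(r2)      # fetch the target address from the .plt slot
 *     mtctr r11
 *     ld    r2,N+8(r2)     # fetch the callee's TOC from the same slot
 *     bctr                 # branch to whatever the slot points at
 *
 * Until the resolver has run, the .plt slot points back at the PLT
 * entry, so the final branch lands in the lazy-resolution code; after
 * that it goes straight to the library function.  */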

#define PPC_PLT_STUB_SIZE 16
#define PPC64_PLT_STUB_SIZE 8	//xxx

static inline int
host_powerpc64()
{
#ifdef __powerpc64__
	return 1;
#else
	return 0;
#endif
}

static enum callback_status
reenable_breakpoint(struct Process *proc, struct breakpoint *bp, void *data)
{
	/* We only need to re-enable PLT breakpoints in PPC32 BSS PLT
	 * objects that were not prelinked; skip everything else.  */
	if (bp->libsym == NULL
	    || bp->libsym->plt_type == LS_TOPLT_NONE
	    || bp->libsym->lib->arch.bss_plt_prelinked != 0)
		return CBS_CONT;

	debug(DEBUG_PROCESS, "pid=%d reenable_breakpoint %s",
	      proc->pid, breakpoint_name(bp));

	/* Re-enable the breakpoint that was overwritten by the
	 * dynamic linker.  XXX unfortunately it's overwritten
	 * again after the first call :-/ */
	enable_breakpoint(proc, bp);

	return CBS_CONT;
}

void
arch_dynlink_done(struct Process *proc)
{
	/* On PPC32, .plt sections of objects that use BSS PLT are
	 * overwritten by the dynamic linker (unless the object was
	 * prelinked).  We need to re-enable breakpoints in those
	 * objects.  */
	proc_each_breakpoint(proc, NULL, reenable_breakpoint, NULL);
}

GElf_Addr
arch_plt_sym_val(struct ltelf *lte, size_t ndx, GElf_Rela *rela)
{
	if (lte->ehdr.e_machine == EM_PPC && lte->arch.secure_plt) {
		assert(lte->arch.plt_stub_vma != 0);
		return lte->arch.plt_stub_vma + PPC_PLT_STUB_SIZE * ndx;

	} else if (lte->ehdr.e_machine == EM_PPC) {
		return rela->r_offset;

	} else {
		/* If we get here, we don't have stub symbols.  In
		 * that case we put breakpoints on PLT entries the
		 * same way the PPC32 secure PLT case does.  */
		assert(lte->arch.plt_stub_vma != 0);
		return lte->arch.plt_stub_vma + PPC64_PLT_STUB_SIZE * ndx;
	}
}

int
arch_translate_address(struct Process *proc,
		       target_address_t addr, target_address_t *ret)
{
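	/* On 64-bit PPC, the value of a function symbol is not the
	 * code address itself, but the address of a function
	 * descriptor in .opd whose first doubleword holds the actual
	 * entry point.  The PTRACE_PEEKTEXT below reads that
	 * doubleword from the process image.  */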
	if (proc->e_machine == EM_PPC64) {
		assert(host_powerpc64());
		long l = ptrace(PTRACE_PEEKTEXT, proc->pid, addr, 0);
		if (l == -1 && errno) {
			error(0, errno, ".opd translation of %p", addr);
			return -1;
		}
		*ret = (target_address_t)l;
		return 0;
	}

	*ret = addr;
	return 0;
}

void *
sym2addr(struct Process *proc, struct library_symbol *sym)
{
	return sym->enter_addr;
}

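/* Find the virtual address of the glink branch table of a PPC32
 * secure-PLT object.  Preferably this comes from the word that
 * follows the DT_PPC_GOT address in .got; the first word of the .plt
 * data serves as a fallback.  Returns 0 if neither is available.  */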
static GElf_Addr
get_glink_vma(struct ltelf *lte, GElf_Addr ppcgot, Elf_Data *plt_data)
{
	Elf_Scn *ppcgot_sec = NULL;
	GElf_Shdr ppcgot_shdr;
	if (ppcgot != 0
	    && elf_get_section_covering(lte, ppcgot,
					&ppcgot_sec, &ppcgot_shdr) < 0)
		error(0, 0, "DT_PPC_GOT=%#"PRIx64", but no such section found",
		      ppcgot);

	if (ppcgot_sec != NULL) {
		Elf_Data *data = elf_loaddata(ppcgot_sec, &ppcgot_shdr);
		if (data == NULL || data->d_size < 8) {
			error(0, 0, "couldn't read GOT data");
		} else {
			// where PPCGOT begins in .got
			size_t offset = ppcgot - ppcgot_shdr.sh_addr;
			assert(offset % 4 == 0);
			uint32_t glink_vma;
			if (elf_read_u32(data, offset + 4, &glink_vma) < 0) {
				error(0, 0, "couldn't read glink VMA address"
				      " at %zd@GOT", offset);
				return 0;
			}
			if (glink_vma != 0) {
				debug(1, "PPC GOT glink_vma address: %#" PRIx32,
				      glink_vma);
				return (GElf_Addr)glink_vma;
			}
		}
	}

	if (plt_data != NULL) {
		uint32_t glink_vma;
		if (elf_read_u32(plt_data, 0, &glink_vma) < 0) {
			error(0, 0, "couldn't read glink VMA address");
			return 0;
		}
		debug(1, ".plt glink_vma address: %#" PRIx32, glink_vma);
		return (GElf_Addr)glink_vma;
	}

	return 0;
}

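/* Scan the .dynamic section for an entry with the given TAG and store
 * its d_ptr value in *VALUEP.  Returns 0 on success, or -1 if the tag
 * is absent or the section can't be read.  */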
static int
load_dynamic_entry(struct ltelf *lte, int tag, GElf_Addr *valuep)
{
	Elf_Scn *scn;
	GElf_Shdr shdr;
	if (elf_get_section_type(lte, SHT_DYNAMIC, &scn, &shdr) < 0
	    || scn == NULL) {
	fail:
		error(0, 0, "Couldn't get SHT_DYNAMIC: %s",
		      elf_errmsg(-1));
		return -1;
	}

	Elf_Data *data = elf_loaddata(scn, &shdr);
	if (data == NULL)
		goto fail;

	size_t j;
	for (j = 0; j < shdr.sh_size / shdr.sh_entsize; ++j) {
		GElf_Dyn dyn;
		if (gelf_getdyn(data, j, &dyn) == NULL)
			goto fail;

		if (dyn.d_tag == tag) {
			*valuep = dyn.d_un.d_ptr;
			return 0;
		}
	}

	return -1;
}

static int
load_ppcgot(struct ltelf *lte, GElf_Addr *ppcgotp)
{
	return load_dynamic_entry(lte, DT_PPC_GOT, ppcgotp);
}

static int
load_ppc64_glink(struct ltelf *lte, GElf_Addr *glinkp)
{
	return load_dynamic_entry(lte, DT_PPC64_GLINK, glinkp);
}

static int
nonzero_data(Elf_Data *data)
{
	/* We are not supposed to get here if there are no PLT data in
	 * the binary.  */
	assert(data != NULL);

	unsigned char *buf = data->d_buf;
	if (buf == NULL)
		return 0;

	size_t i;
	for (i = 0; i < data->d_size; ++i)
		if (buf[i] != 0)
			return 1;
	return 0;
}

int
arch_elf_init(struct ltelf *lte, struct library *lib)
{
	lte->arch.secure_plt = !(lte->plt_flags & SHF_EXECINSTR);

	/* For PPC32 BSS PLT, it is important whether the binary was
	 * prelinked.  If the .plt section is NOBITS, or if it
	 * contains zeroes, then this library is not prelinked, and we
	 * need to delay breakpoints.  */
	if (lte->ehdr.e_machine == EM_PPC && !lte->arch.secure_plt)
		lib->arch.bss_plt_prelinked = nonzero_data(lte->plt_data);
	else
		/* For cases where it's irrelevant, initialize the
		 * value to something conspicuous.  */
		lib->arch.bss_plt_prelinked = -1;

	if (lte->ehdr.e_machine == EM_PPC && lte->arch.secure_plt) {
		GElf_Addr ppcgot;
		if (load_ppcgot(lte, &ppcgot) < 0) {
			error(0, 0, "couldn't find DT_PPC_GOT");
			return -1;
		}
		GElf_Addr glink_vma = get_glink_vma(lte, ppcgot, lte->plt_data);

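		/* Each .rela.plt entry is an Elf32_Rela: three 4-byte
		 * words (r_offset, r_info, r_addend), hence the 12
		 * below.  As a purely illustrative example, a binary
		 * with a glink_vma of 0x10000600 and 36 bytes of
		 * .rela.plt has three 16-byte stubs, the first of
		 * which starts at 0x10000600 - 3*16 = 0x100005d0.  */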
		assert(lte->relplt_size % 12 == 0);
		size_t count = lte->relplt_size / 12; // size of RELA entry
		lte->arch.plt_stub_vma = glink_vma
			- (GElf_Addr)count * PPC_PLT_STUB_SIZE;
		debug(1, "stub_vma is %#" PRIx64, lte->arch.plt_stub_vma);

	} else if (lte->ehdr.e_machine == EM_PPC64) {
		GElf_Addr glink_vma;
		if (load_ppc64_glink(lte, &glink_vma) < 0) {
			error(0, 0, "couldn't find DT_PPC64_GLINK");
			return -1;
		}

		/* The first glink stub starts at offset 32.  */
		lte->arch.plt_stub_vma = glink_vma + 32;
	}

	/* On PPC64, look for stub symbols in the symbol table.  These
	 * are called: xxxxxxxx.plt_call.callee_name@version+addend.  */
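	/* For example, a (hypothetical) stub symbol named
	 * "00000017.plt_call.memcpy@GLIBC_2.3+0" would yield the
	 * traced name "memcpy".  */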
	if (lte->ehdr.e_machine == EM_PPC64
	    && lte->symtab != NULL && lte->strtab != NULL) {

		/* N.B. We can't simply skip the symbols that we fail
		 * to read or malloc.  There may be more than one stub
		 * per symbol name, and if we failed in one but
		 * succeeded in another, the PLT enabling code would
		 * have no way to tell that something is missing.  We
		 * could work around that, of course, but it doesn't
		 * seem worth the trouble.  So if anything fails, we
		 * just pretend that we don't have stub symbols at
		 * all, as if the binary is stripped.  */

		size_t i;
		for (i = 0; i < lte->symtab_count; ++i) {
			GElf_Sym sym;
			if (gelf_getsym(lte->symtab, i, &sym) == NULL) {
				struct library_symbol *sym, *next;
			fail:
				for (sym = lte->arch.stubs; sym != NULL; ) {
					next = sym->next;
					library_symbol_destroy(sym);
					free(sym);
					sym = next;
				}
				lte->arch.stubs = NULL;
				break;
			}

			const char *name = lte->strtab + sym.st_name;

#define STUBN ".plt_call."
			if ((name = strstr(name, STUBN)) == NULL)
				continue;
			name += sizeof(STUBN) - 1;
#undef STUBN

			size_t len;
			const char *ver = strchr(name, '@');
			if (ver != NULL) {
				len = ver - name;

			} else {
				/* If there is a "+" at all, check that
				 * the symbol name ends in "+0".  */
				const char *add = strrchr(name, '+');
				if (add != NULL) {
					assert(strcmp(add, "+0") == 0);
					len = add - name;
				} else {
					len = strlen(name);
				}
			}

			char *sym_name = strndup(name, len);
			struct library_symbol *libsym = malloc(sizeof(*libsym));
			if (sym_name == NULL || libsym == NULL) {
			fail2:
				free(sym_name);
				free(libsym);
				goto fail;
			}

			/* XXX The double cast should be removed when
			 * target_address_t becomes an integral type.  */
			target_address_t addr = (target_address_t)
				(uintptr_t)sym.st_value + lte->bias;
			if (library_symbol_init(libsym, addr, sym_name, 1,
						LS_TOPLT_EXEC) < 0)
				goto fail2;
			libsym->arch.type = PPC64PLT_STUB;
			libsym->next = lte->arch.stubs;
			lte->arch.stubs = libsym;
		}
	}

	return 0;
}

static int
read_plt_slot_value(struct Process *proc, GElf_Addr addr, GElf_Addr *valp)
{
	/* On PPC32 we would need to do things differently, but
	 * tracing 32-bit processes from 64-bit ltrace is currently
	 * not supported anyway.  */
	assert(host_powerpc64());

	long l = ptrace(PTRACE_PEEKTEXT, proc->pid, addr, 0);
	if (l == -1 && errno != 0) {
		error(0, errno, "ptrace .plt slot value @%#" PRIx64, addr);
		return -1;
	}

	*valp = (GElf_Addr)l;
	return 0;
}

static int
unresolve_plt_slot(struct Process *proc, GElf_Addr addr, GElf_Addr value)
{
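	/* For reference, a 64-bit .plt slot mirrors a function
	 * descriptor and consists of three doublewords:
	 *   +0   address of the function's code (plt_entry[0])
	 *   +8   TOC pointer of the callee
	 *   +16  environment pointer (unused by C code)  */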
	/* We only modify plt_entry[0], which holds the resolved
	 * address of the routine.  We keep the TOC and environment
	 * pointers intact.  Hence the only adjustment that we need to
	 * do is to the IP.  */
	if (ptrace(PTRACE_POKETEXT, proc->pid, addr, value) < 0) {
		error(0, errno, "unresolve .plt slot");
		return -1;
	}
	return 0;
}

enum plt_status
arch_elf_add_plt_entry(struct Process *proc, struct ltelf *lte,
		       const char *a_name, GElf_Rela *rela, size_t ndx,
		       struct library_symbol **ret)
{
	if (lte->ehdr.e_machine == EM_PPC)
		return plt_default;

	/* PPC64.  If we have stubs, we return a chain of breakpoint
	 * sites, one for each stub that corresponds to this PLT
	 * entry.  */
	struct library_symbol *chain = NULL;
	struct library_symbol **symp;
	for (symp = &lte->arch.stubs; *symp != NULL; ) {
		struct library_symbol *sym = *symp;
		if (strcmp(sym->name, a_name) != 0) {
			symp = &(*symp)->next;
			continue;
		}

		/* Re-chain the symbol from stubs to CHAIN.  */
		*symp = sym->next;
		sym->next = chain;
		chain = sym;
	}

	if (chain != NULL) {
		*ret = chain;
		return plt_ok;
	}

	/* We don't have stub symbols.  Find the corresponding .plt
	 * slot, and check whether it contains the corresponding PLT
	 * address (or 0 if the dynamic linker hasn't run yet).
	 * N.B. we don't want to read this from the ELF file, but from
	 * the process image.  That makes a difference if we are
	 * attaching to a running process.  */

	GElf_Addr plt_entry_addr = arch_plt_sym_val(lte, ndx, rela);
	GElf_Addr plt_slot_addr = rela->r_offset;
	assert(plt_slot_addr >= lte->plt_addr
	       && plt_slot_addr < lte->plt_addr + lte->plt_size);

	GElf_Addr plt_slot_value;
	if (read_plt_slot_value(proc, plt_slot_addr, &plt_slot_value) < 0)
		return plt_fail;

	char *name = strdup(a_name);
	struct library_symbol *libsym = malloc(sizeof(*libsym));
	if (name == NULL || libsym == NULL) {
		error(0, errno, "allocation for .plt slot");
	fail:
		free(name);
		free(libsym);
		return plt_fail;
	}

	/* XXX The double cast should be removed when
	 * target_address_t becomes an integral type.  */
	if (library_symbol_init(libsym,
				(target_address_t)(uintptr_t)plt_entry_addr,
				name, 1, LS_TOPLT_EXEC) < 0)
		goto fail;
	libsym->arch.plt_slot_addr = plt_slot_addr;

	if (plt_slot_value == plt_entry_addr || plt_slot_value == 0) {
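		/* The slot still holds the address of the PLT entry
		 * itself (or zero, if the dynamic linker hasn't
		 * filled it in yet), so no call has been resolved
		 * through it so far.  */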
		libsym->arch.type = PPC64PLT_UNRESOLVED;
		libsym->arch.resolved_value = plt_entry_addr;

	} else {
		/* Unresolve the .plt slot.  If the binary was
		 * prelinked, this makes the code invalid, because in
		 * the case of a prelinked binary, the dynamic linker
		 * doesn't update .plt[0] and .plt[1] with addresses
		 * of the resolver.  But we don't care, we will never
		 * need to enter the resolver.  That just means that
		 * we have to un-un-resolve this back before we
		 * detach.  */

		if (unresolve_plt_slot(proc, plt_slot_addr, plt_entry_addr) < 0) {
			library_symbol_destroy(libsym);
			goto fail;
		}
		libsym->arch.type = PPC64PLT_RESOLVED;
		libsym->arch.resolved_value = plt_slot_value;
	}

	*ret = libsym;
	return plt_ok;
}

void
arch_elf_destroy(struct ltelf *lte)
{
	struct library_symbol *sym;
	for (sym = lte->arch.stubs; sym != NULL; ) {
		struct library_symbol *next = sym->next;
		library_symbol_destroy(sym);
		free(sym);
		sym = next;
	}
}

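/* This is the on-hit handler of the breakpoint that we keep in the
 * dynamic linker, right after the instruction that stores a resolved
 * address into a .plt slot.  When it hits, the resolver has just
 * updated the slot of the PLT entry we are tracking, so we put the
 * original value back and convert that PLT breakpoint to resolved.  */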
static void
dl_plt_update_bp_on_hit(struct breakpoint *bp, struct Process *proc)
{
	struct process_stopping_handler *self = proc->arch.handler;
	assert(self != NULL);

	struct library_symbol *libsym = self->breakpoint_being_enabled->libsym;
	GElf_Addr value;
	if (read_plt_slot_value(proc, libsym->arch.plt_slot_addr, &value) < 0)
		return;

	/* Rewrite the slot back to the original value, and convert
	 * the PLT breakpoint to resolved, pointing at the address
	 * that the resolver just stored.  */
	unresolve_plt_slot(proc, libsym->arch.plt_slot_addr,
			   libsym->arch.resolved_value);
	libsym->arch.type = PPC64PLT_RESOLVED;
	libsym->arch.resolved_value = value;

	/* cb_on_all_stopped checks whether HANDLER was reset to NULL
	 * as a way to tell that this hook has run.  It's an error if
	 * it wasn't.  */
	breakpoint_turn_off(bp, proc);
	proc->arch.handler = NULL;
}

static void
cb_on_all_stopped(struct process_stopping_handler *self)
{
	/* Put that in for dl_plt_update_bp_on_hit to see.  */
	assert(self->task_enabling_breakpoint->arch.handler == NULL);
	self->task_enabling_breakpoint->arch.handler = self;

	linux_ptrace_disable_and_continue(self);
}

static enum callback_status
cb_keep_stepping_p(struct process_stopping_handler *self)
{
	struct Process *proc = self->task_enabling_breakpoint;
	struct library_symbol *libsym = self->breakpoint_being_enabled->libsym;
	GElf_Addr value;
	if (read_plt_slot_value(proc, libsym->arch.plt_slot_addr, &value) < 0)
		return CBS_FAIL;

	/* In UNRESOLVED state, the RESOLVED_VALUE in fact contains
	 * the PLT entry value.  */
	if (value == libsym->arch.resolved_value)
		return CBS_CONT;

	/* The .plt slot got resolved!  We can migrate the breakpoint
	 * to RESOLVED and stop single-stepping.  */
	if (unresolve_plt_slot(proc, libsym->arch.plt_slot_addr,
			       libsym->arch.resolved_value) < 0)
		return CBS_FAIL;

	/* Install a breakpoint at the address where the change takes
	 * place.  If we fail, then that just means that we'll have to
	 * single-step the next time around as well.  */
	struct Process *leader = proc->leader;
	if (leader == NULL || leader->arch.dl_plt_update_bp != NULL)
		goto resolve;

	/* We need to install it at the next instruction.  ADDR points
	 * to a store instruction, so moving the breakpoint one
	 * instruction forward is safe.  */
	target_address_t addr = get_instruction_pointer(proc) + 4;
	leader->arch.dl_plt_update_bp = insert_breakpoint(proc, addr, NULL);

	if (leader->arch.dl_plt_update_bp != NULL) {
		static struct bp_callbacks dl_plt_update_cbs = {
			.on_hit = dl_plt_update_bp_on_hit,
		};
		leader->arch.dl_plt_update_bp->cbs = &dl_plt_update_cbs;

		/* Turn it off for now.  We will turn it on again when
		 * we hit the PLT entry that needs this.  */
		breakpoint_turn_off(leader->arch.dl_plt_update_bp, proc);
	}

resolve:
	libsym->arch.type = PPC64PLT_RESOLVED;
	libsym->arch.resolved_value = value;

	return CBS_STOP;
}

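/* The on_continue callback for PPC64 PLT breakpoints.  An unresolved
 * entry is continued under a stopping handler: either we let the
 * thread run into the dl_plt_update breakpoint in the dynamic linker,
 * or, if that breakpoint isn't available, we single-step until the
 * .plt slot changes.  A resolved entry simply has the IP redirected
 * to the saved target address.  */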
static void
ppc64_plt_bp_continue(struct breakpoint *bp, struct Process *proc)
{
	switch (bp->libsym->arch.type) {
		target_address_t rv;
		struct Process *leader;
		void (*on_all_stopped)(struct process_stopping_handler *);
		enum callback_status (*keep_stepping_p)
			(struct process_stopping_handler *);

	case PPC64PLT_UNRESOLVED:
		on_all_stopped = NULL;
		keep_stepping_p = NULL;
		leader = proc->leader;

		if (leader != NULL && leader->arch.dl_plt_update_bp != NULL
		    && breakpoint_turn_on(leader->arch.dl_plt_update_bp,
					  proc) >= 0)
			on_all_stopped = cb_on_all_stopped;
		else
			keep_stepping_p = cb_keep_stepping_p;

		if (process_install_stopping_handler
		    (proc, bp, on_all_stopped, keep_stepping_p, NULL) < 0) {
			perror("ppc64_plt_bp_continue: couldn't install"
			       " event handler");
			continue_after_breakpoint(proc, bp);
		}
		return;

	case PPC64PLT_RESOLVED:
		/* XXX The double cast should be removed when
		 * target_address_t becomes an integral type.  */
		rv = (target_address_t)
			(uintptr_t)bp->libsym->arch.resolved_value;
		set_instruction_pointer(proc, rv);
		continue_process(proc->pid);
		return;

	case PPC_DEFAULT:
	case PPC64PLT_STUB:
		/* Breakpoints of these types should never get here.  */
		break;
	}

	assert(bp->libsym->arch.type != bp->libsym->arch.type);
	abort();
}

void
arch_library_init(struct library *lib)
{
}

void
arch_library_destroy(struct library *lib)
{
}

void
arch_library_clone(struct library *retp, struct library *lib)
{
}

int
arch_library_symbol_init(struct library_symbol *libsym)
{
	/* We set the type explicitly in the code above, where we have
	 * the necessary context.  This is for calls from ltrace-elf.c
	 * and such.  */
	libsym->arch.type = PPC_DEFAULT;
	return 0;
}

void
arch_library_symbol_destroy(struct library_symbol *libsym)
{
}

int
arch_library_symbol_clone(struct library_symbol *retp,
			  struct library_symbol *libsym)
{
	retp->arch = libsym->arch;
	return 0;
}

/* For some symbol types, we need to set up custom callbacks.  XXX we
 * don't need PROC here, we can store the data in BP if it is of
 * interest to us.  */
int
arch_breakpoint_init(struct Process *proc, struct breakpoint *bp)
{
	if (proc->e_machine == EM_PPC
	    || bp->libsym == NULL)
		return 0;

	/* Entry point breakpoints (LS_TOPLT_NONE) and stub PLT
	 * breakpoints need no special handling.  */
	if (bp->libsym->plt_type != LS_TOPLT_EXEC
	    || bp->libsym->arch.type == PPC64PLT_STUB)
		return 0;

	static struct bp_callbacks cbs = {
		.on_continue = ppc64_plt_bp_continue,
	};
	breakpoint_set_callbacks(bp, &cbs);
	return 0;
}

void
arch_breakpoint_destroy(struct breakpoint *bp)
{
}

int
arch_breakpoint_clone(struct breakpoint *retp, struct breakpoint *sbp)
{
	retp->arch = sbp->arch;
	return 0;
}

int
arch_process_init(struct Process *proc)
{
	proc->arch.dl_plt_update_bp = NULL;
	proc->arch.handler = NULL;
	return 0;
}

void
arch_process_destroy(struct Process *proc)
{
}

int
arch_process_clone(struct Process *retp, struct Process *proc)
{
	retp->arch = proc->arch;
	return 0;
}

int
arch_process_exec(struct Process *proc)
{
	return arch_process_init(proc);
}