#include <gelf.h>
#include <sys/ptrace.h>
#include <errno.h>
#include <error.h>
#include <inttypes.h>
#include <assert.h>
#include <string.h>

#include "proc.h"
#include "common.h"
#include "library.h"
#include "breakpoint.h"
#include "linux-gnu/trace.h"

/* There are two PLT types on 32-bit PPC: old-style BSS PLT and
 * new-style "secure" PLT.  We can tell one from the other by the
 * flags on the .plt section.  If it's +X (executable), it's BSS PLT,
 * otherwise it's secure.
 *
 * BSS PLT works the same way as on most architectures: the .plt
 * section contains trampolines and we put breakpoints on those.  If
 * the binary is not prelinked, .plt contains zeroes, and the dynamic
 * linker fills in the initial set of trampolines, which means that
 * we need to delay enabling breakpoints until after the binary's
 * entry point is hit.  Additionally, after the first call, the
 * dynamic linker updates .plt with a branch to the resolved address.
 * That means that on the first hit, we must do something similar to
 * the PPC64 gambit described below.
 *
 * With secure PLT, the .plt section doesn't contain instructions but
 * addresses.  The real PLT table is stored in .text.  Addresses of
 * those PLT entries can be computed, and apart from the fact that
 * they are in .text, they are ordinary PLT entries.
 *
 * 64-bit PPC is more involved.  The program linker creates for each
 * library call a _stub_ symbol named xxxxxxxx.plt_call.<callee>
 * (where xxxxxxxx is a hexadecimal number).  That stub does the call
 * dispatch: it loads the address of the function to call from the
 * .plt section, and branches there.  PLT entries themselves are
 * essentially a curried call to the resolver.  When the symbol is
 * resolved, the resolver updates the value stored in .plt, and the
 * next time around, the stub calls the library function directly.
 * So we make at most one trip (none if the binary is prelinked)
 * through each PLT entry, which makes the PLT entry itself useless
 * as a breakpoint site.
 *
 * Note the three confusing terms: stubs (which play the role of PLT
 * entries), PLT entries, and the .plt section.
 *
 * We first check the symbol tables to see whether we happen to have
 * stub symbols available.  If yes, we just put breakpoints on those
 * and treat them as usual breakpoints.  The only tricky part is
 * realizing that there can be more than one breakpoint per symbol.
 *
 * The case where we don't have the stub symbols available is harder.
 * The following scheme uses two kinds of PLT breakpoints: unresolved
 * and resolved (to some address).  When the process starts (or when
 * we attach), we distribute unresolved PLT breakpoints to the PLT
 * entries (not stubs).  Then we look in .plt, and for each entry
 * whose value is different from the corresponding PLT entry address,
 * we assume it was already resolved, and convert the breakpoint to
 * resolved.  We also rewrite the resolved value in .plt back to the
 * PLT address.
 *
 * When a PLT entry hits a resolved breakpoint (which happens because
 * we rewrite .plt with the original unresolved addresses), we move
 * the instruction pointer to the corresponding address and continue
 * the process as if nothing happened.
 *
 * When an unresolved PLT entry is called for the first time, we need
 * to catch the new value that the resolver will write to the .plt
 * slot.  We also need to prevent another thread from racing through
 * and taking the branch without ltrace noticing.  So when an
 * unresolved PLT entry hits, we have to stop all threads.  We then
 * single-step through the resolver until the .plt slot changes.
 * When it does, we treat it the same way as above: convert the PLT
 * breakpoint to resolved, and rewrite the .plt value back to the PLT
 * address.  We then start all threads again.
 *
 * As an optimization, we remember the address where the address was
 * resolved, and put a breakpoint there.  The next time around (when
 * the next PLT entry is to be resolved), instead of single-stepping
 * through half the dynamic linker, we just let the thread run and
 * hit this breakpoint.  When it hits, we know the PLT entry was
 * resolved.
 *
 * N.B. It's tempting to try to emulate the instruction that updates
 * .plt.  We would compute the resolved address, and instead of
 * letting the dynamic linker put it in .plt, we would resolve the
 * breakpoint to that address.  This way we wouldn't need to stop
 * other threads.  However, that instruction may turn out to be a
 * sync, and in general may be any instruction between the actual
 * write and the following sync.  XXX TODO That means that we need to
 * put the post-enable breakpoint at the following sync, not at the
 * instruction itself (unless it's a sync already).
 *
 * XXX TODO If we have hardware watchpoints, we might put a read
 * watch on the .plt slot and discover the offenders this way.  I
 * don't know the details, but I assume at most a handful (like, one
 * or two, if available at all) of addresses may be watched at a
 * time, and thus this would be used as an amendment to the above
 * rather than a full-on solution to PLT tracing on PPC.
 */
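
/* Summary of the per-symbol state used below: each PLT-related
 * library symbol carries an arch type.  PPC_DEFAULT is the initial
 * type set by arch_library_symbol_init; for PLT breakpoints that
 * reach the callbacks below it means an unprelinked PPC32 BSS entry.
 * PPC_PLT_UNRESOLVED means the .plt slot still points back at the
 * PLT entry (or is zero); PPC_PLT_RESOLVED means the slot was
 * resolved and the final address is remembered in resolved_value;
 * PPC64_PLT_STUB marks a breakpoint on a xxxxxxxx.plt_call stub,
 * which is handled like an ordinary breakpoint.  */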

#define PPC_PLT_STUB_SIZE 16
#define PPC64_PLT_STUB_SIZE 8 //xxx

static inline int
host_powerpc64(void)
{
#ifdef __powerpc64__
	return 1;
#else
	return 0;
#endif
}

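/* Read a 32-bit word from the traced process.  PTRACE_PEEKTEXT
 * returns one host-sized word; on a 64-bit (big-endian) host the
 * four bytes we are after sit in the upper half of that word, hence
 * the shift below.  */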
int
read_target_4(struct Process *proc, target_address_t addr, uint32_t *lp)
{
	/* Clear errno so that a legitimately peeked value of -1 can
	 * be told apart from a ptrace failure.  */
	errno = 0;
	unsigned long l = ptrace(PTRACE_PEEKTEXT, proc->pid, addr, 0);
	if (l == -1UL && errno)
		return -1;
#ifdef __powerpc64__
	l >>= 32;
#endif
	*lp = l;
	return 0;
}

static int
read_target_8(struct Process *proc, target_address_t addr, uint64_t *lp)
{
	errno = 0;
	unsigned long l = ptrace(PTRACE_PEEKTEXT, proc->pid, addr, 0);
	if (l == -1UL && errno)
		return -1;
	if (host_powerpc64()) {
		*lp = l;
	} else {
		unsigned long l2 = ptrace(PTRACE_PEEKTEXT, proc->pid,
					  addr + 4, 0);
		if (l2 == -1UL && errno)
			return -1;
		*lp = ((uint64_t)l << 32) | l2;
	}
	return 0;
}

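/* Read one target-long-sized value: 4 bytes if the tracee is 32-bit
 * PPC, 8 bytes otherwise.  The result is widened to uint64_t either
 * way.  */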
int
read_target_long(struct Process *proc, target_address_t addr, uint64_t *lp)
{
	if (proc->e_machine == EM_PPC) {
		uint32_t w;
		int ret = read_target_4(proc, addr, &w);
		if (ret >= 0)
			*lp = (uint64_t)w;
		return ret;
	} else {
		return read_target_8(proc, addr, lp);
	}
}

static enum callback_status
reenable_breakpoint(struct Process *proc, struct breakpoint *bp, void *data)
{
	/* We don't need to re-enable non-PLT breakpoints, or
	 * breakpoints in objects other than unprelinked PPC32
	 * BSS-PLT objects.  */
	if (bp->libsym == NULL
	    || bp->libsym->plt_type == LS_TOPLT_NONE
	    || bp->libsym->lib->arch.bss_plt_prelinked != 0)
		return CBS_CONT;

	debug(DEBUG_PROCESS, "pid=%d reenable_breakpoint %s",
	      proc->pid, breakpoint_name(bp));

	assert(proc->e_machine == EM_PPC);
	uint64_t l;
	if (read_target_8(proc, bp->addr, &l) < 0) {
		error(0, errno, "couldn't read PLT value for %s(%p)",
		      breakpoint_name(bp), bp->addr);
		return CBS_CONT;
	}
	/* XXX double cast */
	bp->libsym->arch.plt_slot_addr = (GElf_Addr)(uintptr_t)bp->addr;
	bp->libsym->arch.resolved_value = l;

	/* Re-enable the breakpoint that was overwritten by the
	 * dynamic linker.  */
	enable_breakpoint(proc, bp);

	return CBS_CONT;
}

void
arch_dynlink_done(struct Process *proc)
{
	/* On PPC32, the .plt of objects that use BSS PLT is
	 * overwritten by the dynamic linker (unless the object was
	 * prelinked).  We need to re-enable breakpoints in those
	 * objects.  */
	proc_each_breakpoint(proc, NULL, reenable_breakpoint, NULL);
}

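/* Compute the address where the breakpoint for PLT entry NDX should
 * go: into the glink stub area for PPC32 secure PLT and for PPC64,
 * or directly at the .plt slot (RELA->r_offset) for PPC32 BSS PLT.  */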
GElf_Addr
arch_plt_sym_val(struct ltelf *lte, size_t ndx, GElf_Rela *rela)
{
	if (lte->ehdr.e_machine == EM_PPC && lte->arch.secure_plt) {
		assert(lte->arch.plt_stub_vma != 0);
		return lte->arch.plt_stub_vma + PPC_PLT_STUB_SIZE * ndx;

	} else if (lte->ehdr.e_machine == EM_PPC) {
		return rela->r_offset;

	} else {
		/* If we get here, we don't have stub symbols.  In
		 * that case we put breakpoints on PLT entries the
		 * same way as the PPC32 secure PLT case does.  */
		assert(lte->arch.plt_stub_vma != 0);
		return lte->arch.plt_stub_vma + PPC64_PLT_STUB_SIZE * ndx;
	}
}

int
arch_translate_address(struct Process *proc,
		       target_address_t addr, target_address_t *ret)
{
	if (proc->e_machine == EM_PPC64) {
		assert(host_powerpc64());
		errno = 0;
		long l = ptrace(PTRACE_PEEKTEXT, proc->pid, addr, 0);
		if (l == -1 && errno) {
			error(0, errno, ".opd translation of %p", addr);
			return -1;
		}
		*ret = (target_address_t)l;
		return 0;
	}

	*ret = addr;
	return 0;
}

void *
sym2addr(struct Process *proc, struct library_symbol *sym)
{
	return sym->enter_addr;
}

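/* Find the virtual address of the glink trampoline area of a PPC32
 * secure-PLT object: preferably from the second word of the PPCGOT
 * (DT_PPC_GOT), otherwise from the first word of .plt.  */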
static GElf_Addr
get_glink_vma(struct ltelf *lte, GElf_Addr ppcgot, Elf_Data *plt_data)
{
	Elf_Scn *ppcgot_sec = NULL;
	GElf_Shdr ppcgot_shdr;
	if (ppcgot != 0
	    && elf_get_section_covering(lte, ppcgot,
					&ppcgot_sec, &ppcgot_shdr) < 0)
		error(0, 0, "DT_PPC_GOT=%#"PRIx64", but no such section found",
		      ppcgot);

	if (ppcgot_sec != NULL) {
		Elf_Data *data = elf_loaddata(ppcgot_sec, &ppcgot_shdr);
		if (data == NULL || data->d_size < 8) {
			error(0, 0, "couldn't read GOT data");
		} else {
			/* Where PPCGOT begins in .got.  */
			size_t offset = ppcgot - ppcgot_shdr.sh_addr;
			assert(offset % 4 == 0);
			uint32_t glink_vma;
			if (elf_read_u32(data, offset + 4, &glink_vma) < 0) {
				error(0, 0, "couldn't read glink VMA address"
				      " at %zd@GOT", offset);
				return 0;
			}
			if (glink_vma != 0) {
				debug(1, "PPC GOT glink_vma address: %#" PRIx32,
				      glink_vma);
				return (GElf_Addr)glink_vma;
			}
		}
	}

	if (plt_data != NULL) {
		uint32_t glink_vma;
		if (elf_read_u32(plt_data, 0, &glink_vma) < 0) {
			error(0, 0, "couldn't read glink VMA address");
			return 0;
		}
		debug(1, ".plt glink_vma address: %#" PRIx32, glink_vma);
		return (GElf_Addr)glink_vma;
	}

	return 0;
}

static int
load_dynamic_entry(struct ltelf *lte, int tag, GElf_Addr *valuep)
{
	Elf_Scn *scn;
	GElf_Shdr shdr;
	if (elf_get_section_type(lte, SHT_DYNAMIC, &scn, &shdr) < 0
	    || scn == NULL) {
	fail:
		error(0, 0, "couldn't get SHT_DYNAMIC: %s",
		      elf_errmsg(-1));
		return -1;
	}

	Elf_Data *data = elf_loaddata(scn, &shdr);
	if (data == NULL)
		goto fail;

	size_t j;
	for (j = 0; j < shdr.sh_size / shdr.sh_entsize; ++j) {
		GElf_Dyn dyn;
		if (gelf_getdyn(data, j, &dyn) == NULL)
			goto fail;

		if (dyn.d_tag == tag) {
			*valuep = dyn.d_un.d_ptr;
			return 0;
		}
	}

	return -1;
}

static int
load_ppcgot(struct ltelf *lte, GElf_Addr *ppcgotp)
{
	return load_dynamic_entry(lte, DT_PPC_GOT, ppcgotp);
}

static int
load_ppc64_glink(struct ltelf *lte, GElf_Addr *glinkp)
{
	return load_dynamic_entry(lte, DT_PPC64_GLINK, glinkp);
}

static int
nonzero_data(Elf_Data *data)
{
	/* We are not supposed to get here if there's no PLT.  */
	assert(data != NULL);

	unsigned char *buf = data->d_buf;
	if (buf == NULL)
		return 0;

	size_t i;
	for (i = 0; i < data->d_size; ++i)
		if (buf[i] != 0)
			return 1;
	return 0;
}

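/* Per-object ELF initialization: detect secure vs. BSS PLT, record
 * whether a BSS-PLT object was prelinked, compute the stub area
 * address, and, on PPC64, collect any xxxxxxxx.plt_call stub symbols
 * from the symbol table.  */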
int
arch_elf_init(struct ltelf *lte, struct library *lib)
{
	lte->arch.secure_plt = !(lte->plt_flags & SHF_EXECINSTR);

	/* For PPC32 BSS, it is important whether the binary was
	 * prelinked.  If the .plt section has no data (SHT_NOBITS),
	 * or if it contains zeroes, then this library is not
	 * prelinked, and we need to delay breakpoints.  */
	if (lte->ehdr.e_machine == EM_PPC && !lte->arch.secure_plt)
		lib->arch.bss_plt_prelinked = nonzero_data(lte->plt_data);
	else
		/* For cases where it's irrelevant, initialize the
		 * value to something conspicuous.  */
		lib->arch.bss_plt_prelinked = -1;

	if (lte->ehdr.e_machine == EM_PPC && lte->arch.secure_plt) {
		GElf_Addr ppcgot;
		if (load_ppcgot(lte, &ppcgot) < 0) {
			error(0, 0, "couldn't find DT_PPC_GOT");
			return -1;
		}
		GElf_Addr glink_vma = get_glink_vma(lte, ppcgot, lte->plt_data);

		assert(lte->relplt_size % 12 == 0);
		size_t count = lte->relplt_size / 12; /* 12 == size of a RELA entry */
		lte->arch.plt_stub_vma = glink_vma
			- (GElf_Addr)count * PPC_PLT_STUB_SIZE;
		debug(1, "stub_vma is %#" PRIx64, lte->arch.plt_stub_vma);

	} else if (lte->ehdr.e_machine == EM_PPC64) {
		GElf_Addr glink_vma;
		if (load_ppc64_glink(lte, &glink_vma) < 0) {
			error(0, 0, "couldn't find DT_PPC64_GLINK");
			return -1;
		}

		/* The first glink stub starts at offset 32.  */
		lte->arch.plt_stub_vma = glink_vma + 32;
	}

	/* On PPC64, look for stub symbols in the symbol table.  These
	 * are called xxxxxxxx.plt_call.callee_name@version+addend.  */
	if (lte->ehdr.e_machine == EM_PPC64
	    && lte->symtab != NULL && lte->strtab != NULL) {

		/* N.B. We can't simply skip the symbols that we fail
		 * to read or malloc.  There may be more than one stub
		 * per symbol name, and if we failed in one but
		 * succeeded in another, the PLT enabling code would
		 * have no way to tell that something is missing.  We
		 * could work around that, of course, but it doesn't
		 * seem worth the trouble.  So if anything fails, we
		 * just pretend that we don't have stub symbols at
		 * all, as if the binary were stripped.  */

		size_t i;
		for (i = 0; i < lte->symtab_count; ++i) {
			GElf_Sym sym;
			if (gelf_getsym(lte->symtab, i, &sym) == NULL) {
				struct library_symbol *sym, *next;
			fail:
				for (sym = lte->arch.stubs; sym != NULL; ) {
					next = sym->next;
					library_symbol_destroy(sym);
					free(sym);
					sym = next;
				}
				lte->arch.stubs = NULL;
				break;
			}

			const char *name = lte->strtab + sym.st_name;

#define STUBN ".plt_call."
			if ((name = strstr(name, STUBN)) == NULL)
				continue;
			name += sizeof(STUBN) - 1;
#undef STUBN

			size_t len;
			const char *ver = strchr(name, '@');
			if (ver != NULL) {
				len = ver - name;

			} else {
				/* If there is a "+" at all, check
				 * that the symbol name ends in "+0".  */
				const char *add = strrchr(name, '+');
				if (add != NULL) {
					assert(strcmp(add, "+0") == 0);
					len = add - name;
				} else {
					len = strlen(name);
				}
			}

			char *sym_name = strndup(name, len);
			struct library_symbol *libsym = malloc(sizeof(*libsym));
			if (sym_name == NULL || libsym == NULL) {
			fail2:
				free(sym_name);
				free(libsym);
				goto fail;
			}

			/* XXX The double cast should be removed when
			 * target_address_t becomes an integral type.  */
			target_address_t addr = (target_address_t)
				(uintptr_t)sym.st_value + lte->bias;
			if (library_symbol_init(libsym, addr, sym_name, 1,
						LS_TOPLT_EXEC) < 0)
				goto fail2;
			libsym->arch.type = PPC64_PLT_STUB;
			libsym->next = lte->arch.stubs;
			lte->arch.stubs = libsym;
		}
	}

	return 0;
}

static int
read_plt_slot_value(struct Process *proc, GElf_Addr addr, GElf_Addr *valp)
{
	/* On PPC64, we read from .plt, which contains 8-byte
	 * addresses.  On PPC32, we read from .plt, which contains
	 * 4-byte instructions, but each PLT entry is two instructions
	 * long, and either of them can change.  */
	uint64_t l;
	/* XXX double cast.  */
	if (read_target_8(proc, (target_address_t)(uintptr_t)addr, &l) < 0) {
		error(0, errno, "ptrace .plt slot value @%#" PRIx64, addr);
		return -1;
	}

	*valp = (GElf_Addr)l;
	return 0;
}

static int
unresolve_plt_slot(struct Process *proc, GElf_Addr addr, GElf_Addr value)
{
	/* We only modify plt_entry[0], which holds the resolved
	 * address of the routine.  We keep the TOC and environment
	 * pointers intact.  Hence the only adjustment that we need
	 * to make is to the IP.  */
	if (ptrace(PTRACE_POKETEXT, proc->pid, addr, value) < 0) {
		error(0, errno, "unresolve .plt slot");
		return -1;
	}
	return 0;
}

static void
mark_as_resolved(struct library_symbol *libsym, GElf_Addr value)
{
	libsym->arch.type = PPC_PLT_RESOLVED;
	libsym->arch.resolved_value = value;
}

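/* Create the symbol (or chain of stub symbols) on which breakpoints
 * for one PLT entry will be set.  Returns plt_default to fall back
 * to the generic code (PPC32), plt_ok with *RET filled in on
 * success, or plt_fail on error.  */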
enum plt_status
arch_elf_add_plt_entry(struct Process *proc, struct ltelf *lte,
		       const char *a_name, GElf_Rela *rela, size_t ndx,
		       struct library_symbol **ret)
{
	if (lte->ehdr.e_machine == EM_PPC)
		return plt_default;

	/* PPC64.  If we have stubs, we return a chain of breakpoint
	 * sites, one for each stub that corresponds to this PLT
	 * entry.  */
	struct library_symbol *chain = NULL;
	struct library_symbol **symp;
	for (symp = &lte->arch.stubs; *symp != NULL; ) {
		struct library_symbol *sym = *symp;
		if (strcmp(sym->name, a_name) != 0) {
			symp = &(*symp)->next;
			continue;
		}

		/* Re-chain the symbol from stubs to CHAIN.  */
		*symp = sym->next;
		sym->next = chain;
		chain = sym;
	}

	if (chain != NULL) {
		*ret = chain;
		return plt_ok;
	}

	/* We don't have stub symbols.  Find the corresponding .plt
	 * slot, and check whether it contains the corresponding PLT
	 * address (or 0 if the dynamic linker hasn't run yet).
	 * N.B. we don't want to read this from the ELF file, but
	 * from the process image.  That makes a difference if we are
	 * attaching to a running process.  */

	GElf_Addr plt_entry_addr = arch_plt_sym_val(lte, ndx, rela);
	GElf_Addr plt_slot_addr = rela->r_offset;
	assert(plt_slot_addr >= lte->plt_addr
	       && plt_slot_addr < lte->plt_addr + lte->plt_size);

	GElf_Addr plt_slot_value;
	if (read_plt_slot_value(proc, plt_slot_addr, &plt_slot_value) < 0)
		return plt_fail;

	char *name = strdup(a_name);
	struct library_symbol *libsym = malloc(sizeof(*libsym));
	if (name == NULL || libsym == NULL) {
		error(0, errno, "allocation for .plt slot");
	fail:
		free(name);
		free(libsym);
		return plt_fail;
	}

	/* XXX The double cast should be removed when
	 * target_address_t becomes an integral type.  */
	if (library_symbol_init(libsym,
				(target_address_t)(uintptr_t)plt_entry_addr,
				name, 1, LS_TOPLT_EXEC) < 0)
		goto fail;
	libsym->arch.plt_slot_addr = plt_slot_addr;

	if (plt_slot_value == plt_entry_addr || plt_slot_value == 0) {
		libsym->arch.type = PPC_PLT_UNRESOLVED;
		libsym->arch.resolved_value = plt_entry_addr;

	} else {
		/* Unresolve the .plt slot.  If the binary was
		 * prelinked, this makes the code invalid, because in
		 * the prelinked case the dynamic linker doesn't
		 * update .plt[0] and .plt[1] with the addresses of
		 * the resolver.  But we don't care: we will never
		 * need to enter the resolver.  That just means that
		 * we have to un-un-resolve this back before we
		 * detach.  */

		if (unresolve_plt_slot(proc, plt_slot_addr, plt_entry_addr) < 0) {
			library_symbol_destroy(libsym);
			goto fail;
		}
		mark_as_resolved(libsym, plt_slot_value);
	}

	*ret = libsym;
	return plt_ok;
}

void
arch_elf_destroy(struct ltelf *lte)
{
	struct library_symbol *sym;
	for (sym = lte->arch.stubs; sym != NULL; ) {
		struct library_symbol *next = sym->next;
		library_symbol_destroy(sym);
		free(sym);
		sym = next;
	}
}

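/* On-hit handler for the breakpoint that we keep inside the dynamic
 * linker, right after the instruction that updates a .plt slot (see
 * cb_keep_stepping_p below).  When it hits, the pending PLT entry
 * has just been resolved.  */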
static void
dl_plt_update_bp_on_hit(struct breakpoint *bp, struct Process *proc)
{
	debug(DEBUG_PROCESS, "pid=%d dl_plt_update_bp_on_hit %s(%p)",
	      proc->pid, breakpoint_name(bp), bp->addr);
	struct process_stopping_handler *self = proc->arch.handler;
	assert(self != NULL);

	struct library_symbol *libsym = self->breakpoint_being_enabled->libsym;
	GElf_Addr value;
	if (read_plt_slot_value(proc, libsym->arch.plt_slot_addr, &value) < 0)
		return;

	/* On PPC64, we rewrite the slot value.  */
	if (proc->e_machine == EM_PPC64)
		unresolve_plt_slot(proc, libsym->arch.plt_slot_addr,
				   libsym->arch.resolved_value);
	/* We mark the breakpoint as resolved on both arches.  */
	mark_as_resolved(libsym, value);

	/* cb_on_all_stopped asserts that HANDLER is NULL as a way to
	 * check that this hook was run and cleared it.  It's an
	 * error if it wasn't.  */
	proc->arch.handler = NULL;

	breakpoint_turn_off(bp, proc);
}

static void
cb_on_all_stopped(struct process_stopping_handler *self)
{
	/* Put that in for dl_plt_update_bp_on_hit to see.  */
	assert(self->task_enabling_breakpoint->arch.handler == NULL);
	self->task_enabling_breakpoint->arch.handler = self;

	linux_ptrace_disable_and_continue(self);
}

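/* Called after each single-step while an unresolved PLT entry is
 * being resolved.  Returns CBS_CONT to keep stepping until the .plt
 * slot changes, CBS_STOP once the entry is resolved, or CBS_FAIL on
 * error.  */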
static enum callback_status
cb_keep_stepping_p(struct process_stopping_handler *self)
{
	struct Process *proc = self->task_enabling_breakpoint;
	struct library_symbol *libsym = self->breakpoint_being_enabled->libsym;

	GElf_Addr value;
	if (read_plt_slot_value(proc, libsym->arch.plt_slot_addr, &value) < 0)
		return CBS_FAIL;

	/* In the UNRESOLVED state, RESOLVED_VALUE in fact contains
	 * the PLT entry value.  */
	if (value == libsym->arch.resolved_value)
		return CBS_CONT;

	debug(DEBUG_PROCESS, "pid=%d PLT got resolved to value %#"PRIx64,
	      proc->pid, value);

	/* The .plt slot got resolved!  We can migrate the breakpoint
	 * to RESOLVED and stop single-stepping.  */
	if (proc->e_machine == EM_PPC64
	    && unresolve_plt_slot(proc, libsym->arch.plt_slot_addr,
				  libsym->arch.resolved_value) < 0)
		return CBS_FAIL;

	/* Resolving on PPC64 consists of overwriting a doubleword in
	 * .plt.  That doubleword is then read back by a stub, and
	 * jumped on.  Hopefully we can assume that the doubleword
	 * update is done in a single place only, as it contains a
	 * final address.  We still need to look around for any sync
	 * instruction, but essentially it is safe to optimize away
	 * the single-stepping next time and install a post-update
	 * breakpoint.
	 *
	 * The situation on PPC32 BSS is more complicated.  The
	 * dynamic linker here updates potentially several
	 * instructions (XXX currently we assume two) and the rules
	 * are more complicated.  Sometimes it's enough to adjust just
	 * one of the addresses--the logic for generating an optimal
	 * dispatch depends on the relative addresses of the .plt
	 * entry and the jump destination.  We can't assume that the
	 * same instruction block does the update every time.  So on
	 * PPC32, we turn the optimization off and just step through
	 * it each time.  */
	if (proc->e_machine == EM_PPC)
		goto done;

	/* Install a breakpoint at the address where the change takes
	 * place.  If we fail, then that just means that we'll have
	 * to single-step the next time around as well.  */
	struct Process *leader = proc->leader;
	if (leader == NULL || leader->arch.dl_plt_update_bp != NULL)
		goto done;

	/* We need to install at the next instruction.  ADDR points
	 * to a store instruction, so moving the breakpoint one
	 * instruction forward is safe.  */
	target_address_t addr = get_instruction_pointer(proc) + 4;
	leader->arch.dl_plt_update_bp = insert_breakpoint(proc, addr, NULL);
	if (leader->arch.dl_plt_update_bp == NULL)
		goto done;

	static struct bp_callbacks dl_plt_update_cbs = {
		.on_hit = dl_plt_update_bp_on_hit,
	};
	leader->arch.dl_plt_update_bp->cbs = &dl_plt_update_cbs;

	/* Turn it off for now.  We will turn it on again when we hit
	 * the PLT entry that needs this.  */
	breakpoint_turn_off(leader->arch.dl_plt_update_bp, proc);

done:
	mark_as_resolved(libsym, value);

	return CBS_STOP;
}

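/* The on_continue callback for PLT breakpoints.  Dispatches on the
 * breakpoint type: start the single-stepping machinery for
 * unresolved entries (and unprelinked PPC32 BSS entries), or jump
 * straight to the remembered resolved address for resolved PPC64
 * entries.  */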
static void
ppc_plt_bp_continue(struct breakpoint *bp, struct Process *proc)
{
	switch (bp->libsym->arch.type) {
		target_address_t rv;
		struct Process *leader;
		void (*on_all_stopped)(struct process_stopping_handler *);
		enum callback_status (*keep_stepping_p)
			(struct process_stopping_handler *);

	case PPC_DEFAULT:
		assert(proc->e_machine == EM_PPC);
		assert(bp->libsym != NULL);
		assert(bp->libsym->lib->arch.bss_plt_prelinked == 0);
		/* fall-through */

	case PPC_PLT_UNRESOLVED:
		on_all_stopped = NULL;
		keep_stepping_p = NULL;
		leader = proc->leader;

		if (leader != NULL && leader->arch.dl_plt_update_bp != NULL
		    && breakpoint_turn_on(leader->arch.dl_plt_update_bp,
					  proc) >= 0)
			on_all_stopped = cb_on_all_stopped;
		else
			keep_stepping_p = cb_keep_stepping_p;

		if (process_install_stopping_handler
		    (proc, bp, on_all_stopped, keep_stepping_p, NULL) < 0) {
			error(0, 0, "ppc_plt_bp_continue: couldn't install"
			      " event handler");
			continue_after_breakpoint(proc, bp);
		}
		return;

	case PPC_PLT_RESOLVED:
		if (proc->e_machine == EM_PPC) {
			continue_after_breakpoint(proc, bp);
			return;
		}

		/* XXX The double cast should be removed when
		 * target_address_t becomes an integral type.  */
		rv = (target_address_t)
			(uintptr_t)bp->libsym->arch.resolved_value;
		set_instruction_pointer(proc, rv);
		continue_process(proc->pid);
		return;

	case PPC64_PLT_STUB:
		/* These should never be hit here.  */
		break;
	}

	assert(bp->libsym->arch.type != bp->libsym->arch.type);
	abort();
}

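/* The per-library and per-symbol arch hooks below mostly keep their
 * default values; lib->arch.bss_plt_prelinked and the symbol types
 * are filled in by arch_elf_init and arch_elf_add_plt_entry above.  */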
void
arch_library_init(struct library *lib)
{
}

void
arch_library_destroy(struct library *lib)
{
}

void
arch_library_clone(struct library *retp, struct library *lib)
{
}

int
arch_library_symbol_init(struct library_symbol *libsym)
{
	/* We set the type explicitly in the code above, where we
	 * have the necessary context.  This is for calls from
	 * ltrace-elf.c and such.  */
	libsym->arch.type = PPC_DEFAULT;
	return 0;
}

void
arch_library_symbol_destroy(struct library_symbol *libsym)
{
}

int
arch_library_symbol_clone(struct library_symbol *retp,
			  struct library_symbol *libsym)
{
	retp->arch = libsym->arch;
	return 0;
}

/* For some symbol types, we need to set up custom callbacks.  XXX we
 * don't need PROC here, we can store the data in BP if it is of
 * interest to us.  */
int
arch_breakpoint_init(struct Process *proc, struct breakpoint *bp)
{
	/* Artificial and entry-point breakpoints are plain.  */
	if (bp->libsym == NULL || bp->libsym->plt_type != LS_TOPLT_EXEC)
		return 0;

	/* On PPC, secure PLT and prelinked BSS PLT are plain.  */
	if (proc->e_machine == EM_PPC
	    && bp->libsym->lib->arch.bss_plt_prelinked != 0)
		return 0;

	/* On PPC64, stub PLT breakpoints are plain.  */
	if (proc->e_machine == EM_PPC64
	    && bp->libsym->arch.type == PPC64_PLT_STUB)
		return 0;

	static struct bp_callbacks cbs = {
		.on_continue = ppc_plt_bp_continue,
	};
	breakpoint_set_callbacks(bp, &cbs);
	return 0;
}

void
arch_breakpoint_destroy(struct breakpoint *bp)
{
}

int
arch_breakpoint_clone(struct breakpoint *retp, struct breakpoint *sbp)
{
	retp->arch = sbp->arch;
	return 0;
}

int
arch_process_init(struct Process *proc)
{
	proc->arch.dl_plt_update_bp = NULL;
	proc->arch.handler = NULL;
	return 0;
}

void
arch_process_destroy(struct Process *proc)
{
}

int
arch_process_clone(struct Process *retp, struct Process *proc)
{
	retp->arch = proc->arch;
	return 0;
}

int
arch_process_exec(struct Process *proc)
{
	return arch_process_init(proc);
}