blob: d962048e65cd22f29272fc2c3eeb51dc0e044b1c [file] [log] [blame]
Petr Machatacec06ec2012-04-10 13:31:55 +02001#include "config.h"
2
3#include <asm/unistd.h>
4#include <sys/types.h>
5#include <sys/wait.h>
6#include <assert.h>
7#include <errno.h>
Juan Cespedes5e01f651998-03-08 22:31:44 +01008#include <stdio.h>
Juan Cespedes504a3852003-02-04 23:24:38 +01009#include <stdlib.h>
Juan Cespedes1fe93d51998-03-13 00:29:21 +010010#include <string.h>
Juan Cespedes8f8282f2002-03-03 18:58:40 +010011#include <unistd.h>
Juan Cespedes5e01f651998-03-08 22:31:44 +010012
Petr Machatacec06ec2012-04-10 13:31:55 +020013#ifdef HAVE_LIBSELINUX
14# include <selinux/selinux.h>
15#endif
16
17#include "ptrace.h"
Juan Cespedesf7281232009-06-25 16:11:21 +020018#include "common.h"
Petr Machata55ed83b2007-05-17 16:24:15 +020019
/* If the system headers did not provide the constants, hard-code the normal
   values.  These mirror the definitions in <linux/ptrace.h>; the guard on
   PTRACE_EVENT_FORK assumes all of them appear together. */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_OLDSETOPTIONS 21
#define PTRACE_SETOPTIONS 0x4200
#define PTRACE_GETEVENTMSG 0x4201

/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#define PTRACE_O_TRACEFORK 0x00000002
#define PTRACE_O_TRACEVFORK 0x00000004
#define PTRACE_O_TRACECLONE 0x00000008
#define PTRACE_O_TRACEEXEC 0x00000010
#define PTRACE_O_TRACEVFORKDONE 0x00000020
#define PTRACE_O_TRACEEXIT 0x00000040

/* Wait extended result codes for the above trace options. */
#define PTRACE_EVENT_FORK 1
#define PTRACE_EVENT_VFORK 2
#define PTRACE_EVENT_CLONE 3
#define PTRACE_EVENT_EXEC 4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT 6

#endif /* PTRACE_EVENT_FORK */
Ian Wienand9a2ad352006-02-20 22:44:45 +010046
Luis Machado55c5feb2008-03-12 15:56:01 +010047#ifdef ARCH_HAVE_UMOVELONG
Juan Cespedesa8909f72009-04-28 20:02:41 +020048extern int arch_umovelong (Process *, void *, long *, arg_type_info *);
Juan Cespedesf1350522008-12-16 18:19:58 +010049int
Juan Cespedesa8909f72009-04-28 20:02:41 +020050umovelong (Process *proc, void *addr, long *result, arg_type_info *info) {
Luis Machado55c5feb2008-03-12 15:56:01 +010051 return arch_umovelong (proc, addr, result, info);
52}
53#else
54/* Read a single long from the process's memory address 'addr' */
Juan Cespedesf1350522008-12-16 18:19:58 +010055int
Juan Cespedesa8909f72009-04-28 20:02:41 +020056umovelong (Process *proc, void *addr, long *result, arg_type_info *info) {
Luis Machado55c5feb2008-03-12 15:56:01 +010057 long pointed_to;
58
59 errno = 0;
60 pointed_to = ptrace (PTRACE_PEEKTEXT, proc->pid, addr, 0);
61 if (pointed_to == -1 && errno)
62 return -errno;
63
64 *result = pointed_to;
Arnaud Patardf16fcff2010-01-08 08:40:19 -050065 if (info) {
66 switch(info->type) {
67 case ARGTYPE_INT:
68 *result &= 0x00000000ffffffffUL;
69 default:
70 break;
71 };
72 }
Luis Machado55c5feb2008-03-12 15:56:01 +010073 return 0;
74}
75#endif
76
/* Print a one-time diagnostic explaining a likely cause of ptrace
 * failure.  Only has an effect when built with SELinux support; the
 * PID argument is currently unused. */
void
trace_fail_warning(pid_t pid)
{
	/* This was adapted from GDB. */
#ifdef HAVE_LIBSELINUX
	/* Warn at most once per run. */
	static int checked = 0;
	if (checked)
		return;
	checked = 1;

	/* -1 is returned for errors, 0 if it has no effect, 1 if
	 * PTRACE_ATTACH is forbidden. */
	if (security_get_boolean_active("deny_ptrace") == 1)
		fprintf(stderr,
"The SELinux boolean 'deny_ptrace' is enabled, which may prevent ltrace from\n"
"tracing other processes. You can disable this process attach protection by\n"
"issuing 'setsebool deny_ptrace=0' in the superuser context.\n");
#endif /* HAVE_LIBSELINUX */
}
96
97void
98trace_me(void)
99{
Petr Machata26627682011-07-08 18:15:32 +0200100 debug(DEBUG_PROCESS, "trace_me: pid=%d", getpid());
Ian Wienand2d45b1a2006-02-20 22:48:07 +0100101 if (ptrace(PTRACE_TRACEME, 0, 1, 0) < 0) {
Juan Cespedes5e01f651998-03-08 22:31:44 +0100102 perror("PTRACE_TRACEME");
Petr Machatacec06ec2012-04-10 13:31:55 +0200103 trace_fail_warning(getpid());
Juan Cespedes5e01f651998-03-08 22:31:44 +0100104 exit(1);
105 }
106}
107
/* There's a (hopefully) brief period of time after the child process
 * exec's when we can't trace it yet.  Poll with PTRACE_PEEKTEXT until
 * the kernel has the process ready, up to 100 attempts 1ms apart. */
void
wait_for_proc(pid_t pid)
{
	int attempts = 100;
	while (attempts-- > 0) {
		/* We read from memory address 0, but that shouldn't
		 * be a problem: the reading will just fail.  We are
		 * looking for a particular reason of failure. */
		long word = ptrace(PTRACE_PEEKTEXT, pid, 0, 0);
		if (word != -1 || errno != ESRCH)
			return;
		usleep(1000);
	}

	fprintf(stderr, "\
I consistently fail to read a word from the freshly launched process.\n\
I'll now try to proceed with tracing, but this shouldn't be happening.\n");
}
130
Juan Cespedesf1350522008-12-16 18:19:58 +0100131int
Petr Machatacec06ec2012-04-10 13:31:55 +0200132trace_pid(pid_t pid)
133{
Petr Machata26627682011-07-08 18:15:32 +0200134 debug(DEBUG_PROCESS, "trace_pid: pid=%d", pid);
Petr Machatacec06ec2012-04-10 13:31:55 +0200135 /* This shouldn't emit error messages, as there are legitimate
136 * reasons that the PID can't be attached: like it may have
137 * already ended. */
138 if (ptrace(PTRACE_ATTACH, pid, 1, 0) < 0)
Juan Cespedes273ea6d1998-03-14 23:02:40 +0100139 return -1;
Petr Machata89a53602007-01-25 18:05:44 +0100140
Juan Cespedes714ee9d2009-04-07 13:28:54 +0200141 /* man ptrace: PTRACE_ATTACH attaches to the process specified
142 in pid. The child is sent a SIGSTOP, but will not
143 necessarily have stopped by the completion of this call;
144 use wait() to wait for the child to stop. */
Petr Machata9a5420c2011-07-09 11:21:23 +0200145 if (waitpid (pid, NULL, __WALL) != pid) {
Juan Cespedes714ee9d2009-04-07 13:28:54 +0200146 perror ("trace_pid: waitpid");
Petr Machata9a5420c2011-07-09 11:21:23 +0200147 return -1;
Juan Cespedes714ee9d2009-04-07 13:28:54 +0200148 }
149
Juan Cespedes273ea6d1998-03-14 23:02:40 +0100150 return 0;
151}
152
Juan Cespedesf1350522008-12-16 18:19:58 +0100153void
Juan Cespedesa8909f72009-04-28 20:02:41 +0200154trace_set_options(Process *proc, pid_t pid) {
Ian Wienand9a2ad352006-02-20 22:44:45 +0100155 if (proc->tracesysgood & 0x80)
156 return;
Petr Machata55ed83b2007-05-17 16:24:15 +0200157
Petr Machata26627682011-07-08 18:15:32 +0200158 debug(DEBUG_PROCESS, "trace_set_options: pid=%d", pid);
Juan Cespedescd8976d2009-05-14 13:47:58 +0200159
Juan Cespedes1e583132009-04-07 18:17:11 +0200160 long options = PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK |
161 PTRACE_O_TRACEVFORK | PTRACE_O_TRACECLONE |
162 PTRACE_O_TRACEEXEC;
Petr Machata55ed83b2007-05-17 16:24:15 +0200163 if (ptrace(PTRACE_SETOPTIONS, pid, 0, options) < 0 &&
164 ptrace(PTRACE_OLDSETOPTIONS, pid, 0, options) < 0) {
Ian Wienand9a2ad352006-02-20 22:44:45 +0100165 perror("PTRACE_SETOPTIONS");
166 return;
167 }
168 proc->tracesysgood |= 0x80;
169}
170
/* Detach from PID with PTRACE_DETACH.  The return value of ptrace is
 * deliberately ignored -- NOTE(review): presumably detach failure is
 * unrecoverable here anyway; confirm with callers. */
void
untrace_pid(pid_t pid) {
	debug(DEBUG_PROCESS, "untrace_pid: pid=%d", pid);
	ptrace(PTRACE_DETACH, pid, 1, 0);
}
176
/* Resume PID, delivering SIGNUM to it.  PTRACE_SYSCALL is used so the
 * task keeps stopping at syscall entry/exit. */
void
continue_after_signal(pid_t pid, int signum) {
	debug(DEBUG_PROCESS, "continue_after_signal: pid=%d, signum=%d", pid, signum);
	ptrace(PTRACE_SYSCALL, pid, 0, signum);
}
182
183static enum ecb_status
184event_for_pid(Event * event, void * data)
185{
186 if (event->proc != NULL && event->proc->pid == (pid_t)(uintptr_t)data)
187 return ecb_yield;
188 return ecb_cont;
189}
190
191static int
192have_events_for(pid_t pid)
193{
194 return each_qd_event(event_for_pid, (void *)(uintptr_t)pid) != NULL;
195}
196
197void
198continue_process(pid_t pid)
199{
200 debug(DEBUG_PROCESS, "continue_process: pid=%d", pid);
Petr Machata98f09922011-07-09 10:55:29 +0200201
202 /* Only really continue the process if there are no events in
Petr Machata36d19822011-10-21 16:03:45 +0200203 the queue for this process. Otherwise just wait for the
204 other events to arrive. */
Petr Machata98f09922011-07-09 10:55:29 +0200205 if (!have_events_for(pid))
206 /* We always trace syscalls to control fork(),
207 * clone(), execve()... */
208 ptrace(PTRACE_SYSCALL, pid, 0, 0);
209 else
210 debug(DEBUG_PROCESS,
211 "putting off the continue, events in que.");
212}
213
/**
 * This is used for bookkeeping related to PIDs that the event
 * handlers work with.
 */
struct pid_task {
	pid_t pid;	/* This may be 0 for tasks that exited
			 * mid-handling. */
	int sigstopped : 1;	/* We sent this task a SIGSTOP. */
	int got_event : 1;	/* We saw at least one event for it. */
	int delivered : 1;	/* The SIGSTOP was seen (and sunk). */
	int vforked : 1;	/* Task is a vfork parent; don't stop it. */
	int sysret : 1;		/* A syscall return was handled for it. */
} * pids;
/* NOTE(review): the file-scope `pids` pointer declared above appears
 * unused -- handlers carry their own pid_set -- looks vestigial;
 * confirm before removing. */

/* Growable array of pid_task records owned by an event handler. */
struct pid_set {
	struct pid_task * tasks;
	size_t count;	/* Number of records in use. */
	size_t alloc;	/* Allocated capacity of TASKS. */
};
233
/**
 * Breakpoint re-enablement. When we hit a breakpoint, we must
 * disable it, single-step, and re-enable it. That single-step can be
 * done only by one task in a task group, while others are stopped,
 * otherwise the processes would race for who sees the breakpoint
 * disabled and who doesn't. The following is to keep track of it
 * all.
 */
struct process_stopping_handler
{
	Event_Handler super;

	/* The task that is doing the re-enablement. */
	Process * task_enabling_breakpoint;

	/* The pointer being re-enabled. */
	Breakpoint * breakpoint_being_enabled;

	/* Artificial atomic skip breakpoint, if any needed. */
	void *atomic_skip_bp_addr;

	/* State machine driven by process_stopping_on_event. */
	enum {
		/* We are waiting for everyone to land in t/T. */
		psh_stopping = 0,

		/* We are doing the PTRACE_SINGLESTEP. */
		psh_singlestep,

		/* We are waiting for all the SIGSTOPs to arrive so
		 * that we can sink them. */
		psh_sinking,

		/* This is for tracking the ugly workaround. */
		psh_ugly_workaround,
	} state;

	/* Non-zero when ltrace is shutting down; triggers the ugly
	 * workaround instead of a plain continue. */
	int exiting;

	/* Per-task SIGSTOP bookkeeping. */
	struct pid_set pids;
};
274
Petr Machata98f09922011-07-09 10:55:29 +0200275static struct pid_task *
276get_task_info(struct pid_set * pids, pid_t pid)
277{
Petr Machata750ca8c2011-10-06 14:29:34 +0200278 assert(pid != 0);
Petr Machata98f09922011-07-09 10:55:29 +0200279 size_t i;
280 for (i = 0; i < pids->count; ++i)
281 if (pids->tasks[i].pid == pid)
282 return &pids->tasks[i];
283
284 return NULL;
285}
286
287static struct pid_task *
288add_task_info(struct pid_set * pids, pid_t pid)
289{
290 if (pids->count == pids->alloc) {
291 size_t ns = (2 * pids->alloc) ?: 4;
292 struct pid_task * n = realloc(pids->tasks,
293 sizeof(*pids->tasks) * ns);
294 if (n == NULL)
295 return NULL;
296 pids->tasks = n;
297 pids->alloc = ns;
298 }
299 struct pid_task * task_info = &pids->tasks[pids->count++];
300 memset(task_info, 0, sizeof(*task_info));
301 task_info->pid = pid;
302 return task_info;
303}
304
/* each_task callback: return pcb_cont (skip) for tasks that are
 * already stopped, zombie, or gone; pcb_stop for tasks still running.
 * When DATA is non-NULL the observed process_status is stored there
 * for the caller. */
static enum pcb_status
task_stopped(Process * task, void * data)
{
	enum process_status st = process_status(task->pid);
	if (data != NULL)
		*(enum process_status *)data = st;

	/* If the task is already stopped, don't worry about it.
	 * Likewise if it managed to become a zombie or terminate in
	 * the meantime. This can happen when the whole thread group
	 * is terminating. */
	switch (st) {
	case ps_invalid:
	case ps_tracing_stop:
	case ps_zombie:
		return pcb_cont;
	case ps_sleeping:
	case ps_stop:
	case ps_other:
		return pcb_stop;
	}

	/* All enum values are handled above; anything else means the
	 * enum and this switch went out of sync. */
	abort ();
}
329
330/* Task is blocked if it's stopped, or if it's a vfork parent. */
331static enum pcb_status
332task_blocked(Process * task, void * data)
333{
334 struct pid_set * pids = data;
335 struct pid_task * task_info = get_task_info(pids, task->pid);
336 if (task_info != NULL
337 && task_info->vforked)
338 return pcb_cont;
339
340 return task_stopped(task, NULL);
341}
342
343static Event * process_vfork_on_event(Event_Handler * super, Event * event);
344
345static enum pcb_status
346task_vforked(Process * task, void * data)
347{
348 if (task->event_handler != NULL
349 && task->event_handler->on_event == &process_vfork_on_event)
350 return pcb_stop;
351 return pcb_cont;
352}
353
354static int
355is_vfork_parent(Process * task)
356{
357 return each_task(task->leader, &task_vforked, NULL) != NULL;
358}
359
/* each_task callback: ensure TASK has a bookkeeping record and send
 * it SIGSTOP unless it is already stopped, freshly created, a vfork
 * parent, or still waiting for an earlier SIGSTOP.  Returns pcb_stop
 * only on allocation failure (which also tears the handler down). */
static enum pcb_status
send_sigstop(Process * task, void * data)
{
	Process * leader = task->leader;
	struct pid_set * pids = data;

	/* Look for pre-existing task record, or add new. */
	struct pid_task * task_info = get_task_info(pids, task->pid);
	if (task_info == NULL)
		task_info = add_task_info(pids, task->pid);
	if (task_info == NULL) {
		perror("send_sigstop: add_task_info");
		destroy_event_handler(leader);
		/* Signal failure upwards. */
		return pcb_stop;
	}

	/* This task still has not been attached to. It should be
	   stopped by the kernel. */
	if (task->state == STATE_BEING_CREATED)
		return pcb_cont;

	/* Don't bother sending SIGSTOP if we are already stopped, or
	 * if we sent the SIGSTOP already, which happens when we are
	 * handling "onexit" and inherited the handler from breakpoint
	 * re-enablement. */
	enum process_status st;
	if (task_stopped(task, &st) == pcb_cont)
		return pcb_cont;
	if (task_info->sigstopped) {
		if (!task_info->delivered)
			return pcb_cont;
		/* Previous SIGSTOP was consumed; arm for another. */
		task_info->delivered = 0;
	}

	/* Also don't attempt to stop the process if it's a parent of
	 * vforked process. We set up event handler specially to hint
	 * us. In that case parent is in D state, which we use to
	 * weed out unnecessary looping. */
	if (st == ps_sleeping
	    && is_vfork_parent (task)) {
		task_info->vforked = 1;
		return pcb_cont;
	}

	if (task_kill(task->pid, SIGSTOP) >= 0) {
		debug(DEBUG_PROCESS, "send SIGSTOP to %d", task->pid);
		task_info->sigstopped = 1;
	} else
		fprintf(stderr,
			"Warning: couldn't send SIGSTOP to %d\n", task->pid);

	return pcb_cont;
}
414
/* On certain kernels, detaching right after a singlestep causes the
   tracee to be killed with a SIGTRAP (this even though the singlestep
   was properly caught by waitpid).  The ugly workaround is to put a
   breakpoint where IP points and let the process continue.  After
   this the breakpoint can be retracted and the process detached. */
static void
ugly_workaround(Process * proc)
{
	void * ip = get_instruction_pointer(proc);
	/* Reuse an existing breakpoint at IP if there is one. */
	Breakpoint * sbp = dict_find_entry(proc->leader->breakpoints, ip);
	if (sbp != NULL)
		enable_breakpoint(proc, sbp);
	else
		insert_breakpoint(proc, ip, NULL, 1);
	ptrace(PTRACE_CONT, proc->pid, 0, 0);
}
431
/* Finish the stop-singlestep-reenable cycle: continue every task
 * whose SIGSTOP was delivered (or that had a sysret handled) and
 * uninstall the handler.  When ltrace itself is exiting, switch to
 * the ugly-workaround state instead. */
static void
process_stopping_done(struct process_stopping_handler * self, Process * leader)
{
	debug(DEBUG_PROCESS, "process stopping done %d",
	      self->task_enabling_breakpoint->pid);
	size_t i;
	if (!self->exiting) {
		for (i = 0; i < self->pids.count; ++i)
			if (self->pids.tasks[i].pid != 0
			    && (self->pids.tasks[i].delivered
				|| self->pids.tasks[i].sysret))
				continue_process(self->pids.tasks[i].pid);
		continue_process(self->task_enabling_breakpoint->pid);
		destroy_event_handler(leader);
	} else {
		self->state = psh_ugly_workaround;
		ugly_workaround(self->task_enabling_breakpoint);
	}
}
451
452/* Before we detach, we need to make sure that task's IP is on the
453 * edge of an instruction. So for tasks that have a breakpoint event
454 * in the queue, we adjust the instruction pointer, just like
455 * continue_after_breakpoint does. */
456static enum ecb_status
457undo_breakpoint(Event * event, void * data)
458{
459 if (event != NULL
460 && event->proc->leader == data
461 && event->type == EVENT_BREAKPOINT)
462 set_instruction_pointer(event->proc, event->e_un.brk_addr);
463 return ecb_cont;
464}
465
/* each_task callback: detach from every task except the one passed as
 * DATA. */
static enum pcb_status
untrace_task(Process * task, void * data)
{
	if (task == data)
		return pcb_cont;
	untrace_pid(task->pid);
	return pcb_cont;
}
473
474static enum pcb_status
475remove_task(Process * task, void * data)
476{
477 /* Don't untrace leader just yet. */
478 if (task != data)
479 remove_process(task);
480 return pcb_cont;
481}
482
/* Fully let go of LEADER's thread group: fix up IPs for queued
 * breakpoint events, remove all breakpoints, detach from tasks that
 * were attached via -p, then remove every task and finally the leader
 * itself. */
static void
detach_process(Process * leader)
{
	each_qd_event(&undo_breakpoint, leader);
	disable_all_breakpoints(leader);

	/* Now untrace the process, if it was attached to by -p. */
	struct opt_p_t * it;
	for (it = opt_p; it != NULL; it = it->next) {
		Process * proc = pid2proc(it->pid);
		if (proc == NULL)
			continue;
		if (proc->leader == leader) {
			each_task(leader, &untrace_task, NULL);
			break;
		}
	}
	/* Remove all non-leader tasks first, then tear down the
	 * handler and the leader record. */
	each_task(leader, &remove_task, leader);
	destroy_event_handler(leader);
	remove_task(leader, NULL);
}
504
/* Record that an event arrived for TASK_INFO and sink the SIGSTOP we
 * sent it, if this event is that SIGSTOP.  *EVENTP is set to NULL
 * when the event is consumed. */
static void
handle_stopping_event(struct pid_task * task_info, Event ** eventp)
{
	/* Mark all events, so that we know whom to SIGCONT later. */
	if (task_info != NULL)
		task_info->got_event = 1;

	Event * event = *eventp;

	/* In every state, sink SIGSTOP events for tasks that it was
	 * sent to. */
	if (task_info != NULL
	    && event->type == EVENT_SIGNAL
	    && event->e_un.signum == SIGSTOP) {
		debug(DEBUG_PROCESS, "SIGSTOP delivered to %d", task_info->pid);
		if (task_info->sigstopped
		    && !task_info->delivered) {
			task_info->delivered = 1;
			*eventp = NULL; // sink the event
		} else
			fprintf(stderr, "suspicious: %d got SIGSTOP, but "
				"sigstopped=%d and delivered=%d\n",
				task_info->pid, task_info->sigstopped,
				task_info->delivered);
	}
}
531
Petr Machata98f09922011-07-09 10:55:29 +0200532/* Some SIGSTOPs may have not been delivered to their respective tasks
533 * yet. They are still in the queue. If we have seen an event for
534 * that process, continue it, so that the SIGSTOP can be delivered and
Petr Machata36d19822011-10-21 16:03:45 +0200535 * caught by ltrace. We don't mind that the process is after
536 * breakpoint (and therefore potentially doesn't have aligned IP),
537 * because the signal will be delivered without the process actually
538 * starting. */
Petr Machata98f09922011-07-09 10:55:29 +0200539static void
540continue_for_sigstop_delivery(struct pid_set * pids)
541{
542 size_t i;
543 for (i = 0; i < pids->count; ++i) {
Petr Machata750ca8c2011-10-06 14:29:34 +0200544 if (pids->tasks[i].pid != 0
545 && pids->tasks[i].sigstopped
Petr Machata98f09922011-07-09 10:55:29 +0200546 && !pids->tasks[i].delivered
547 && pids->tasks[i].got_event) {
548 debug(DEBUG_PROCESS, "continue %d for SIGSTOP delivery",
549 pids->tasks[i].pid);
550 ptrace(PTRACE_SYSCALL, pids->tasks[i].pid, 0, 0);
551 }
552 }
Juan Cespedes5e01f651998-03-08 22:31:44 +0100553}
554
Petr Machata98f09922011-07-09 10:55:29 +0200555static int
Petr Machata750ca8c2011-10-06 14:29:34 +0200556event_exit_p(Event * event)
557{
558 return event != NULL && (event->type == EVENT_EXIT
559 || event->type == EVENT_EXIT_SIGNAL);
560}
561
562static int
Petr Machata98f09922011-07-09 10:55:29 +0200563event_exit_or_none_p(Event * event)
Petr Machataf789c9c2011-07-09 10:54:27 +0200564{
Petr Machata750ca8c2011-10-06 14:29:34 +0200565 return event == NULL || event_exit_p(event)
Petr Machata98f09922011-07-09 10:55:29 +0200566 || event->type == EVENT_NONE;
567}
568
/* If TASK_INFO is still owed its SIGSTOP, continue it so the signal
 * can arrive; otherwise check whether every tracked task has received
 * its SIGSTOP.  Returns non-zero when all SIGSTOPs are accounted for
 * (i.e. the caller may finish the stop sequence). */
static int
await_sigstop_delivery(struct pid_set * pids, struct pid_task * task_info,
		       Event * event)
{
	/* If we still didn't get our SIGSTOP, continue the process
	 * and carry on. */
	if (event != NULL && !event_exit_or_none_p(event)
	    && task_info != NULL && task_info->sigstopped) {
		debug(DEBUG_PROCESS, "continue %d for SIGSTOP delivery",
		      task_info->pid);
		/* We should get the signal the first thing
		 * after this, so it should be OK to continue
		 * even if we are over a breakpoint. */
		ptrace(PTRACE_SYSCALL, task_info->pid, 0, 0);

	} else {
		/* If all SIGSTOPs were delivered, uninstall the
		 * handler and continue everyone. */
		/* XXX I suspect that we should check tasks that are
		 * still around. As things are now, there should be a
		 * race between waiting for everyone to stop and one
		 * of the tasks exiting. */
		int all_clear = 1;
		size_t i;
		for (i = 0; i < pids->count; ++i)
			if (pids->tasks[i].pid != 0
			    && pids->tasks[i].sigstopped
			    && !pids->tasks[i].delivered) {
				all_clear = 0;
				break;
			}
		return all_clear;
	}

	return 0;
}
605
Petr Machata590c8082011-08-20 22:45:26 +0200606static int
607all_stops_accountable(struct pid_set * pids)
608{
609 size_t i;
610 for (i = 0; i < pids->count; ++i)
611 if (pids->tasks[i].pid != 0
612 && !pids->tasks[i].got_event
613 && !have_events_for(pids->tasks[i].pid))
614 return 0;
615 return 1;
616}
617
/* The protocol is: 0 for success, negative for failure, positive if
 * default singlestep is to be used.  Architectures that can't
 * singlestep through atomic sequences override this via
 * ARCH_HAVE_ATOMIC_SINGLESTEP and use ADD_CB to plant skip
 * breakpoints. */
int arch_atomic_singlestep(struct Process *proc, Breakpoint *sbp,
			   int (*add_cb)(void *addr, void *data),
			   void *add_cb_data);

#ifndef ARCH_HAVE_ATOMIC_SINGLESTEP
/* Default: always request the plain PTRACE_SINGLESTEP path. */
int
arch_atomic_singlestep(struct Process *proc, Breakpoint *sbp,
		       int (*add_cb)(void *addr, void *data),
		       void *add_cb_data)
{
	return 1;
}
#endif
633
/* Callback handed to arch_atomic_singlestep: record ADDR and plant a
 * breakpoint just past it so an atomic sequence can be skipped. */
static int
atomic_singlestep_add_bp(void *addr, void *data)
{
	struct process_stopping_handler *self = data;
	struct Process *proc = self->task_enabling_breakpoint;

	/* Only support single address as of now. */
	assert(self->atomic_skip_bp_addr == NULL);

	/* NOTE(review): `addr + 4` is void* arithmetic (GNU extension)
	 * and assumes a fixed 4-byte instruction length -- presumably
	 * only correct on the RISC targets that use this path;
	 * confirm against the arch backends. */
	self->atomic_skip_bp_addr = addr + 4;
	insert_breakpoint(proc->leader, self->atomic_skip_bp_addr, NULL, 1);

	return 0;
}
648
/* Step the re-enabling task over one instruction.  Gives the
 * architecture hook first crack (for atomic sequences); falls back to
 * PTRACE_SINGLESTEP.  Returns 0 on success, negative on failure. */
static int
singlestep(struct process_stopping_handler *self)
{
	struct Process *proc = self->task_enabling_breakpoint;

	int status = arch_atomic_singlestep(self->task_enabling_breakpoint,
					    self->breakpoint_being_enabled,
					    &atomic_singlestep_add_bp, self);

	/* Propagate failure and success. */
	if (status <= 0)
		return status;

	/* Otherwise do the default action: singlestep. */
	debug(1, "PTRACE_SINGLESTEP");
	if (ptrace(PTRACE_SINGLESTEP, proc->pid, 0, 0)) {
		perror("PTRACE_SINGLESTEP");
		return -1;
	}
	return 0;
}
670
/* Common tail after a singlestep attempt (successful or not): push
 * pending SIGSTOPs along, consume the breakpoint event, retract any
 * atomic-skip breakpoint, and mark re-enablement finished. */
static void
post_singlestep(struct process_stopping_handler *self, Event **eventp)
{
	continue_for_sigstop_delivery(&self->pids);

	if ((*eventp)->type == EVENT_BREAKPOINT)
		*eventp = NULL; // handled

	if (self->atomic_skip_bp_addr != 0)
		delete_breakpoint(self->task_enabling_breakpoint->leader,
				  self->atomic_skip_bp_addr);

	self->breakpoint_being_enabled = NULL;
}
685
686static void
687singlestep_error(struct process_stopping_handler *self, Event **eventp)
688{
689 struct Process *teb = self->task_enabling_breakpoint;
690 Breakpoint *sbp = self->breakpoint_being_enabled;
691 fprintf(stderr, "%d couldn't singlestep over %s (%p)\n",
692 teb->pid, sbp->libsym != NULL ? sbp->libsym->name : NULL,
693 sbp->addr);
694 delete_breakpoint(teb->leader, sbp->addr);
695 post_singlestep(self, eventp);
Petr Machata06986d52011-11-02 13:22:46 +0100696}
697
/* This event handler is installed when we are in the process of
 * stopping the whole thread group to do the pointer re-enablement for
 * one of the threads. We pump all events to the queue for later
 * processing while we wait for all the threads to stop. When this
 * happens, we let the re-enablement thread to PTRACE_SINGLESTEP,
 * re-enable, and continue everyone. */
static Event *
process_stopping_on_event(Event_Handler * super, Event * event)
{
	struct process_stopping_handler * self = (void *)super;
	Process * task = event->proc;
	Process * leader = task->leader;
	Breakpoint * sbp = self->breakpoint_being_enabled;
	Process * teb = self->task_enabling_breakpoint;

	debug(DEBUG_PROCESS,
	      "pid %d; event type %d; state %d",
	      task->pid, event->type, self->state);

	struct pid_task * task_info = get_task_info(&self->pids, task->pid);
	if (task_info == NULL)
		fprintf(stderr, "new task??? %d\n", task->pid);
	/* May sink EVENT (set it to NULL) if it was our SIGSTOP. */
	handle_stopping_event(task_info, &event);

	int state = self->state;
	int event_to_queue = !event_exit_or_none_p(event);

	/* Deactivate the entry if the task exits. */
	if (event_exit_p(event) && task_info != NULL)
		task_info->pid = 0;

	/* Always handle sysrets. Whether sysret occurred and what
	 * sys it rets from may need to be determined based on process
	 * stack, so we need to keep that in sync with reality. Note
	 * that we don't continue the process after the sysret is
	 * handled. See continue_after_syscall. */
	if (event != NULL && event->type == EVENT_SYSRET) {
		debug(1, "%d LT_EV_SYSRET", event->proc->pid);
		event_to_queue = 0;
		task_info->sysret = 1;
	}

	/* State machine: psh_stopping -> psh_singlestep ->
	 * psh_sinking (via the in-switch label); psh_ugly_workaround
	 * is entered separately from process_stopping_done. */
	switch (state) {
	case psh_stopping:
		/* If everyone is stopped, singlestep. */
		if (each_task(leader, &task_blocked, &self->pids) == NULL) {
			debug(DEBUG_PROCESS, "all stopped, now SINGLESTEP %d",
			      teb->pid);
			if (sbp->enabled)
				disable_breakpoint(teb, sbp);
			if (singlestep(self) < 0) {
				singlestep_error(self, &event);
				goto psh_sinking;
			}

			self->state = state = psh_singlestep;
		}
		break;

	case psh_singlestep:
		/* In singlestep state, breakpoint signifies that we
		 * have now stepped, and can re-enable the breakpoint. */
		if (event != NULL && task == teb) {

			/* This is not the singlestep that we are waiting for. */
			if (event->type == EVENT_SIGNAL) {
				if (singlestep(self) < 0) {
					singlestep_error(self, &event);
					goto psh_sinking;
				}
				break;
			}

			/* Essentially we don't care what event caused
			 * the thread to stop. We can do the
			 * re-enablement now. */
			if (sbp->enabled)
				enable_breakpoint(teb, sbp);

			post_singlestep(self, &event);
			goto psh_sinking;
		}
		break;

	psh_sinking:
		/* Entered via goto; deliberately falls through into
		 * the psh_sinking case below. */
		state = self->state = psh_sinking;
	case psh_sinking:
		if (await_sigstop_delivery(&self->pids, task_info, event))
			process_stopping_done(self, leader);
		break;

	case psh_ugly_workaround:
		if (event == NULL)
			break;
		if (event->type == EVENT_BREAKPOINT) {
			undo_breakpoint(event, leader);
			if (task == teb)
				self->task_enabling_breakpoint = NULL;
		}
		if (self->task_enabling_breakpoint == NULL
		    && all_stops_accountable(&self->pids)) {
			undo_breakpoint(event, leader);
			detach_process(leader);
			event = NULL; // handled
		}
	}

	if (event != NULL && event_to_queue) {
		enque_event(event);
		event = NULL; // sink the event
	}

	return event;
}
812
813static void
814process_stopping_destroy(Event_Handler * super)
815{
816 struct process_stopping_handler * self = (void *)super;
Petr Machata98f09922011-07-09 10:55:29 +0200817 free(self->pids.tasks);
Juan Cespedes5e01f651998-03-08 22:31:44 +0100818}
Juan Cespedes8cc1b9d2002-03-01 19:54:23 +0100819
Juan Cespedesf1350522008-12-16 18:19:58 +0100820void
Petr Machata26627682011-07-08 18:15:32 +0200821continue_after_breakpoint(Process *proc, Breakpoint *sbp)
822{
Juan Cespedes5c3fe062004-06-14 18:08:37 +0200823 set_instruction_pointer(proc, sbp->addr);
Juan Cespedes8f8282f2002-03-03 18:58:40 +0100824 if (sbp->enabled == 0) {
825 continue_process(proc->pid);
826 } else {
Petr Machata26627682011-07-08 18:15:32 +0200827 debug(DEBUG_PROCESS,
828 "continue_after_breakpoint: pid=%d, addr=%p",
829 proc->pid, sbp->addr);
Arnaud Patardf3d1c532010-01-08 08:40:04 -0500830#if defined __sparc__ || defined __ia64___ || defined __mips__
Ian Wienand9a2ad352006-02-20 22:44:45 +0100831 /* we don't want to singlestep here */
Juan Cespedes5c3fe062004-06-14 18:08:37 +0200832 continue_process(proc->pid);
833#else
Petr Machata98f09922011-07-09 10:55:29 +0200834 struct process_stopping_handler * handler
835 = calloc(sizeof(*handler), 1);
836 if (handler == NULL) {
837 perror("malloc breakpoint disable handler");
838 fatal:
839 /* Carry on not bothering to re-enable. */
840 continue_process(proc->pid);
841 return;
842 }
843
844 handler->super.on_event = process_stopping_on_event;
845 handler->super.destroy = process_stopping_destroy;
846 handler->task_enabling_breakpoint = proc;
847 handler->breakpoint_being_enabled = sbp;
848 install_event_handler(proc->leader, &handler->super);
849
850 if (each_task(proc->leader, &send_sigstop,
851 &handler->pids) != NULL)
852 goto fatal;
853
854 /* And deliver the first fake event, in case all the
855 * conditions are already fulfilled. */
856 Event ev;
857 ev.type = EVENT_NONE;
858 ev.proc = proc;
859 process_stopping_on_event(&handler->super, &ev);
Juan Cespedes5c3fe062004-06-14 18:08:37 +0200860#endif
Juan Cespedes8f8282f2002-03-03 18:58:40 +0100861 }
862}
863
/**
 * Ltrace exit. When we are about to exit, we have to go through all
 * the processes, stop them all, remove all the breakpoints, and then
 * detach the processes that we attached to using -p. If we left the
 * other tasks running, they might hit stray return breakpoints and
 * produce artifacts, so we better stop everyone, even if it's a bit
 * of extra work.
 */
struct ltrace_exiting_handler
{
	Event_Handler super;	/* Base handler; on_event/destroy hooks. */
	struct pid_set pids;	/* Per-task stop/SIGSTOP bookkeeping. */
};
877
Petr Machata602330f2011-07-09 11:15:34 +0200878static Event *
879ltrace_exiting_on_event(Event_Handler * super, Event * event)
880{
881 struct ltrace_exiting_handler * self = (void *)super;
882 Process * task = event->proc;
883 Process * leader = task->leader;
884
885 debug(DEBUG_PROCESS, "pid %d; event type %d", task->pid, event->type);
886
887 struct pid_task * task_info = get_task_info(&self->pids, task->pid);
888 handle_stopping_event(task_info, &event);
889
Petr Machata590c8082011-08-20 22:45:26 +0200890 if (event != NULL && event->type == EVENT_BREAKPOINT)
891 undo_breakpoint(event, leader);
Petr Machata4b9f4d92011-08-20 04:07:05 +0200892
893 if (await_sigstop_delivery(&self->pids, task_info, event)
Petr Machata590c8082011-08-20 22:45:26 +0200894 && all_stops_accountable(&self->pids))
895 detach_process(leader);
Petr Machata602330f2011-07-09 11:15:34 +0200896
897 /* Sink all non-exit events. We are about to exit, so we
898 * don't bother with queuing them. */
899 if (event_exit_or_none_p(event))
900 return event;
Petr Machata13d5df72011-08-19 23:15:15 +0200901
Petr Machata13d5df72011-08-19 23:15:15 +0200902 return NULL;
Petr Machata602330f2011-07-09 11:15:34 +0200903}
904
905static void
906ltrace_exiting_destroy(Event_Handler * super)
907{
908 struct ltrace_exiting_handler * self = (void *)super;
909 free(self->pids.tasks);
910}
911
912static int
913ltrace_exiting_install_handler(Process * proc)
914{
915 /* Only install to leader. */
916 if (proc->leader != proc)
917 return 0;
918
919 /* Perhaps we are already installed, if the user passed
920 * several -p options that are tasks of one process. */
921 if (proc->event_handler != NULL
922 && proc->event_handler->on_event == &ltrace_exiting_on_event)
923 return 0;
924
Petr Machata590c8082011-08-20 22:45:26 +0200925 /* If stopping handler is already present, let it do the
926 * work. */
927 if (proc->event_handler != NULL) {
928 assert(proc->event_handler->on_event
929 == &process_stopping_on_event);
930 struct process_stopping_handler * other
931 = (void *)proc->event_handler;
932 other->exiting = 1;
933 return 0;
934 }
935
Petr Machata602330f2011-07-09 11:15:34 +0200936 struct ltrace_exiting_handler * handler
937 = calloc(sizeof(*handler), 1);
938 if (handler == NULL) {
939 perror("malloc exiting handler");
940 fatal:
941 /* XXXXXXXXXXXXXXXXXXX fixme */
942 return -1;
943 }
944
Petr Machata602330f2011-07-09 11:15:34 +0200945 handler->super.on_event = ltrace_exiting_on_event;
946 handler->super.destroy = ltrace_exiting_destroy;
947 install_event_handler(proc->leader, &handler->super);
948
949 if (each_task(proc->leader, &send_sigstop,
950 &handler->pids) != NULL)
951 goto fatal;
952
953 return 0;
954}
955
/*
 * When the traced process vforks, it's suspended until the child
 * process calls _exit or exec*.  In the meantime, the two share the
 * address space.
 *
 * The child process should only ever call _exit or exec*, but we
 * can't count on that (it's not the role of ltrace to policy, but to
 * observe).  In any case, we will _at least_ have to deal with
 * removal of vfork return breakpoint (which we have to smuggle back
 * in, so that the parent can see it, too), and introduction of exec*
 * return breakpoint.  Since we already have both breakpoint actions
 * to deal with, we might as well support it all.
 *
 * The gist is that we pretend that the child is in a thread group
 * with its parent, and handle it as a multi-threaded case, with the
 * exception that we know that the parent is blocked, and don't
 * attempt to stop it.  When the child execs, we undo the setup.
 */

struct process_vfork_handler
{
	Event_Handler super;	/* Base handler; only on_event is set. */
	void * bp_addr;		/* Address of the vfork return breakpoint,
				 * recorded on the first EVENT_BREAKPOINT
				 * seen; NULL until then. */
};
980
/* Event callback installed on a vfork child (see the comment above
 * struct process_vfork_handler).  Records the vfork return breakpoint
 * address, and when the child exits or execs, re-plants that
 * breakpoint for the parent, resumes the parent, and dismantles the
 * artificial leader link.  Always passes the event through. */
static Event *
process_vfork_on_event(Event_Handler * super, Event * event)
{
	struct process_vfork_handler * self = (void *)super;
	Breakpoint * sbp;
	assert(self != NULL);

	switch (event->type) {
	case EVENT_BREAKPOINT:
		/* Remember the vfork return breakpoint. */
		if (self->bp_addr == NULL)
			self->bp_addr = event->e_un.brk_addr;
		break;

	case EVENT_EXIT:
	case EVENT_EXIT_SIGNAL:
	case EVENT_EXEC:
		/* Smuggle back in the vfork return breakpoint, so
		 * that our parent can trip over it once again. */
		if (self->bp_addr != NULL) {
			sbp = dict_find_entry(event->proc->leader->breakpoints,
					      self->bp_addr);
			if (sbp != NULL)
				insert_breakpoint(event->proc->parent,
						  self->bp_addr,
						  sbp->libsym, 1);
		}

		/* The parent was blocked in vfork; let it run again. */
		continue_process(event->proc->parent->pid);

		/* Remove the leader that we artificially set up
		 * earlier. */
		change_process_leader(event->proc, event->proc);
		/* NOTE: this frees SELF; it must not be touched after
		 * this point.  Execution falls through to the (empty)
		 * default label, which is harmless. */
		destroy_event_handler(event->proc);

	default:
		;
	}

	return event;
}
1022
1023void
1024continue_after_vfork(Process * proc)
1025{
1026 debug(DEBUG_PROCESS, "continue_after_vfork: pid=%d", proc->pid);
Petr Machata134a1082011-09-27 20:25:58 +02001027 struct process_vfork_handler * handler = calloc(sizeof(*handler), 1);
Petr Machatacbe29c62011-09-27 02:27:58 +02001028 if (handler == NULL) {
1029 perror("malloc vfork handler");
1030 /* Carry on not bothering to treat the process as
1031 * necessary. */
1032 continue_process(proc->parent->pid);
1033 return;
1034 }
1035
1036 /* We must set up custom event handler, so that we see
1037 * exec/exit events for the task itself. */
Petr Machata134a1082011-09-27 20:25:58 +02001038 handler->super.on_event = process_vfork_on_event;
1039 install_event_handler(proc, &handler->super);
Petr Machatacbe29c62011-09-27 02:27:58 +02001040
1041 /* Make sure that the child is sole thread. */
1042 assert(proc->leader == proc);
1043 assert(proc->next == NULL || proc->next->leader != proc);
1044
1045 /* Make sure that the child's parent is properly set up. */
1046 assert(proc->parent != NULL);
1047 assert(proc->parent->leader != NULL);
1048
1049 change_process_leader(proc, proc->parent->leader);
Petr Machatacbe29c62011-09-27 02:27:58 +02001050}
1051
Petr Machata9d29b3e2011-11-09 16:46:56 +01001052static int
1053is_mid_stopping(Process *proc)
1054{
1055 return proc != NULL
1056 && proc->event_handler != NULL
1057 && proc->event_handler->on_event == &process_stopping_on_event;
1058}
1059
Petr Machata43d2fe52011-11-02 13:25:49 +01001060void
1061continue_after_syscall(Process * proc, int sysnum, int ret_p)
1062{
1063 /* Don't continue if we are mid-stopping. */
Petr Machata9d29b3e2011-11-09 16:46:56 +01001064 if (ret_p && (is_mid_stopping(proc) || is_mid_stopping(proc->leader))) {
1065 debug(DEBUG_PROCESS,
1066 "continue_after_syscall: don't continue %d",
1067 proc->pid);
Petr Machata43d2fe52011-11-02 13:25:49 +01001068 return;
Petr Machata9d29b3e2011-11-09 16:46:56 +01001069 }
Petr Machata43d2fe52011-11-02 13:25:49 +01001070 continue_process(proc->pid);
1071}
1072
Petr Machata602330f2011-07-09 11:15:34 +02001073/* If ltrace gets SIGINT, the processes directly or indirectly run by
1074 * ltrace get it too. We just have to wait long enough for the signal
1075 * to be delivered and the process terminated, which we notice and
1076 * exit ltrace, too. So there's not much we need to do there. We
1077 * want to keep tracing those processes as usual, in case they just
1078 * SIG_IGN the SIGINT to do their shutdown etc.
1079 *
1080 * For processes ran on the background, we want to install an exit
1081 * handler that stops all the threads, removes all breakpoints, and
1082 * detaches.
1083 */
1084void
Petr Machataffe4cd22012-04-11 18:01:44 +02001085os_ltrace_exiting(void)
Petr Machata602330f2011-07-09 11:15:34 +02001086{
1087 struct opt_p_t * it;
1088 for (it = opt_p; it != NULL; it = it->next) {
1089 Process * proc = pid2proc(it->pid);
1090 if (proc == NULL || proc->leader == NULL)
1091 continue;
1092 if (ltrace_exiting_install_handler(proc->leader) < 0)
1093 fprintf(stderr,
1094 "Couldn't install exiting handler for %d.\n",
1095 proc->pid);
1096 }
1097}
1098
/* Signal-handler-context hook for ltrace shutdown.  If the tracer is
 * currently blocked in waitpid, kick off the exiting machinery and
 * report 1; otherwise report 0 so the caller handles it later. */
int
os_ltrace_exiting_sighandler(void)
{
	extern int linux_in_waitpid;
	if (!linux_in_waitpid)
		return 0;
	os_ltrace_exiting();
	return 1;
}
1109
Joe Damatodfa3fa32010-11-08 15:47:35 -08001110size_t
1111umovebytes(Process *proc, void *addr, void *laddr, size_t len) {
1112
1113 union {
1114 long a;
1115 char c[sizeof(long)];
1116 } a;
Zachary T Welchba6aca22010-12-08 18:55:09 -08001117 int started = 0;
1118 size_t offset = 0, bytes_read = 0;
Joe Damatodfa3fa32010-11-08 15:47:35 -08001119
1120 while (offset < len) {
1121 a.a = ptrace(PTRACE_PEEKTEXT, proc->pid, addr + offset, 0);
1122 if (a.a == -1 && errno) {
1123 if (started && errno == EIO)
1124 return bytes_read;
1125 else
1126 return -1;
1127 }
1128 started = 1;
1129
1130 if (len - offset >= sizeof(long)) {
1131 memcpy(laddr + offset, &a.c[0], sizeof(long));
1132 bytes_read += sizeof(long);
1133 }
1134 else {
1135 memcpy(laddr + offset, &a.c[0], len - offset);
1136 bytes_read += (len - offset);
1137 }
1138 offset += sizeof(long);
1139 }
1140
1141 return bytes_read;
1142}
1143
/* Read a series of bytes starting at the process's memory address
   'addr' and continuing until a NUL ('\0') is seen or 'len' bytes
   have been read.

   The result placed in 'laddr' is always NUL-terminated.  Always
   returns 0.
*/
int
umovestr(Process *proc, void *addr, int len, void *laddr) {
	/* One tracee word, viewed either as the long that ptrace
	 * returns or as its constituent bytes.  */
	union {
		long a;
		char c[sizeof(long)];
	} a;
	unsigned i;
	int offset = 0;

	while (offset < len) {
		/* NOTE(review): unlike umovebytes, the result of
		 * PTRACE_PEEKTEXT is not error-checked here; on failure
		 * the bytes of -1 end up in the buffer.  Confirm
		 * whether callers can tolerate that.  */
		a.a = ptrace(PTRACE_PEEKTEXT, proc->pid, addr + offset, 0);
		for (i = 0; i < sizeof(long); i++) {
			if (a.c[i] && offset + (signed)i < len) {
				*(char *)(laddr + offset + i) = a.c[i];
			} else {
				/* Hit the NUL or the LEN limit:
				 * terminate the copy and stop.  */
				*(char *)(laddr + offset + i) = '\0';
				return 0;
			}
		}
		offset += sizeof(long);
	}
	/* NOTE(review): when the string fills all LEN bytes, this NUL
	 * (and the one above when offset + i == len) is written at
	 * laddr[len] or slightly beyond (offset is a multiple of
	 * sizeof(long)), i.e. past LEN bytes.  Callers appear to be
	 * expected to supply a buffer larger than LEN -- verify at
	 * call sites.  */
	*(char *)(laddr + offset) = '\0';
	return 0;
}