#include "config.h"

#include <asm/unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#ifdef HAVE_LIBSELINUX
# include <selinux/selinux.h>
#endif

#include "ptrace.h"
#include "common.h"
#include "breakpoint.h"
#include "proc.h"

/* If the system headers did not provide the constants, hard-code the
   normal values.  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_OLDSETOPTIONS 21
#define PTRACE_SETOPTIONS 0x4200
#define PTRACE_GETEVENTMSG 0x4201

/* Options set using PTRACE_SETOPTIONS.  */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#define PTRACE_O_TRACEFORK 0x00000002
#define PTRACE_O_TRACEVFORK 0x00000004
#define PTRACE_O_TRACECLONE 0x00000008
#define PTRACE_O_TRACEEXEC 0x00000010
#define PTRACE_O_TRACEVFORKDONE 0x00000020
#define PTRACE_O_TRACEEXIT 0x00000040

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK 1
#define PTRACE_EVENT_VFORK 2
#define PTRACE_EVENT_CLONE 3
#define PTRACE_EVENT_EXEC 4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT 6

#endif /* PTRACE_EVENT_FORK */

#ifdef ARCH_HAVE_UMOVELONG
extern int arch_umovelong (Process *, void *, long *, arg_type_info *);
int
umovelong (Process *proc, void *addr, long *result, arg_type_info *info) {
	return arch_umovelong (proc, addr, result, info);
}
#else
/* Read a single long from the process's memory address 'addr'.  */
int
umovelong (Process *proc, void *addr, long *result, arg_type_info *info) {
	long pointed_to;

	errno = 0;
	pointed_to = ptrace (PTRACE_PEEKTEXT, proc->pid, addr, 0);
	if (pointed_to == -1 && errno)
		return -errno;

	*result = pointed_to;
	if (info) {
		switch (info->type) {
		case ARGTYPE_INT:
			*result &= 0x00000000ffffffffUL;
			break;
		default:
			break;
		}
	}
	return 0;
}
#endif
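
/* An illustrative use of umovelong, as a sketch only ('proc' and
 * 'addr' come from the caller's context):
 *
 *	long val;
 *	int err = umovelong(proc, addr, &val, NULL);
 *	if (err < 0)
 *		fprintf(stderr, "read failed: %s\n", strerror(-err));
 */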

void
trace_fail_warning(pid_t pid)
{
	/* This was adapted from GDB.  */
#ifdef HAVE_LIBSELINUX
	static int checked = 0;
	if (checked)
		return;
	checked = 1;

	/* -1 is returned for errors, 0 if it has no effect, 1 if
	 * PTRACE_ATTACH is forbidden.  */
	if (security_get_boolean_active("deny_ptrace") == 1)
		fprintf(stderr,
"The SELinux boolean 'deny_ptrace' is enabled, which may prevent ltrace from\n"
"tracing other processes.  You can disable this process attach protection by\n"
"issuing 'setsebool deny_ptrace=0' in the superuser context.\n");
#endif /* HAVE_LIBSELINUX */
}

void
trace_me(void)
{
	debug(DEBUG_PROCESS, "trace_me: pid=%d", getpid());
	if (ptrace(PTRACE_TRACEME, 0, 1, 0) < 0) {
		perror("PTRACE_TRACEME");
		trace_fail_warning(getpid());
		exit(1);
	}
}

/* There's a (hopefully) brief period of time after the child process
 * forks when we can't trace it yet.  Here we wait for the kernel to
 * prepare the process.  */
int
wait_for_proc(pid_t pid)
{
	/* man ptrace: PTRACE_ATTACH attaches to the process specified
	   in pid.  The child is sent a SIGSTOP, but will not
	   necessarily have stopped by the completion of this call;
	   use wait() to wait for the child to stop.  */
	if (waitpid(pid, NULL, __WALL) != pid) {
		perror ("trace_pid: waitpid");
		return -1;
	}

	return 0;
}

int
trace_pid(pid_t pid)
{
	debug(DEBUG_PROCESS, "trace_pid: pid=%d", pid);
	/* This shouldn't emit error messages, as there are legitimate
	 * reasons why the PID couldn't be attached: e.g. it may have
	 * already exited.  */
	if (ptrace(PTRACE_ATTACH, pid, 1, 0) < 0)
		return -1;

	return wait_for_proc(pid);
}

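/* Ask the kernel to report the events we rely on: syscall stops
 * flagged with bit 0x80 (PTRACE_O_TRACESYSGOOD) and fork/vfork/
 * clone/exec notifications.  The 0x80 bit in proc->tracesysgood
 * records that the options were already set.  */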
void
trace_set_options(Process *proc, pid_t pid) {
	if (proc->tracesysgood & 0x80)
		return;

	debug(DEBUG_PROCESS, "trace_set_options: pid=%d", pid);

	long options = PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK |
		PTRACE_O_TRACEVFORK | PTRACE_O_TRACECLONE |
		PTRACE_O_TRACEEXEC;
	if (ptrace(PTRACE_SETOPTIONS, pid, 0, options) < 0 &&
	    ptrace(PTRACE_OLDSETOPTIONS, pid, 0, options) < 0) {
		perror("PTRACE_SETOPTIONS");
		return;
	}
	proc->tracesysgood |= 0x80;
}
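
/* An illustrative attach sequence, as a sketch only (PROC stands for
 * the Process structure that the caller keeps for PID):
 *
 *	if (trace_pid(pid) < 0)
 *		fail();			// hypothetical error path
 *	trace_set_options(proc, pid);
 *	continue_process(pid);
 */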

void
untrace_pid(pid_t pid) {
	debug(DEBUG_PROCESS, "untrace_pid: pid=%d", pid);
	ptrace(PTRACE_DETACH, pid, 1, 0);
}

void
continue_after_signal(pid_t pid, int signum) {
	debug(DEBUG_PROCESS, "continue_after_signal: pid=%d, signum=%d", pid, signum);
	ptrace(PTRACE_SYSCALL, pid, 0, signum);
}

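/* Callback for each_qd_event: yield when the queued event belongs
 * to the pid passed in DATA.  */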
static enum ecb_status
event_for_pid(Event * event, void * data)
{
	if (event->proc != NULL && event->proc->pid == (pid_t)(uintptr_t)data)
		return ecb_yield;
	return ecb_cont;
}

static int
have_events_for(pid_t pid)
{
	return each_qd_event(event_for_pid, (void *)(uintptr_t)pid) != NULL;
}

void
continue_process(pid_t pid)
{
	debug(DEBUG_PROCESS, "continue_process: pid=%d", pid);

	/* Only really continue the process if there are no events in
	   the queue for this process.  Otherwise just wait for the
	   other events to arrive.  */
	if (!have_events_for(pid))
		/* We always trace syscalls to control fork(),
		 * clone(), execve()...  */
		ptrace(PTRACE_SYSCALL, pid, 0, 0);
	else
		debug(DEBUG_PROCESS,
		      "putting off the continue, events in queue.");
}

/**
 * This is used for bookkeeping related to PIDs that the event
 * handlers work with.
 */
struct pid_task {
	pid_t pid;	/* This may be 0 for tasks that exited
			 * mid-handling.  */
	int sigstopped : 1;
	int got_event : 1;
	int delivered : 1;
	int vforked : 1;
	int sysret : 1;
} * pids;

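/* A growable set of pid_task records; 'alloc' tracks the allocated
 * capacity of 'tasks'.  */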
struct pid_set {
	struct pid_task * tasks;
	size_t count;
	size_t alloc;
};

/**
 * Breakpoint re-enablement.  When we hit a breakpoint, we must
 * disable it, single-step, and re-enable it.  That single-step can be
 * done only by one task in a task group, while others are stopped,
 * otherwise the processes would race over who sees the breakpoint
 * disabled and who doesn't.  The following is to keep track of it
 * all.
 */
struct process_stopping_handler
{
	struct event_handler super;

	/* The task that is doing the re-enablement.  */
	Process * task_enabling_breakpoint;

	/* The breakpoint being re-enabled.  */
	struct breakpoint *breakpoint_being_enabled;

	/* Artificial atomic skip breakpoint, if one is needed.  */
	void *atomic_skip_bp_addr;

	enum {
		/* We are waiting for everyone to land in t/T.  */
		psh_stopping = 0,

		/* We are doing the PTRACE_SINGLESTEP.  */
		psh_singlestep,

		/* We are waiting for all the SIGSTOPs to arrive so
		 * that we can sink them.  */
		psh_sinking,

		/* This is for tracking the ugly workaround.  */
		psh_ugly_workaround,
	} state;

	int exiting;

	struct pid_set pids;
};

static struct pid_task *
get_task_info(struct pid_set * pids, pid_t pid)
{
	assert(pid != 0);
	size_t i;
	for (i = 0; i < pids->count; ++i)
		if (pids->tasks[i].pid == pid)
			return &pids->tasks[i];

	return NULL;
}

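/* Add a fresh pid_task record for PID, growing the array as needed.
 * Returns NULL on allocation failure.  */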
static struct pid_task *
add_task_info(struct pid_set * pids, pid_t pid)
{
	if (pids->count == pids->alloc) {
		size_t ns = (2 * pids->alloc) ?: 4;
		struct pid_task * n = realloc(pids->tasks,
					      sizeof(*pids->tasks) * ns);
		if (n == NULL)
			return NULL;
		pids->tasks = n;
		pids->alloc = ns;
	}
	struct pid_task * task_info = &pids->tasks[pids->count++];
	memset(task_info, 0, sizeof(*task_info));
	task_info->pid = pid;
	return task_info;
}

static enum callback_status
task_stopped(struct Process *task, void *data)
{
	enum process_status st = process_status(task->pid);
	if (data != NULL)
		*(enum process_status *)data = st;

	/* If the task is already stopped, don't worry about it.
	 * Likewise if it managed to become a zombie or terminate in
	 * the meantime.  This can happen when the whole thread group
	 * is terminating.  */
	switch (st) {
	case ps_invalid:
	case ps_tracing_stop:
	case ps_zombie:
		return CBS_CONT;
	case ps_sleeping:
	case ps_stop:
	case ps_other:
		return CBS_STOP;
	}

	abort ();
}

/* Task is blocked if it's stopped, or if it's a vfork parent.  */
static enum callback_status
task_blocked(struct Process *task, void *data)
{
	struct pid_set * pids = data;
	struct pid_task * task_info = get_task_info(pids, task->pid);
	if (task_info != NULL
	    && task_info->vforked)
		return CBS_CONT;

	return task_stopped(task, NULL);
}

static Event *process_vfork_on_event(struct event_handler *super, Event *event);

static enum callback_status
task_vforked(struct Process *task, void *data)
{
	if (task->event_handler != NULL
	    && task->event_handler->on_event == &process_vfork_on_event)
		return CBS_STOP;
	return CBS_CONT;
}

static int
is_vfork_parent(Process * task)
{
	return each_task(task->leader, &task_vforked, NULL) != NULL;
}

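/* Callback for each_task: ask TASK to stop by sending it SIGSTOP,
 * keeping the bookkeeping in the pid_set passed in DATA up to
 * date.  */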
static enum callback_status
send_sigstop(struct Process *task, void *data)
{
	Process * leader = task->leader;
	struct pid_set * pids = data;

	/* Look for pre-existing task record, or add new.  */
	struct pid_task * task_info = get_task_info(pids, task->pid);
	if (task_info == NULL)
		task_info = add_task_info(pids, task->pid);
	if (task_info == NULL) {
		perror("send_sigstop: add_task_info");
		destroy_event_handler(leader);
		/* Signal failure upwards.  */
		return CBS_STOP;
	}

	/* This task still has not been attached to.  It should be
	   stopped by the kernel.  */
	if (task->state == STATE_BEING_CREATED)
		return CBS_CONT;

	/* Don't bother sending SIGSTOP if we are already stopped, or
	 * if we sent the SIGSTOP already, which happens when we are
	 * handling "onexit" and inherited the handler from breakpoint
	 * re-enablement.  */
	enum process_status st;
	if (task_stopped(task, &st) == CBS_CONT)
		return CBS_CONT;
	if (task_info->sigstopped) {
		if (!task_info->delivered)
			return CBS_CONT;
		task_info->delivered = 0;
	}

	/* Also don't attempt to stop the process if it's the parent
	 * of a vforked process.  We set up the event handler
	 * specially to hint us.  In that case the parent is in D
	 * (uninterruptible sleep) state, which we use to weed out
	 * unnecessary looping.  */
	if (st == ps_sleeping
	    && is_vfork_parent (task)) {
		task_info->vforked = 1;
		return CBS_CONT;
	}

	if (task_kill(task->pid, SIGSTOP) >= 0) {
		debug(DEBUG_PROCESS, "send SIGSTOP to %d", task->pid);
		task_info->sigstopped = 1;
	} else
		fprintf(stderr,
			"Warning: couldn't send SIGSTOP to %d\n", task->pid);

	return CBS_CONT;
}

/* On certain kernels, detaching right after a singlestep causes the
   tracee to be killed with a SIGTRAP (even though the singlestep was
   properly caught by waitpid).  The ugly workaround is to put a
   breakpoint where IP points and let the process continue.  After
   this the breakpoint can be retracted and the process detached.  */
static void
ugly_workaround(Process * proc)
{
	void * ip = get_instruction_pointer(proc);
	struct breakpoint *sbp = dict_find_entry(proc->leader->breakpoints, ip);
	if (sbp != NULL)
		enable_breakpoint(proc, sbp);
	else
		insert_breakpoint(proc, ip, NULL, 1);
	ptrace(PTRACE_CONT, proc->pid, 0, 0);
}

static void
process_stopping_done(struct process_stopping_handler * self, Process * leader)
{
	debug(DEBUG_PROCESS, "process stopping done %d",
	      self->task_enabling_breakpoint->pid);
	size_t i;
	if (!self->exiting) {
		for (i = 0; i < self->pids.count; ++i)
			if (self->pids.tasks[i].pid != 0
			    && (self->pids.tasks[i].delivered
				|| self->pids.tasks[i].sysret))
				continue_process(self->pids.tasks[i].pid);
		continue_process(self->task_enabling_breakpoint->pid);
		destroy_event_handler(leader);
	} else {
		self->state = psh_ugly_workaround;
		ugly_workaround(self->task_enabling_breakpoint);
	}
}

/* Before we detach, we need to make sure that task's IP is on the
 * edge of an instruction.  So for tasks that have a breakpoint event
 * in the queue, we adjust the instruction pointer, just like
 * continue_after_breakpoint does.  */
static enum ecb_status
undo_breakpoint(Event * event, void * data)
{
	if (event != NULL
	    && event->proc->leader == data
	    && event->type == EVENT_BREAKPOINT)
		set_instruction_pointer(event->proc, event->e_un.brk_addr);
	return ecb_cont;
}

static enum callback_status
untrace_task(struct Process *task, void *data)
{
	if (task != data)
		untrace_pid(task->pid);
	return CBS_CONT;
}

static enum callback_status
remove_task(struct Process *task, void *data)
{
	/* Don't untrace leader just yet.  */
	if (task != data)
		remove_process(task);
	return CBS_CONT;
}

static void
detach_process(Process * leader)
{
	each_qd_event(&undo_breakpoint, leader);
	disable_all_breakpoints(leader);

	/* Now untrace the process, if it was attached to by -p.  */
	struct opt_p_t * it;
	for (it = opt_p; it != NULL; it = it->next) {
		Process * proc = pid2proc(it->pid);
		if (proc == NULL)
			continue;
		if (proc->leader == leader) {
			each_task(leader, &untrace_task, NULL);
			break;
		}
	}
	each_task(leader, &remove_task, leader);
	destroy_event_handler(leader);
	remove_task(leader, NULL);
}

static void
handle_stopping_event(struct pid_task * task_info, Event ** eventp)
{
	/* Mark all events, so that we know whom to SIGCONT later.  */
	if (task_info != NULL)
		task_info->got_event = 1;

	Event * event = *eventp;

	/* In every state, sink SIGSTOP events for tasks that it was
	 * sent to.  */
	if (task_info != NULL
	    && event->type == EVENT_SIGNAL
	    && event->e_un.signum == SIGSTOP) {
		debug(DEBUG_PROCESS, "SIGSTOP delivered to %d", task_info->pid);
		if (task_info->sigstopped
		    && !task_info->delivered) {
			task_info->delivered = 1;
			*eventp = NULL; // sink the event
		} else
			fprintf(stderr, "suspicious: %d got SIGSTOP, but "
				"sigstopped=%d and delivered=%d\n",
				task_info->pid, task_info->sigstopped,
				task_info->delivered);
	}
}

/* Some SIGSTOPs may have not been delivered to their respective tasks
 * yet.  They are still in the queue.  If we have seen an event for
 * that process, continue it, so that the SIGSTOP can be delivered and
 * caught by ltrace.  We don't mind that the process is after a
 * breakpoint (and therefore potentially doesn't have aligned IP),
 * because the signal will be delivered without the process actually
 * starting.  */
static void
continue_for_sigstop_delivery(struct pid_set * pids)
{
	size_t i;
	for (i = 0; i < pids->count; ++i) {
		if (pids->tasks[i].pid != 0
		    && pids->tasks[i].sigstopped
		    && !pids->tasks[i].delivered
		    && pids->tasks[i].got_event) {
			debug(DEBUG_PROCESS, "continue %d for SIGSTOP delivery",
			      pids->tasks[i].pid);
			ptrace(PTRACE_SYSCALL, pids->tasks[i].pid, 0, 0);
		}
	}
}

static int
event_exit_p(Event * event)
{
	return event != NULL && (event->type == EVENT_EXIT
				 || event->type == EVENT_EXIT_SIGNAL);
}

static int
event_exit_or_none_p(Event * event)
{
	return event == NULL || event_exit_p(event)
		|| event->type == EVENT_NONE;
}

static int
await_sigstop_delivery(struct pid_set * pids, struct pid_task * task_info,
		       Event * event)
{
	/* If we still didn't get our SIGSTOP, continue the process
	 * and carry on.  */
	if (event != NULL && !event_exit_or_none_p(event)
	    && task_info != NULL && task_info->sigstopped) {
		debug(DEBUG_PROCESS, "continue %d for SIGSTOP delivery",
		      task_info->pid);
		/* We should get the signal the first thing
		 * after this, so it should be OK to continue
		 * even if we are over a breakpoint.  */
		ptrace(PTRACE_SYSCALL, task_info->pid, 0, 0);

	} else {
		/* If all SIGSTOPs were delivered, uninstall the
		 * handler and continue everyone.  */
		/* XXX I suspect that we should check tasks that are
		 * still around.  As things are now, there should be a
		 * race between waiting for everyone to stop and one
		 * of the tasks exiting.  */
		int all_clear = 1;
		size_t i;
		for (i = 0; i < pids->count; ++i)
			if (pids->tasks[i].pid != 0
			    && pids->tasks[i].sigstopped
			    && !pids->tasks[i].delivered) {
				all_clear = 0;
				break;
			}
		return all_clear;
	}

	return 0;
}

static int
all_stops_accountable(struct pid_set * pids)
{
	size_t i;
	for (i = 0; i < pids->count; ++i)
		if (pids->tasks[i].pid != 0
		    && !pids->tasks[i].got_event
		    && !have_events_for(pids->tasks[i].pid))
			return 0;
	return 1;
}

/* The protocol is: 0 for success, negative for failure, positive if
 * default singlestep is to be used.  */
int arch_atomic_singlestep(struct Process *proc, struct breakpoint *sbp,
			   int (*add_cb)(void *addr, void *data),
			   void *add_cb_data);

#ifndef ARCH_HAVE_ATOMIC_SINGLESTEP
int
arch_atomic_singlestep(struct Process *proc, struct breakpoint *sbp,
		       int (*add_cb)(void *addr, void *data),
		       void *add_cb_data)
{
	return 1;
}
#endif

static int
atomic_singlestep_add_bp(void *addr, void *data)
{
	struct process_stopping_handler *self = data;
	struct Process *proc = self->task_enabling_breakpoint;

	/* Only support single address as of now.  */
	assert(self->atomic_skip_bp_addr == NULL);

	self->atomic_skip_bp_addr = addr + 4;
	insert_breakpoint(proc->leader, self->atomic_skip_bp_addr, NULL, 1);

	return 0;
}

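/* Step the task over the breakpoint site.  The architecture gets a
 * chance to step over a whole atomic sequence first; a positive
 * return from arch_atomic_singlestep means it declined, and we fall
 * back to a plain PTRACE_SINGLESTEP.  Returns 0 on success, negative
 * on failure.  */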
static int
singlestep(struct process_stopping_handler *self)
{
	struct Process *proc = self->task_enabling_breakpoint;

	int status = arch_atomic_singlestep(self->task_enabling_breakpoint,
					    self->breakpoint_being_enabled,
					    &atomic_singlestep_add_bp, self);

	/* Propagate failure and success.  */
	if (status <= 0)
		return status;

	/* Otherwise do the default action: singlestep.  */
	debug(1, "PTRACE_SINGLESTEP");
	if (ptrace(PTRACE_SINGLESTEP, proc->pid, 0, 0)) {
		perror("PTRACE_SINGLESTEP");
		return -1;
	}
	return 0;
}

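/* Common cleanup after the single-step: let the still-queued
 * SIGSTOPs be delivered, sink the breakpoint event, and retract the
 * atomic-skip breakpoint if one was inserted.  */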
static void
post_singlestep(struct process_stopping_handler *self, Event **eventp)
{
	continue_for_sigstop_delivery(&self->pids);

	if ((*eventp)->type == EVENT_BREAKPOINT)
		*eventp = NULL; // handled

	if (self->atomic_skip_bp_addr != 0)
		delete_breakpoint(self->task_enabling_breakpoint->leader,
				  self->atomic_skip_bp_addr);

	self->breakpoint_being_enabled = NULL;
}

static void
singlestep_error(struct process_stopping_handler *self, Event **eventp)
{
	struct Process *teb = self->task_enabling_breakpoint;
	struct breakpoint *sbp = self->breakpoint_being_enabled;
	fprintf(stderr, "%d couldn't singlestep over %s (%p)\n",
		teb->pid, sbp->libsym != NULL ? sbp->libsym->name : "?",
		sbp->addr);
	delete_breakpoint(teb->leader, sbp->addr);
	post_singlestep(self, eventp);
}

/* This event handler is installed when we are in the process of
 * stopping the whole thread group to do the breakpoint re-enablement
 * for one of the threads.  We pump all events to the queue for later
 * processing while we wait for all the threads to stop.  When this
 * happens, we let the re-enablement thread PTRACE_SINGLESTEP,
 * re-enable, and continue everyone.  */
static Event *
process_stopping_on_event(struct event_handler *super, Event *event)
{
	struct process_stopping_handler * self = (void *)super;
	Process * task = event->proc;
	Process * leader = task->leader;
	struct breakpoint *sbp = self->breakpoint_being_enabled;
	Process * teb = self->task_enabling_breakpoint;

	debug(DEBUG_PROCESS,
	      "pid %d; event type %d; state %d",
	      task->pid, event->type, self->state);

	struct pid_task * task_info = get_task_info(&self->pids, task->pid);
	if (task_info == NULL)
		fprintf(stderr, "new task??? %d\n", task->pid);
	handle_stopping_event(task_info, &event);

	int state = self->state;
	int event_to_queue = !event_exit_or_none_p(event);

	/* Deactivate the entry if the task exits.  */
	if (event_exit_p(event) && task_info != NULL)
		task_info->pid = 0;

	/* Always handle sysrets.  Whether sysret occurred and what
	 * sys it rets from may need to be determined based on process
	 * stack, so we need to keep that in sync with reality.  Note
	 * that we don't continue the process after the sysret is
	 * handled.  See continue_after_syscall.  */
	if (event != NULL && event->type == EVENT_SYSRET) {
		debug(1, "%d LT_EV_SYSRET", event->proc->pid);
		event_to_queue = 0;
		task_info->sysret = 1;
	}

	switch (state) {
	case psh_stopping:
		/* If everyone is stopped, singlestep.  */
		if (each_task(leader, &task_blocked, &self->pids) == NULL) {
			debug(DEBUG_PROCESS, "all stopped, now SINGLESTEP %d",
			      teb->pid);
			if (sbp->enabled)
				disable_breakpoint(teb, sbp);
			if (singlestep(self) < 0) {
				singlestep_error(self, &event);
				goto psh_sinking;
			}

			self->state = state = psh_singlestep;
		}
		break;

	case psh_singlestep:
		/* In singlestep state, breakpoint signifies that we
		 * have now stepped, and can re-enable the breakpoint.  */
		if (event != NULL && task == teb) {

			/* This is not the singlestep that we are waiting for.  */
			if (event->type == EVENT_SIGNAL) {
				if (singlestep(self) < 0) {
					singlestep_error(self, &event);
					goto psh_sinking;
				}
				break;
			}

			/* Essentially we don't care what event caused
			 * the thread to stop.  We can do the
			 * re-enablement now.  */
			if (sbp->enabled)
				enable_breakpoint(teb, sbp);

			post_singlestep(self, &event);
			goto psh_sinking;
		}
		break;

	psh_sinking:
		state = self->state = psh_sinking;
	case psh_sinking:
		if (await_sigstop_delivery(&self->pids, task_info, event))
			process_stopping_done(self, leader);
		break;

	case psh_ugly_workaround:
		if (event == NULL)
			break;
		if (event->type == EVENT_BREAKPOINT) {
			undo_breakpoint(event, leader);
			if (task == teb)
				self->task_enabling_breakpoint = NULL;
		}
		if (self->task_enabling_breakpoint == NULL
		    && all_stops_accountable(&self->pids)) {
			undo_breakpoint(event, leader);
			detach_process(leader);
			event = NULL; // handled
		}
	}

	if (event != NULL && event_to_queue) {
		enque_event(event);
		event = NULL; // sink the event
	}

	return event;
}

static void
process_stopping_destroy(struct event_handler *super)
{
	struct process_stopping_handler * self = (void *)super;
	free(self->pids.tasks);
}

void
continue_after_breakpoint(Process *proc, struct breakpoint *sbp)
{
	set_instruction_pointer(proc, sbp->addr);
	if (sbp->enabled == 0) {
		continue_process(proc->pid);
	} else {
		debug(DEBUG_PROCESS,
		      "continue_after_breakpoint: pid=%d, addr=%p",
		      proc->pid, sbp->addr);
#if defined __sparc__ || defined __ia64__ || defined __mips__
		/* we don't want to singlestep here */
		continue_process(proc->pid);
#else
		struct process_stopping_handler * handler
			= calloc(1, sizeof(*handler));
		if (handler == NULL) {
			perror("malloc breakpoint disable handler");
		fatal:
			/* Carry on not bothering to re-enable.  */
			continue_process(proc->pid);
			return;
		}

		handler->super.on_event = process_stopping_on_event;
		handler->super.destroy = process_stopping_destroy;
		handler->task_enabling_breakpoint = proc;
		handler->breakpoint_being_enabled = sbp;
		install_event_handler(proc->leader, &handler->super);

		if (each_task(proc->leader, &send_sigstop,
			      &handler->pids) != NULL)
			goto fatal;

		/* And deliver the first fake event, in case all the
		 * conditions are already fulfilled.  */
		Event ev;
		ev.type = EVENT_NONE;
		ev.proc = proc;
		process_stopping_on_event(&handler->super, &ev);
#endif
	}
}

/**
 * Ltrace exit.  When we are about to exit, we have to go through all
 * the processes, stop them all, remove all the breakpoints, and then
 * detach the processes that we attached to using -p.  If we left the
 * other tasks running, they might hit stray return breakpoints and
 * produce artifacts, so we better stop everyone, even if it's a bit
 * of extra work.
 */
struct ltrace_exiting_handler
{
	struct event_handler super;
	struct pid_set pids;
};

static Event *
ltrace_exiting_on_event(struct event_handler *super, Event *event)
{
	struct ltrace_exiting_handler * self = (void *)super;
	Process * task = event->proc;
	Process * leader = task->leader;

	debug(DEBUG_PROCESS, "pid %d; event type %d", task->pid, event->type);

	struct pid_task * task_info = get_task_info(&self->pids, task->pid);
	handle_stopping_event(task_info, &event);

	if (event != NULL && event->type == EVENT_BREAKPOINT)
		undo_breakpoint(event, leader);

	if (await_sigstop_delivery(&self->pids, task_info, event)
	    && all_stops_accountable(&self->pids))
		detach_process(leader);

	/* Sink all non-exit events.  We are about to exit, so we
	 * don't bother with queuing them.  */
	if (event_exit_or_none_p(event))
		return event;

	return NULL;
}

static void
ltrace_exiting_destroy(struct event_handler *super)
{
	struct ltrace_exiting_handler * self = (void *)super;
	free(self->pids.tasks);
}

static int
ltrace_exiting_install_handler(Process * proc)
{
	/* Only install to leader.  */
	if (proc->leader != proc)
		return 0;

	/* Perhaps we are already installed, if the user passed
	 * several -p options that are tasks of one process.  */
	if (proc->event_handler != NULL
	    && proc->event_handler->on_event == &ltrace_exiting_on_event)
		return 0;

	/* If stopping handler is already present, let it do the
	 * work.  */
	if (proc->event_handler != NULL) {
		assert(proc->event_handler->on_event
		       == &process_stopping_on_event);
		struct process_stopping_handler * other
			= (void *)proc->event_handler;
		other->exiting = 1;
		return 0;
	}

	struct ltrace_exiting_handler * handler
		= calloc(1, sizeof(*handler));
	if (handler == NULL) {
		perror("malloc exiting handler");
	fatal:
		/* XXXXXXXXXXXXXXXXXXX fixme */
		return -1;
	}

	handler->super.on_event = ltrace_exiting_on_event;
	handler->super.destroy = ltrace_exiting_destroy;
	install_event_handler(proc->leader, &handler->super);

	if (each_task(proc->leader, &send_sigstop,
		      &handler->pids) != NULL)
		goto fatal;

	return 0;
}

/*
 * When the traced process vforks, it's suspended until the child
 * process calls _exit or exec*.  In the meantime, the two share the
 * address space.
 *
 * The child process should only ever call _exit or exec*, but we
 * can't count on that (it's not the role of ltrace to police, but to
 * observe).  In any case, we will _at least_ have to deal with
 * removal of the vfork return breakpoint (which we have to smuggle
 * back in, so that the parent can see it, too), and introduction of
 * the exec* return breakpoint.  Since we already have both
 * breakpoint actions to deal with, we might as well support it all.
 *
 * The gist is that we pretend that the child is in a thread group
 * with its parent, and handle it as a multi-threaded case, with the
 * exception that we know that the parent is blocked, and don't
 * attempt to stop it.  When the child execs, we undo the setup.
 */

struct process_vfork_handler
{
	struct event_handler super;
	void * bp_addr;
};

static Event *
process_vfork_on_event(struct event_handler *super, Event *event)
{
	struct process_vfork_handler * self = (void *)super;
	struct breakpoint *sbp;
	assert(self != NULL);

	switch (event->type) {
	case EVENT_BREAKPOINT:
		/* Remember the vfork return breakpoint.  */
		if (self->bp_addr == NULL)
			self->bp_addr = event->e_un.brk_addr;
		break;

	case EVENT_EXIT:
	case EVENT_EXIT_SIGNAL:
	case EVENT_EXEC:
		/* Smuggle back in the vfork return breakpoint, so
		 * that our parent can trip over it once again.  */
		if (self->bp_addr != NULL) {
			sbp = dict_find_entry(event->proc->leader->breakpoints,
					      self->bp_addr);
			if (sbp != NULL)
				insert_breakpoint(event->proc->parent,
						  self->bp_addr,
						  sbp->libsym, 1);
		}

		continue_process(event->proc->parent->pid);

		/* Remove the leader that we artificially set up
		 * earlier.  */
		change_process_leader(event->proc, event->proc);
		destroy_event_handler(event->proc);

	default:
		;
	}

	return event;
}

void
continue_after_vfork(Process * proc)
{
	debug(DEBUG_PROCESS, "continue_after_vfork: pid=%d", proc->pid);
	struct process_vfork_handler * handler = calloc(1, sizeof(*handler));
	if (handler == NULL) {
		perror("malloc vfork handler");
		/* Carry on not bothering to treat the process as
		 * necessary.  */
		continue_process(proc->parent->pid);
		return;
	}

	/* We must set up a custom event handler, so that we see
	 * exec/exit events for the task itself.  */
	handler->super.on_event = process_vfork_on_event;
	install_event_handler(proc, &handler->super);

	/* Make sure that the child is the sole thread.  */
	assert(proc->leader == proc);
	assert(proc->next == NULL || proc->next->leader != proc);

	/* Make sure that the child's parent is properly set up.  */
	assert(proc->parent != NULL);
	assert(proc->parent->leader != NULL);

	change_process_leader(proc, proc->parent->leader);
}

static int
is_mid_stopping(Process *proc)
{
	return proc != NULL
		&& proc->event_handler != NULL
		&& proc->event_handler->on_event == &process_stopping_on_event;
}

void
continue_after_syscall(Process * proc, int sysnum, int ret_p)
{
	/* Don't continue if we are mid-stopping.  */
	if (ret_p && (is_mid_stopping(proc) || is_mid_stopping(proc->leader))) {
		debug(DEBUG_PROCESS,
		      "continue_after_syscall: don't continue %d",
		      proc->pid);
		return;
	}
	continue_process(proc->pid);
}

/* If ltrace gets SIGINT, the processes directly or indirectly run by
 * ltrace get it too.  We just have to wait long enough for the signal
 * to be delivered and the process terminated, which we notice and
 * exit ltrace, too.  So there's not much we need to do there.  We
 * want to keep tracing those processes as usual, in case they just
 * SIG_IGN the SIGINT to do their shutdown etc.
 *
 * For processes run in the background, we want to install an exit
 * handler that stops all the threads, removes all breakpoints, and
 * detaches.
 */
void
os_ltrace_exiting(void)
{
	struct opt_p_t * it;
	for (it = opt_p; it != NULL; it = it->next) {
		Process * proc = pid2proc(it->pid);
		if (proc == NULL || proc->leader == NULL)
			continue;
		if (ltrace_exiting_install_handler(proc->leader) < 0)
			fprintf(stderr,
				"Couldn't install exiting handler for %d.\n",
				proc->pid);
	}
}

int
os_ltrace_exiting_sighandler(void)
{
	extern int linux_in_waitpid;
	if (linux_in_waitpid) {
		os_ltrace_exiting();
		return 1;
	}
	return 0;
}

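/* Read up to 'len' bytes of the process's memory starting at 'addr'
 * into the local buffer 'laddr'.  Returns the number of bytes read.
 * A fault past the first word merely truncates the read (EIO); any
 * other failure returns (size_t)-1.  */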
size_t
umovebytes(Process *proc, void *addr, void *laddr, size_t len) {

	union {
		long a;
		char c[sizeof(long)];
	} a;
	int started = 0;
	size_t offset = 0, bytes_read = 0;

	while (offset < len) {
		a.a = ptrace(PTRACE_PEEKTEXT, proc->pid, addr + offset, 0);
		if (a.a == -1 && errno) {
			if (started && errno == EIO)
				return bytes_read;
			else
				return -1;
		}
		started = 1;

		if (len - offset >= sizeof(long)) {
			memcpy(laddr + offset, &a.c[0], sizeof(long));
			bytes_read += sizeof(long);
		}
		else {
			memcpy(laddr + offset, &a.c[0], len - offset);
			bytes_read += (len - offset);
		}
		offset += sizeof(long);
	}

	return bytes_read;
}

/* Read a series of bytes starting at the process's memory address
   'addr' and continuing until a NUL ('\0') is seen or 'len' bytes
   have been read.  */
int
umovestr(Process *proc, void *addr, int len, void *laddr) {
	union {
		long a;
		char c[sizeof(long)];
	} a;
	unsigned i;
	int offset = 0;

	while (offset < len) {
		a.a = ptrace(PTRACE_PEEKTEXT, proc->pid, addr + offset, 0);
		for (i = 0; i < sizeof(long); i++) {
			if (a.c[i] && offset + (signed)i < len) {
				*(char *)(laddr + offset + i) = a.c[i];
			} else {
				*(char *)(laddr + offset + i) = '\0';
				return 0;
			}
		}
		offset += sizeof(long);
	}
	*(char *)(laddr + offset) = '\0';
	return 0;
}