#include "config.h"

#include <asm/unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#ifdef HAVE_LIBSELINUX
# include <selinux/selinux.h>
#endif

#include "ptrace.h"
#include "common.h"
#include "breakpoint.h"
#include "proc.h"

/* If the system headers did not provide the constants, hard-code the normal
   values. */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_OLDSETOPTIONS 21
#define PTRACE_SETOPTIONS 0x4200
#define PTRACE_GETEVENTMSG 0x4201

/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#define PTRACE_O_TRACEFORK 0x00000002
#define PTRACE_O_TRACEVFORK 0x00000004
#define PTRACE_O_TRACECLONE 0x00000008
#define PTRACE_O_TRACEEXEC 0x00000010
#define PTRACE_O_TRACEVFORKDONE 0x00000020
#define PTRACE_O_TRACEEXIT 0x00000040

/* Wait extended result codes for the above trace options. */
#define PTRACE_EVENT_FORK 1
#define PTRACE_EVENT_VFORK 2
#define PTRACE_EVENT_CLONE 3
#define PTRACE_EVENT_EXEC 4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT 6

#endif /* PTRACE_EVENT_FORK */

#ifdef ARCH_HAVE_UMOVELONG
extern int arch_umovelong (Process *, void *, long *, arg_type_info *);
int
umovelong (Process *proc, void *addr, long *result, arg_type_info *info) {
	return arch_umovelong (proc, addr, result, info);
}
#else
/* Read a single long from the process's memory address 'addr' */
int
umovelong (Process *proc, void *addr, long *result, arg_type_info *info) {
	long pointed_to;

	errno = 0;
	pointed_to = ptrace (PTRACE_PEEKTEXT, proc->pid, addr, 0);
	if (pointed_to == -1 && errno)
		return -errno;

	*result = pointed_to;
	if (info) {
		switch(info->type) {
		case ARGTYPE_INT:
			*result &= 0x00000000ffffffffUL;
		default:
			break;
		}
	}
	return 0;
}
#endif

void
trace_fail_warning(pid_t pid)
{
	/* This was adapted from GDB. */
#ifdef HAVE_LIBSELINUX
	static int checked = 0;
	if (checked)
		return;
	checked = 1;

	/* -1 is returned for errors, 0 if it has no effect, 1 if
	 * PTRACE_ATTACH is forbidden. */
	if (security_get_boolean_active("deny_ptrace") == 1)
		fprintf(stderr,
"The SELinux boolean 'deny_ptrace' is enabled, which may prevent ltrace from\n"
"tracing other processes. You can disable this process attach protection by\n"
"issuing 'setsebool deny_ptrace=0' in the superuser context.\n");
#endif /* HAVE_LIBSELINUX */
}

void
trace_me(void)
{
	debug(DEBUG_PROCESS, "trace_me: pid=%d", getpid());
	if (ptrace(PTRACE_TRACEME, 0, 1, 0) < 0) {
		perror("PTRACE_TRACEME");
		trace_fail_warning(getpid());
		exit(1);
	}
}
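
/* A typical caller forks first and has the child call trace_me()
 * right before exec'ing the command to be traced (a sketch; 'command'
 * and 'argv' stand for the traced program's path and argument vector,
 * and error handling is omitted):
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		trace_me();
 *		execvp(command, argv);
 *		_exit(1);
 *	}
 */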
109
Petr Machatab4f9e0c2012-02-07 01:57:59 +0100110/* There's a (hopefully) brief period of time after the child process
111 * exec's when we can't trace it yet. Here we wait for kernel to
112 * prepare the process. */
113void
114wait_for_proc(pid_t pid)
115{
116 size_t i;
117 for (i = 0; i < 100; ++i) {
118 /* We read from memory address 0, but that shouldn't
119 * be a problem: the reading will just fail. We are
120 * looking for a particular reason of failure. */
121 if (ptrace(PTRACE_PEEKTEXT, pid, 0, 0) != -1
122 || errno != ESRCH)
123 return;
124
125 usleep(1000);
126 }
127
128 fprintf(stderr, "\
129I consistently fail to read a word from the freshly launched process.\n\
130I'll now try to proceed with tracing, but this shouldn't be happening.\n");
131}
132
Juan Cespedesf1350522008-12-16 18:19:58 +0100133int
Petr Machatacec06ec2012-04-10 13:31:55 +0200134trace_pid(pid_t pid)
135{
Petr Machata26627682011-07-08 18:15:32 +0200136 debug(DEBUG_PROCESS, "trace_pid: pid=%d", pid);
Petr Machatacec06ec2012-04-10 13:31:55 +0200137 /* This shouldn't emit error messages, as there are legitimate
138 * reasons that the PID can't be attached: like it may have
139 * already ended. */
140 if (ptrace(PTRACE_ATTACH, pid, 1, 0) < 0)
Juan Cespedes273ea6d1998-03-14 23:02:40 +0100141 return -1;
Petr Machata89a53602007-01-25 18:05:44 +0100142
Juan Cespedes714ee9d2009-04-07 13:28:54 +0200143 /* man ptrace: PTRACE_ATTACH attaches to the process specified
144 in pid. The child is sent a SIGSTOP, but will not
145 necessarily have stopped by the completion of this call;
146 use wait() to wait for the child to stop. */
Petr Machata9a5420c2011-07-09 11:21:23 +0200147 if (waitpid (pid, NULL, __WALL) != pid) {
Juan Cespedes714ee9d2009-04-07 13:28:54 +0200148 perror ("trace_pid: waitpid");
Petr Machata9a5420c2011-07-09 11:21:23 +0200149 return -1;
Juan Cespedes714ee9d2009-04-07 13:28:54 +0200150 }
151
Juan Cespedes273ea6d1998-03-14 23:02:40 +0100152 return 0;
153}
154
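/* Ask the kernel to report fork/vfork/clone/exec events for this task
 * and to mark syscall stops with bit 7 (PTRACE_O_TRACESYSGOOD).  The
 * high bit of proc->tracesysgood records that the options have
 * already been set. */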
void
trace_set_options(Process *proc, pid_t pid) {
	if (proc->tracesysgood & 0x80)
		return;

	debug(DEBUG_PROCESS, "trace_set_options: pid=%d", pid);

	long options = PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK |
		PTRACE_O_TRACEVFORK | PTRACE_O_TRACECLONE |
		PTRACE_O_TRACEEXEC;
	if (ptrace(PTRACE_SETOPTIONS, pid, 0, options) < 0 &&
	    ptrace(PTRACE_OLDSETOPTIONS, pid, 0, options) < 0) {
		perror("PTRACE_SETOPTIONS");
		return;
	}
	proc->tracesysgood |= 0x80;
}

void
untrace_pid(pid_t pid) {
	debug(DEBUG_PROCESS, "untrace_pid: pid=%d", pid);
	ptrace(PTRACE_DETACH, pid, 1, 0);
}

void
continue_after_signal(pid_t pid, int signum) {
	debug(DEBUG_PROCESS, "continue_after_signal: pid=%d, signum=%d", pid, signum);
	ptrace(PTRACE_SYSCALL, pid, 0, signum);
}

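/* Callback for each_qd_event: yield when a queued event belongs to
 * the given PID.  have_events_for() uses it to check whether any
 * event is already queued for a task. */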
static enum ecb_status
event_for_pid(Event * event, void * data)
{
	if (event->proc != NULL && event->proc->pid == (pid_t)(uintptr_t)data)
		return ecb_yield;
	return ecb_cont;
}

static int
have_events_for(pid_t pid)
{
	return each_qd_event(event_for_pid, (void *)(uintptr_t)pid) != NULL;
}

void
continue_process(pid_t pid)
{
	debug(DEBUG_PROCESS, "continue_process: pid=%d", pid);

	/* Only really continue the process if there are no events in
	   the queue for this process.  Otherwise just wait for the
	   other events to arrive. */
	if (!have_events_for(pid))
		/* We always trace syscalls to control fork(),
		 * clone(), execve()... */
		ptrace(PTRACE_SYSCALL, pid, 0, 0);
	else
		debug(DEBUG_PROCESS,
		      "putting off the continue, events in queue.");
}

/**
 * This is used for bookkeeping related to PIDs that the event
 * handlers work with.
 */
struct pid_task {
	pid_t pid;	/* This may be 0 for tasks that exited
			 * mid-handling. */
	int sigstopped : 1;
	int got_event : 1;
	int delivered : 1;
	int vforked : 1;
	int sysret : 1;
} * pids;

struct pid_set {
	struct pid_task * tasks;
	size_t count;
	size_t alloc;
};

/**
 * Breakpoint re-enablement.  When we hit a breakpoint, we must
 * disable it, single-step, and re-enable it.  That single-step can be
 * done only by one task in a task group, while others are stopped,
 * otherwise the processes would race for who sees the breakpoint
 * disabled and who doesn't.  The following is to keep track of it
 * all.
 */
struct process_stopping_handler
{
	struct event_handler super;

	/* The task that is doing the re-enablement. */
	Process * task_enabling_breakpoint;

	/* The breakpoint being re-enabled. */
	struct breakpoint *breakpoint_being_enabled;

	/* Artificial atomic skip breakpoint, if any is needed. */
	void *atomic_skip_bp_addr;

	enum {
		/* We are waiting for everyone to land in t/T. */
		psh_stopping = 0,

		/* We are doing the PTRACE_SINGLESTEP. */
		psh_singlestep,

		/* We are waiting for all the SIGSTOPs to arrive so
		 * that we can sink them. */
		psh_sinking,

		/* This is for tracking the ugly workaround. */
		psh_ugly_workaround,
	} state;

	int exiting;

	struct pid_set pids;
};
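
/* The handler normally walks psh_stopping -> psh_singlestep ->
 * psh_sinking, after which everyone is continued.  When ltrace is
 * exiting mid-handling, process_stopping_done() diverts to
 * psh_ugly_workaround instead (see below). */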

static struct pid_task *
get_task_info(struct pid_set * pids, pid_t pid)
{
	assert(pid != 0);
	size_t i;
	for (i = 0; i < pids->count; ++i)
		if (pids->tasks[i].pid == pid)
			return &pids->tasks[i];

	return NULL;
}

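/* Add a fresh record for PID, growing the array geometrically
 * (starting at 4 entries).  Returns NULL if the allocation fails. */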
static struct pid_task *
add_task_info(struct pid_set * pids, pid_t pid)
{
	if (pids->count == pids->alloc) {
		size_t ns = (2 * pids->alloc) ?: 4;
		struct pid_task * n = realloc(pids->tasks,
					      sizeof(*pids->tasks) * ns);
		if (n == NULL)
			return NULL;
		pids->tasks = n;
		pids->alloc = ns;
	}
	struct pid_task * task_info = &pids->tasks[pids->count++];
	memset(task_info, 0, sizeof(*task_info));
	task_info->pid = pid;
	return task_info;
}

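/* each_task callback: CBS_CONT for tasks that are in tracing stop,
 * zombie, or gone altogether; CBS_STOP for tasks that have not
 * stopped for us yet.  The raw status is optionally stored through
 * DATA. */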
static enum callback_status
task_stopped(struct Process *task, void *data)
{
	enum process_status st = process_status(task->pid);
	if (data != NULL)
		*(enum process_status *)data = st;

	/* If the task is already stopped, don't worry about it.
	 * Likewise if it managed to become a zombie or terminate in
	 * the meantime.  This can happen when the whole thread group
	 * is terminating. */
	switch (st) {
	case ps_invalid:
	case ps_tracing_stop:
	case ps_zombie:
		return CBS_CONT;
	case ps_sleeping:
	case ps_stop:
	case ps_other:
		return CBS_STOP;
	}

	abort ();
}

/* Task is blocked if it's stopped, or if it's a vfork parent. */
static enum callback_status
task_blocked(struct Process *task, void *data)
{
	struct pid_set * pids = data;
	struct pid_task * task_info = get_task_info(pids, task->pid);
	if (task_info != NULL
	    && task_info->vforked)
		return CBS_CONT;

	return task_stopped(task, NULL);
}

static Event *process_vfork_on_event(struct event_handler *super, Event *event);

static enum callback_status
task_vforked(struct Process *task, void *data)
{
	if (task->event_handler != NULL
	    && task->event_handler->on_event == &process_vfork_on_event)
		return CBS_STOP;
	return CBS_CONT;
}

static int
is_vfork_parent(Process * task)
{
	return each_task(task->leader, &task_vforked, NULL) != NULL;
}

static enum callback_status
send_sigstop(struct Process *task, void *data)
{
	Process * leader = task->leader;
	struct pid_set * pids = data;

	/* Look for pre-existing task record, or add new. */
	struct pid_task * task_info = get_task_info(pids, task->pid);
	if (task_info == NULL)
		task_info = add_task_info(pids, task->pid);
	if (task_info == NULL) {
		perror("send_sigstop: add_task_info");
		destroy_event_handler(leader);
		/* Signal failure upwards. */
		return CBS_STOP;
	}

	/* This task still has not been attached to.  It should be
	   stopped by the kernel. */
	if (task->state == STATE_BEING_CREATED)
		return CBS_CONT;

	/* Don't bother sending SIGSTOP if we are already stopped, or
	 * if we sent the SIGSTOP already, which happens when we are
	 * handling "onexit" and inherited the handler from breakpoint
	 * re-enablement. */
	enum process_status st;
	if (task_stopped(task, &st) == CBS_CONT)
		return CBS_CONT;
	if (task_info->sigstopped) {
		if (!task_info->delivered)
			return CBS_CONT;
		task_info->delivered = 0;
	}

	/* Also don't attempt to stop the process if it's a parent of
	 * a vforked process.  We set up the event handler specially
	 * to hint us.  In that case the parent is in D state, which
	 * we use to weed out unnecessary looping. */
	if (st == ps_sleeping
	    && is_vfork_parent (task)) {
		task_info->vforked = 1;
		return CBS_CONT;
	}

	if (task_kill(task->pid, SIGSTOP) >= 0) {
		debug(DEBUG_PROCESS, "send SIGSTOP to %d", task->pid);
		task_info->sigstopped = 1;
	} else
		fprintf(stderr,
			"Warning: couldn't send SIGSTOP to %d\n", task->pid);

	return CBS_CONT;
}

/* On certain kernels, detaching right after a singlestep causes the
   tracee to be killed with a SIGTRAP (that is, even though the
   singlestep was properly caught by waitpid).  The ugly workaround is
   to put a breakpoint where IP points and let the process continue.
   After this the breakpoint can be retracted and the process
   detached. */
static void
ugly_workaround(Process * proc)
{
	void * ip = get_instruction_pointer(proc);
	struct breakpoint *sbp = dict_find_entry(proc->leader->breakpoints, ip);
	if (sbp != NULL)
		enable_breakpoint(proc, sbp);
	else
		insert_breakpoint(proc, ip, NULL, 1);
	ptrace(PTRACE_CONT, proc->pid, 0, 0);
}

static void
process_stopping_done(struct process_stopping_handler * self, Process * leader)
{
	debug(DEBUG_PROCESS, "process stopping done %d",
	      self->task_enabling_breakpoint->pid);
	size_t i;
	if (!self->exiting) {
		for (i = 0; i < self->pids.count; ++i)
			if (self->pids.tasks[i].pid != 0
			    && (self->pids.tasks[i].delivered
				|| self->pids.tasks[i].sysret))
				continue_process(self->pids.tasks[i].pid);
		continue_process(self->task_enabling_breakpoint->pid);
		destroy_event_handler(leader);
	} else {
		self->state = psh_ugly_workaround;
		ugly_workaround(self->task_enabling_breakpoint);
	}
}

/* Before we detach, we need to make sure that the task's IP is on the
 * edge of an instruction.  So for tasks that have a breakpoint event
 * in the queue, we adjust the instruction pointer, just like
 * continue_after_breakpoint does. */
static enum ecb_status
undo_breakpoint(Event * event, void * data)
{
	if (event != NULL
	    && event->proc->leader == data
	    && event->type == EVENT_BREAKPOINT)
		set_instruction_pointer(event->proc, event->e_un.brk_addr);
	return ecb_cont;
}

static enum callback_status
untrace_task(struct Process *task, void *data)
{
	if (task != data)
		untrace_pid(task->pid);
	return CBS_CONT;
}

static enum callback_status
remove_task(struct Process *task, void *data)
{
	/* Don't untrace leader just yet. */
	if (task != data)
		remove_process(task);
	return CBS_CONT;
}

static void
detach_process(Process * leader)
{
	each_qd_event(&undo_breakpoint, leader);
	disable_all_breakpoints(leader);

	/* Now untrace the process, if it was attached to by -p. */
	struct opt_p_t * it;
	for (it = opt_p; it != NULL; it = it->next) {
		Process * proc = pid2proc(it->pid);
		if (proc == NULL)
			continue;
		if (proc->leader == leader) {
			each_task(leader, &untrace_task, NULL);
			break;
		}
	}
	each_task(leader, &remove_task, leader);
	destroy_event_handler(leader);
	remove_task(leader, NULL);
}

static void
handle_stopping_event(struct pid_task * task_info, Event ** eventp)
{
	/* Mark all events, so that we know whom to SIGCONT later. */
	if (task_info != NULL)
		task_info->got_event = 1;

	Event * event = *eventp;

	/* In every state, sink SIGSTOP events for tasks that it was
	 * sent to. */
	if (task_info != NULL
	    && event->type == EVENT_SIGNAL
	    && event->e_un.signum == SIGSTOP) {
		debug(DEBUG_PROCESS, "SIGSTOP delivered to %d", task_info->pid);
		if (task_info->sigstopped
		    && !task_info->delivered) {
			task_info->delivered = 1;
			*eventp = NULL; // sink the event
		} else
			fprintf(stderr, "suspicious: %d got SIGSTOP, but "
				"sigstopped=%d and delivered=%d\n",
				task_info->pid, task_info->sigstopped,
				task_info->delivered);
	}
}

/* Some SIGSTOPs may have not been delivered to their respective tasks
 * yet.  They are still in the queue.  If we have seen an event for
 * that process, continue it, so that the SIGSTOP can be delivered and
 * caught by ltrace.  We don't mind that the process is after a
 * breakpoint (and therefore potentially doesn't have an aligned IP),
 * because the signal will be delivered without the process actually
 * starting. */
static void
continue_for_sigstop_delivery(struct pid_set * pids)
{
	size_t i;
	for (i = 0; i < pids->count; ++i) {
		if (pids->tasks[i].pid != 0
		    && pids->tasks[i].sigstopped
		    && !pids->tasks[i].delivered
		    && pids->tasks[i].got_event) {
			debug(DEBUG_PROCESS, "continue %d for SIGSTOP delivery",
			      pids->tasks[i].pid);
			ptrace(PTRACE_SYSCALL, pids->tasks[i].pid, 0, 0);
		}
	}
}

static int
event_exit_p(Event * event)
{
	return event != NULL && (event->type == EVENT_EXIT
				 || event->type == EVENT_EXIT_SIGNAL);
}

static int
event_exit_or_none_p(Event * event)
{
	return event == NULL || event_exit_p(event)
		|| event->type == EVENT_NONE;
}

static int
await_sigstop_delivery(struct pid_set * pids, struct pid_task * task_info,
		       Event * event)
{
	/* If we still didn't get our SIGSTOP, continue the process
	 * and carry on. */
	if (event != NULL && !event_exit_or_none_p(event)
	    && task_info != NULL && task_info->sigstopped) {
		debug(DEBUG_PROCESS, "continue %d for SIGSTOP delivery",
		      task_info->pid);
		/* We should get the signal the first thing
		 * after this, so it should be OK to continue
		 * even if we are over a breakpoint. */
		ptrace(PTRACE_SYSCALL, task_info->pid, 0, 0);

	} else {
		/* If all SIGSTOPs were delivered, uninstall the
		 * handler and continue everyone. */
		/* XXX I suspect that we should check tasks that are
		 * still around.  As things are now, there could be a
		 * race between waiting for everyone to stop and one
		 * of the tasks exiting. */
		int all_clear = 1;
		size_t i;
		for (i = 0; i < pids->count; ++i)
			if (pids->tasks[i].pid != 0
			    && pids->tasks[i].sigstopped
			    && !pids->tasks[i].delivered) {
				all_clear = 0;
				break;
			}
		return all_clear;
	}

	return 0;
}

static int
all_stops_accountable(struct pid_set * pids)
{
	size_t i;
	for (i = 0; i < pids->count; ++i)
		if (pids->tasks[i].pid != 0
		    && !pids->tasks[i].got_event
		    && !have_events_for(pids->tasks[i].pid))
			return 0;
	return 1;
}

/* The protocol is: 0 for success, negative for failure, positive if
 * default singlestep is to be used. */
int arch_atomic_singlestep(struct Process *proc, Breakpoint *sbp,
			   int (*add_cb)(void *addr, void *data),
			   void *add_cb_data);

#ifndef ARCH_HAVE_ATOMIC_SINGLESTEP
int
arch_atomic_singlestep(struct Process *proc, Breakpoint *sbp,
		       int (*add_cb)(void *addr, void *data),
		       void *add_cb_data)
{
	return 1;
}
#endif

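/* An architecture that defines ARCH_HAVE_ATOMIC_SINGLESTEP supplies
 * its own arch_atomic_singlestep, which may call its ADD_CB argument
 * to plant a breakpoint past an atomic sequence instead of stepping
 * through it.  Here ADD_CB is atomic_singlestep_add_bp below. */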
static int
atomic_singlestep_add_bp(void *addr, void *data)
{
	struct process_stopping_handler *self = data;
	struct Process *proc = self->task_enabling_breakpoint;

	/* Only a single address is supported as of now. */
	assert(self->atomic_skip_bp_addr == NULL);

	self->atomic_skip_bp_addr = addr + 4;
	insert_breakpoint(proc->leader, self->atomic_skip_bp_addr, NULL, 1);

	return 0;
}

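/* Step the re-enablement task over its breakpoint: give the
 * architecture a chance to handle atomic sequences first, and fall
 * back to plain PTRACE_SINGLESTEP.  Returns 0 on success, negative on
 * failure. */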
static int
singlestep(struct process_stopping_handler *self)
{
	struct Process *proc = self->task_enabling_breakpoint;

	int status = arch_atomic_singlestep(self->task_enabling_breakpoint,
					    self->breakpoint_being_enabled,
					    &atomic_singlestep_add_bp, self);

	/* Propagate failure and success. */
	if (status <= 0)
		return status;

	/* Otherwise do the default action: singlestep. */
	debug(1, "PTRACE_SINGLESTEP");
	if (ptrace(PTRACE_SINGLESTEP, proc->pid, 0, 0)) {
		perror("PTRACE_SINGLESTEP");
		return -1;
	}
	return 0;
}

static void
post_singlestep(struct process_stopping_handler *self, Event **eventp)
{
	continue_for_sigstop_delivery(&self->pids);

	if ((*eventp)->type == EVENT_BREAKPOINT)
		*eventp = NULL; // handled

	if (self->atomic_skip_bp_addr != 0)
		delete_breakpoint(self->task_enabling_breakpoint->leader,
				  self->atomic_skip_bp_addr);

	self->breakpoint_being_enabled = NULL;
}

static void
singlestep_error(struct process_stopping_handler *self, Event **eventp)
{
	struct Process *teb = self->task_enabling_breakpoint;
	Breakpoint *sbp = self->breakpoint_being_enabled;
	fprintf(stderr, "%d couldn't singlestep over %s (%p)\n",
		teb->pid, sbp->libsym != NULL ? sbp->libsym->name : NULL,
		sbp->addr);
	delete_breakpoint(teb->leader, sbp->addr);
	post_singlestep(self, eventp);
}

/* This event handler is installed when we are in the process of
 * stopping the whole thread group to do the breakpoint re-enablement
 * for one of the threads.  We pump all events to the queue for later
 * processing while we wait for all the threads to stop.  When this
 * happens, we let the re-enablement thread PTRACE_SINGLESTEP,
 * re-enable, and continue everyone. */
static Event *
process_stopping_on_event(struct event_handler *super, Event *event)
{
	struct process_stopping_handler * self = (void *)super;
	Process * task = event->proc;
	Process * leader = task->leader;
	struct breakpoint *sbp = self->breakpoint_being_enabled;
	Process * teb = self->task_enabling_breakpoint;

	debug(DEBUG_PROCESS,
	      "pid %d; event type %d; state %d",
	      task->pid, event->type, self->state);

	struct pid_task * task_info = get_task_info(&self->pids, task->pid);
	if (task_info == NULL)
		fprintf(stderr, "new task??? %d\n", task->pid);
	handle_stopping_event(task_info, &event);

	int state = self->state;
	int event_to_queue = !event_exit_or_none_p(event);

	/* Deactivate the entry if the task exits. */
	if (event_exit_p(event) && task_info != NULL)
		task_info->pid = 0;

	/* Always handle sysrets.  Whether a sysret occurred, and which
	 * syscall it returns from, may need to be determined based on
	 * the process stack, so we need to keep that in sync with
	 * reality.  Note that we don't continue the process after the
	 * sysret is handled.  See continue_after_syscall. */
	if (event != NULL && event->type == EVENT_SYSRET) {
		debug(1, "%d LT_EV_SYSRET", event->proc->pid);
		event_to_queue = 0;
		task_info->sysret = 1;
	}

	switch (state) {
	case psh_stopping:
		/* If everyone is stopped, singlestep. */
		if (each_task(leader, &task_blocked, &self->pids) == NULL) {
			debug(DEBUG_PROCESS, "all stopped, now SINGLESTEP %d",
			      teb->pid);
			if (sbp->enabled)
				disable_breakpoint(teb, sbp);
			if (singlestep(self) < 0) {
				singlestep_error(self, &event);
				goto psh_sinking;
			}

			self->state = state = psh_singlestep;
		}
		break;

	case psh_singlestep:
		/* In singlestep state, breakpoint signifies that we
		 * have now stepped, and can re-enable the breakpoint. */
		if (event != NULL && task == teb) {

			/* This is not the singlestep that we are waiting for. */
			if (event->type == EVENT_SIGNAL) {
				if (singlestep(self) < 0) {
					singlestep_error(self, &event);
					goto psh_sinking;
				}
				break;
			}

			/* Essentially we don't care what event caused
			 * the thread to stop.  We can do the
			 * re-enablement now. */
			if (sbp->enabled)
				enable_breakpoint(teb, sbp);

			post_singlestep(self, &event);
			goto psh_sinking;
		}
		break;

	psh_sinking:
		state = self->state = psh_sinking;
	case psh_sinking:
		if (await_sigstop_delivery(&self->pids, task_info, event))
			process_stopping_done(self, leader);
		break;

	case psh_ugly_workaround:
		if (event == NULL)
			break;
		if (event->type == EVENT_BREAKPOINT) {
			undo_breakpoint(event, leader);
			if (task == teb)
				self->task_enabling_breakpoint = NULL;
		}
		if (self->task_enabling_breakpoint == NULL
		    && all_stops_accountable(&self->pids)) {
			undo_breakpoint(event, leader);
			detach_process(leader);
			event = NULL; // handled
		}
	}

	if (event != NULL && event_to_queue) {
		enque_event(event);
		event = NULL; // sink the event
	}

	return event;
}

static void
process_stopping_destroy(struct event_handler *super)
{
	struct process_stopping_handler * self = (void *)super;
	free(self->pids.tasks);
}

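/* Called when a task stops on a breakpoint: rewind the IP back to the
 * breakpoint address and, if the breakpoint is enabled, set up the
 * stop-all/singlestep/re-enable machinery above. */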
void
continue_after_breakpoint(Process *proc, struct breakpoint *sbp)
{
	set_instruction_pointer(proc, sbp->addr);
	if (sbp->enabled == 0) {
		continue_process(proc->pid);
	} else {
		debug(DEBUG_PROCESS,
		      "continue_after_breakpoint: pid=%d, addr=%p",
		      proc->pid, sbp->addr);
#if defined __sparc__ || defined __ia64___ || defined __mips__
		/* we don't want to singlestep here */
		continue_process(proc->pid);
#else
		struct process_stopping_handler * handler
			= calloc(sizeof(*handler), 1);
		if (handler == NULL) {
			perror("malloc breakpoint disable handler");
		fatal:
			/* Carry on, not bothering to re-enable. */
			continue_process(proc->pid);
			return;
		}

		handler->super.on_event = process_stopping_on_event;
		handler->super.destroy = process_stopping_destroy;
		handler->task_enabling_breakpoint = proc;
		handler->breakpoint_being_enabled = sbp;
		install_event_handler(proc->leader, &handler->super);

		if (each_task(proc->leader, &send_sigstop,
			      &handler->pids) != NULL)
			goto fatal;

		/* And deliver the first fake event, in case all the
		 * conditions are already fulfilled. */
		Event ev;
		ev.type = EVENT_NONE;
		ev.proc = proc;
		process_stopping_on_event(&handler->super, &ev);
#endif
	}
}

/**
 * Ltrace exit.  When we are about to exit, we have to go through all
 * the processes, stop them all, remove all the breakpoints, and then
 * detach the processes that we attached to using -p.  If we left the
 * other tasks running, they might hit stray return breakpoints and
 * produce artifacts, so we'd better stop everyone, even if it's a bit
 * of extra work.
 */
struct ltrace_exiting_handler
{
	struct event_handler super;
	struct pid_set pids;
};

static Event *
ltrace_exiting_on_event(struct event_handler *super, Event *event)
{
	struct ltrace_exiting_handler * self = (void *)super;
	Process * task = event->proc;
	Process * leader = task->leader;

	debug(DEBUG_PROCESS, "pid %d; event type %d", task->pid, event->type);

	struct pid_task * task_info = get_task_info(&self->pids, task->pid);
	handle_stopping_event(task_info, &event);

	if (event != NULL && event->type == EVENT_BREAKPOINT)
		undo_breakpoint(event, leader);

	if (await_sigstop_delivery(&self->pids, task_info, event)
	    && all_stops_accountable(&self->pids))
		detach_process(leader);

	/* Sink all non-exit events.  We are about to exit, so we
	 * don't bother with queuing them. */
	if (event_exit_or_none_p(event))
		return event;

	return NULL;
}

static void
ltrace_exiting_destroy(struct event_handler *super)
{
	struct ltrace_exiting_handler * self = (void *)super;
	free(self->pids.tasks);
}

static int
ltrace_exiting_install_handler(Process * proc)
{
	/* Only install to leader. */
	if (proc->leader != proc)
		return 0;

	/* Perhaps we are already installed, if the user passed
	 * several -p options that are tasks of one process. */
	if (proc->event_handler != NULL
	    && proc->event_handler->on_event == &ltrace_exiting_on_event)
		return 0;

	/* If stopping handler is already present, let it do the
	 * work. */
	if (proc->event_handler != NULL) {
		assert(proc->event_handler->on_event
		       == &process_stopping_on_event);
		struct process_stopping_handler * other
			= (void *)proc->event_handler;
		other->exiting = 1;
		return 0;
	}

	struct ltrace_exiting_handler * handler
		= calloc(sizeof(*handler), 1);
	if (handler == NULL) {
		perror("malloc exiting handler");
	fatal:
		/* XXXXXXXXXXXXXXXXXXX fixme */
		return -1;
	}

	handler->super.on_event = ltrace_exiting_on_event;
	handler->super.destroy = ltrace_exiting_destroy;
	install_event_handler(proc->leader, &handler->super);

	if (each_task(proc->leader, &send_sigstop,
		      &handler->pids) != NULL)
		goto fatal;

	return 0;
}

957
Petr Machatacbe29c62011-09-27 02:27:58 +0200958/*
959 * When the traced process vforks, it's suspended until the child
960 * process calls _exit or exec*. In the meantime, the two share the
961 * address space.
962 *
963 * The child process should only ever call _exit or exec*, but we
964 * can't count on that (it's not the role of ltrace to policy, but to
965 * observe). In any case, we will _at least_ have to deal with
966 * removal of vfork return breakpoint (which we have to smuggle back
967 * in, so that the parent can see it, too), and introduction of exec*
968 * return breakpoint. Since we already have both breakpoint actions
969 * to deal with, we might as well support it all.
970 *
971 * The gist is that we pretend that the child is in a thread group
972 * with its parent, and handle it as a multi-threaded case, with the
973 * exception that we know that the parent is blocked, and don't
974 * attempt to stop it. When the child execs, we undo the setup.
Petr Machatacbe29c62011-09-27 02:27:58 +0200975 */
976
struct process_vfork_handler
{
	struct event_handler super;
	void * bp_addr;
};

static Event *
process_vfork_on_event(struct event_handler *super, Event *event)
{
	struct process_vfork_handler * self = (void *)super;
	struct breakpoint *sbp;
	assert(self != NULL);

	switch (event->type) {
	case EVENT_BREAKPOINT:
		/* Remember the vfork return breakpoint. */
		if (self->bp_addr == NULL)
			self->bp_addr = event->e_un.brk_addr;
		break;

	case EVENT_EXIT:
	case EVENT_EXIT_SIGNAL:
	case EVENT_EXEC:
		/* Smuggle back in the vfork return breakpoint, so
		 * that our parent can trip over it once again. */
		if (self->bp_addr != NULL) {
			sbp = dict_find_entry(event->proc->leader->breakpoints,
					      self->bp_addr);
			if (sbp != NULL)
				insert_breakpoint(event->proc->parent,
						  self->bp_addr,
						  sbp->libsym, 1);
		}

		continue_process(event->proc->parent->pid);

		/* Remove the leader that we artificially set up
		 * earlier. */
		change_process_leader(event->proc, event->proc);
		destroy_event_handler(event->proc);

	default:
		;
	}

	return event;
}

void
continue_after_vfork(Process * proc)
{
	debug(DEBUG_PROCESS, "continue_after_vfork: pid=%d", proc->pid);
	struct process_vfork_handler * handler = calloc(sizeof(*handler), 1);
	if (handler == NULL) {
		perror("malloc vfork handler");
		/* Carry on, without giving the process the treatment
		 * it would otherwise need. */
		continue_process(proc->parent->pid);
		return;
	}

	/* We must set up a custom event handler, so that we see
	 * exec/exit events for the task itself. */
	handler->super.on_event = process_vfork_on_event;
	install_event_handler(proc, &handler->super);

	/* Make sure that the child is the sole thread. */
	assert(proc->leader == proc);
	assert(proc->next == NULL || proc->next->leader != proc);

	/* Make sure that the child's parent is properly set up. */
	assert(proc->parent != NULL);
	assert(proc->parent->leader != NULL);

	change_process_leader(proc, proc->parent->leader);
}

static int
is_mid_stopping(Process *proc)
{
	return proc != NULL
		&& proc->event_handler != NULL
		&& proc->event_handler->on_event == &process_stopping_on_event;
}

void
continue_after_syscall(Process * proc, int sysnum, int ret_p)
{
	/* Don't continue if we are mid-stopping. */
	if (ret_p && (is_mid_stopping(proc) || is_mid_stopping(proc->leader))) {
		debug(DEBUG_PROCESS,
		      "continue_after_syscall: don't continue %d",
		      proc->pid);
		return;
	}
	continue_process(proc->pid);
}

/* If ltrace gets SIGINT, the processes directly or indirectly run by
 * ltrace get it too.  We just have to wait long enough for the signal
 * to be delivered and the process terminated, which we notice and
 * then exit ltrace, too.  So there's not much we need to do there.  We
 * want to keep tracing those processes as usual, in case they just
 * SIG_IGN the SIGINT to do their shutdown etc.
 *
 * For processes run in the background, we want to install an exit
 * handler that stops all the threads, removes all breakpoints, and
 * detaches.
 */
void
os_ltrace_exiting(void)
{
	struct opt_p_t * it;
	for (it = opt_p; it != NULL; it = it->next) {
		Process * proc = pid2proc(it->pid);
		if (proc == NULL || proc->leader == NULL)
			continue;
		if (ltrace_exiting_install_handler(proc->leader) < 0)
			fprintf(stderr,
				"Couldn't install exiting handler for %d.\n",
				proc->pid);
	}
}

int
os_ltrace_exiting_sighandler(void)
{
	extern int linux_in_waitpid;
	if (linux_in_waitpid) {
		os_ltrace_exiting();
		return 1;
	}
	return 0;
}

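/* Copy LEN bytes from the traced process's memory at ADDR into the
 * local buffer LADDR, one word at a time.  Returns the number of
 * bytes read, which may be short of LEN if the region becomes
 * unreadable (EIO) part-way through, or (size_t)-1 on immediate
 * failure. */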
size_t
umovebytes(Process *proc, void *addr, void *laddr, size_t len) {

	union {
		long a;
		char c[sizeof(long)];
	} a;
	int started = 0;
	size_t offset = 0, bytes_read = 0;

	while (offset < len) {
		a.a = ptrace(PTRACE_PEEKTEXT, proc->pid, addr + offset, 0);
		if (a.a == -1 && errno) {
			if (started && errno == EIO)
				return bytes_read;
			else
				return -1;
		}
		started = 1;

		if (len - offset >= sizeof(long)) {
			memcpy(laddr + offset, &a.c[0], sizeof(long));
			bytes_read += sizeof(long);
		}
		else {
			memcpy(laddr + offset, &a.c[0], len - offset);
			bytes_read += (len - offset);
		}
		offset += sizeof(long);
	}

	return bytes_read;
}

/* Read a series of bytes starting at the process's memory address
   'addr' and continuing until a NUL ('\0') is seen or 'len' bytes
   have been read. */
int
umovestr(Process *proc, void *addr, int len, void *laddr) {
	union {
		long a;
		char c[sizeof(long)];
	} a;
	unsigned i;
	int offset = 0;

	while (offset < len) {
		a.a = ptrace(PTRACE_PEEKTEXT, proc->pid, addr + offset, 0);
		for (i = 0; i < sizeof(long); i++) {
			if (a.c[i] && offset + (signed)i < len) {
				*(char *)(laddr + offset + i) = a.c[i];
			} else {
				*(char *)(laddr + offset + i) = '\0';
				return 0;
			}
		}
		offset += sizeof(long);
	}
	*(char *)(laddr + offset) = '\0';
	return 0;
}