blob: fffd9bf70da968436893943790e93090cb4ed489 [file] [log] [blame]
Peter Collingbourne705e3102013-05-21 11:38:39 +00001//===-- sanitizer_stoptheworld_linux_libcdep.cc ---------------------------===//
Alexander Potapenko3614c162013-03-15 14:37:21 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// See sanitizer_stoptheworld.h for details.
11// This implementation was inspired by Markus Gutschke's linuxthreads.cc.
12//
13//===----------------------------------------------------------------------===//
14
Evgeniy Stepanov24e13722013-03-19 14:33:38 +000015
16#include "sanitizer_platform.h"
17#if SANITIZER_LINUX
Alexander Potapenko3614c162013-03-15 14:37:21 +000018
19#include "sanitizer_stoptheworld.h"
20
21#include <errno.h>
22#include <sched.h> // for clone
23#include <stddef.h>
24#include <sys/prctl.h> // for PR_* definitions
25#include <sys/ptrace.h> // for PTRACE_* definitions
26#include <sys/types.h> // for pid_t
Sergey Matveev115accb2013-05-13 10:35:20 +000027#if SANITIZER_ANDROID && defined(__arm__)
Alexey Samsonovbb090b52013-04-03 07:06:10 +000028# include <linux/user.h> // for pt_regs
29#else
30# include <sys/user.h> // for user_regs_struct
31#endif
Alexander Potapenko3614c162013-03-15 14:37:21 +000032#include <sys/wait.h> // for signal-related stuff
33
34#include "sanitizer_common.h"
35#include "sanitizer_libc.h"
36#include "sanitizer_linux.h"
37#include "sanitizer_mutex.h"
Dmitry Vyukov49960be2013-03-18 08:09:42 +000038#include "sanitizer_placement_new.h"
Alexander Potapenko3614c162013-03-15 14:37:21 +000039
40// This module works by spawning a Linux task which then attaches to every
41// thread in the caller process with ptrace. This suspends the threads, and
42// PTRACE_GETREGS can then be used to obtain their register state. The callback
43// supplied to StopTheWorld() is run in the tracer task while the threads are
44// suspended.
45// The tracer task must be placed in a different thread group for ptrace to
46// work, so it cannot be spawned as a pthread. Instead, we use the low-level
47// clone() interface (we want to share the address space with the caller
48// process, so we prefer clone() over fork()).
49//
50// We avoid the use of libc for two reasons:
51// 1. calling a library function while threads are suspended could cause a
// deadlock, if one of the threads happens to be holding a libc lock;
53// 2. it's generally not safe to call libc functions from the tracer task,
54// because clone() does not set up a thread-local storage for it. Any
55// thread-local variables used by libc will be shared between the tracer task
56// and the thread which spawned it.
57//
58// We deal with this by replacing libc calls with calls to our own
59// implementations defined in sanitizer_libc.h and sanitizer_linux.h. However,
60// there are still some libc functions which are used here:
61//
62// * All of the system calls ultimately go through the libc syscall() function.
63// We're operating under the assumption that syscall()'s implementation does
64// not acquire any locks or use any thread-local data (except for the errno
65// variable, which we handle separately).
66//
67// * We lack custom implementations of sigfillset() and sigaction(), so we use
68// the libc versions instead. The same assumptions as above apply.
69//
70// * It is safe to call libc functions before the cloned thread is spawned or
71// after it has exited. The following functions are used in this manner:
72// sigdelset()
73// sigprocmask()
74// clone()
75
// SuspendedThreadID values are handed directly to ptrace/waitpid below, so
// they must have the same size as pid_t.
COMPILER_CHECK(sizeof(SuspendedThreadID) == sizeof(pid_t));
77
78namespace __sanitizer {
// This class handles thread suspending/unsuspending in the tracer thread.
class ThreadSuspender {
 public:
  // |pid| is the process whose threads will be suspended.
  explicit ThreadSuspender(pid_t pid)
    : pid_(pid) {
      CHECK_GE(pid, 0);
    }
  // Attaches to every thread of pid_; returns false (after detaching) on
  // failure to enumerate threads.
  bool SuspendAllThreads();
  // Detaches from all suspended threads, resuming them.
  void ResumeAllThreads();
  // Sends PTRACE_KILL to all suspended threads.
  void KillAllThreads();
  // The list of threads we are currently attached to.
  SuspendedThreadsList &suspended_threads_list() {
    return suspended_threads_list_;
  }
 private:
  SuspendedThreadsList suspended_threads_list_;
  pid_t pid_;
  // Attaches to a single thread; returns true iff it was newly suspended.
  bool SuspendThread(SuspendedThreadID thread_id);
};
97
98bool ThreadSuspender::SuspendThread(SuspendedThreadID thread_id) {
99 // Are we already attached to this thread?
100 // Currently this check takes linear time, however the number of threads is
101 // usually small.
102 if (suspended_threads_list_.Contains(thread_id))
103 return false;
Peter Collingbourne9578a3e2013-05-08 14:43:49 +0000104 int pterrno;
105 if (internal_iserror(internal_ptrace(PTRACE_ATTACH, thread_id, NULL, NULL),
106 &pterrno)) {
Alexander Potapenko3614c162013-03-15 14:37:21 +0000107 // Either the thread is dead, or something prevented us from attaching.
108 // Log this event and move on.
Peter Collingbourne9578a3e2013-05-08 14:43:49 +0000109 Report("Could not attach to thread %d (errno %d).\n", thread_id, pterrno);
Alexander Potapenko3614c162013-03-15 14:37:21 +0000110 return false;
111 } else {
112 if (SanitizerVerbosity > 0)
113 Report("Attached to thread %d.\n", thread_id);
114 // The thread is not guaranteed to stop before ptrace returns, so we must
115 // wait on it.
Peter Collingbourne9578a3e2013-05-08 14:43:49 +0000116 uptr waitpid_status;
Alexander Potapenko3614c162013-03-15 14:37:21 +0000117 HANDLE_EINTR(waitpid_status, internal_waitpid(thread_id, NULL, __WALL));
Peter Collingbourne9578a3e2013-05-08 14:43:49 +0000118 int wperrno;
119 if (internal_iserror(waitpid_status, &wperrno)) {
Alexander Potapenko3614c162013-03-15 14:37:21 +0000120 // Got a ECHILD error. I don't think this situation is possible, but it
121 // doesn't hurt to report it.
122 Report("Waiting on thread %d failed, detaching (errno %d).\n", thread_id,
Peter Collingbourne9578a3e2013-05-08 14:43:49 +0000123 wperrno);
Alexander Potapenko3614c162013-03-15 14:37:21 +0000124 internal_ptrace(PTRACE_DETACH, thread_id, NULL, NULL);
125 return false;
126 }
127 suspended_threads_list_.Append(thread_id);
128 return true;
129 }
130}
131
132void ThreadSuspender::ResumeAllThreads() {
133 for (uptr i = 0; i < suspended_threads_list_.thread_count(); i++) {
134 pid_t tid = suspended_threads_list_.GetThreadID(i);
Peter Collingbourne9578a3e2013-05-08 14:43:49 +0000135 int pterrno;
136 if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, NULL, NULL),
137 &pterrno)) {
Alexander Potapenko3614c162013-03-15 14:37:21 +0000138 if (SanitizerVerbosity > 0)
139 Report("Detached from thread %d.\n", tid);
140 } else {
141 // Either the thread is dead, or we are already detached.
142 // The latter case is possible, for instance, if this function was called
143 // from a signal handler.
Peter Collingbourne9578a3e2013-05-08 14:43:49 +0000144 Report("Could not detach from thread %d (errno %d).\n", tid, pterrno);
Alexander Potapenko3614c162013-03-15 14:37:21 +0000145 }
146 }
147}
148
149void ThreadSuspender::KillAllThreads() {
150 for (uptr i = 0; i < suspended_threads_list_.thread_count(); i++)
151 internal_ptrace(PTRACE_KILL, suspended_threads_list_.GetThreadID(i),
152 NULL, NULL);
153}
154
155bool ThreadSuspender::SuspendAllThreads() {
Alexey Samsonov10f3ab72013-04-05 07:41:21 +0000156 ThreadLister thread_lister(pid_);
Alexander Potapenko3614c162013-03-15 14:37:21 +0000157 bool added_threads;
158 do {
159 // Run through the directory entries once.
160 added_threads = false;
Alexey Samsonov10f3ab72013-04-05 07:41:21 +0000161 pid_t tid = thread_lister.GetNextTID();
Alexander Potapenko3614c162013-03-15 14:37:21 +0000162 while (tid >= 0) {
163 if (SuspendThread(tid))
164 added_threads = true;
Alexey Samsonov10f3ab72013-04-05 07:41:21 +0000165 tid = thread_lister.GetNextTID();
Alexander Potapenko3614c162013-03-15 14:37:21 +0000166 }
Alexey Samsonov10f3ab72013-04-05 07:41:21 +0000167 if (thread_lister.error()) {
Alexander Potapenko3614c162013-03-15 14:37:21 +0000168 // Detach threads and fail.
169 ResumeAllThreads();
170 return false;
171 }
Alexey Samsonov10f3ab72013-04-05 07:41:21 +0000172 thread_lister.Reset();
Alexander Potapenko3614c162013-03-15 14:37:21 +0000173 } while (added_threads);
174 return true;
175}
176
// Pointer to the ThreadSuspender instance for use in signal handler.
// Set by TracerThread while its local suspender is alive, NULL otherwise.
static ThreadSuspender *thread_suspender_instance = NULL;

// Signals that should not be blocked (this is used in the parent thread as well
// as the tracer thread).
static const int kUnblockedSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV,
                                         SIGBUS, SIGXCPU, SIGXFSZ };
184
// Structure for passing arguments into the tracer thread.
struct TracerThreadArgument {
  // Callback to run while all threads are suspended.
  StopTheWorldCallback callback;
  // Opaque argument forwarded to |callback|.
  void *callback_argument;
  // The tracer thread waits on this mutex until the parent finishes its
  // preparations.
  BlockingMutex mutex;
};
193
194// Signal handler to wake up suspended threads when the tracer thread dies.
195void TracerThreadSignalHandler(int signum, siginfo_t *siginfo, void *) {
196 if (thread_suspender_instance != NULL) {
197 if (signum == SIGABRT)
198 thread_suspender_instance->KillAllThreads();
199 else
200 thread_suspender_instance->ResumeAllThreads();
201 }
202 internal__exit((signum == SIGABRT) ? 1 : 2);
203}
204
// Size of alternative stack for signal handlers in the tracer thread.
static const int kHandlerStackSize = 4096;

// This function will be run as a cloned task.
// Suspends all threads of the parent process, runs the callback passed in
// |argument|, then resumes them. Returns 0 on success, 3 if suspending failed.
static int TracerThread(void* argument) {
  TracerThreadArgument *tracer_thread_argument =
      (TracerThreadArgument *)argument;

  // Wait for the parent thread to finish preparations.
  // (Lock/Unlock pair: the parent holds the mutex until it is ready.)
  tracer_thread_argument->mutex.Lock();
  tracer_thread_argument->mutex.Unlock();

  // The parent of this cloned task is the process being suspended.
  ThreadSuspender thread_suspender(internal_getppid());
  // Global pointer for the signal handler.
  thread_suspender_instance = &thread_suspender;

  // Alternate stack for signal handling.
  InternalScopedBuffer<char> handler_stack_memory(kHandlerStackSize);
  struct sigaltstack handler_stack;
  internal_memset(&handler_stack, 0, sizeof(handler_stack));
  handler_stack.ss_sp = handler_stack_memory.data();
  handler_stack.ss_size = kHandlerStackSize;
  internal_sigaltstack(&handler_stack, NULL);

  // Install our handler for fatal signals. Other signals should be blocked by
  // the mask we inherited from the caller thread.
  for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
       signal_index++) {
    struct sigaction new_sigaction;
    internal_memset(&new_sigaction, 0, sizeof(new_sigaction));
    new_sigaction.sa_sigaction = TracerThreadSignalHandler;
    // Run the handler on the alternate stack set up above.
    new_sigaction.sa_flags = SA_ONSTACK | SA_SIGINFO;
    sigfillset(&new_sigaction.sa_mask);
    sigaction(kUnblockedSignals[signal_index], &new_sigaction, NULL);
  }

  int exit_code = 0;
  if (!thread_suspender.SuspendAllThreads()) {
    Report("Failed suspending threads.\n");
    exit_code = 3;
  } else {
    tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),
                                     tracer_thread_argument->callback_argument);
    thread_suspender.ResumeAllThreads();
    exit_code = 0;
  }
  // Clear the global so the signal handler no longer touches the (about to be
  // destroyed) suspender, and tear down the alternate signal stack.
  thread_suspender_instance = NULL;
  handler_stack.ss_flags = SS_DISABLE;
  internal_sigaltstack(&handler_stack, NULL);
  return exit_code;
}
256
Alexander Potapenkofd8726c2013-04-01 14:38:56 +0000257class ScopedStackSpaceWithGuard {
258 public:
259 explicit ScopedStackSpaceWithGuard(uptr stack_size) {
260 stack_size_ = stack_size;
261 guard_size_ = GetPageSizeCached();
262 // FIXME: Omitting MAP_STACK here works in current kernels but might break
263 // in the future.
264 guard_start_ = (uptr)MmapOrDie(stack_size_ + guard_size_,
265 "ScopedStackWithGuard");
266 CHECK_EQ(guard_start_, (uptr)Mprotect((uptr)guard_start_, guard_size_));
267 }
268 ~ScopedStackSpaceWithGuard() {
269 UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_);
270 }
271 void *Bottom() const {
272 return (void *)(guard_start_ + stack_size_ + guard_size_);
273 }
274
275 private:
276 uptr stack_size_;
277 uptr guard_size_;
278 uptr guard_start_;
279};
280
Sergey Matveev5f672c42013-06-19 15:39:13 +0000281NOINLINE static void WipeStack() {
Sergey Matveevc085fe82013-06-19 14:04:11 +0000282 char arr[256];
283 internal_memset(arr, 0, sizeof(arr));
284}
285
// Signal masks and saved handlers used by StopTheWorld to block signals before
// spawning the tracer thread and to restore the original state afterwards.
static sigset_t blocked_sigset;
static sigset_t old_sigset;
static struct sigaction old_sigactions[ARRAY_SIZE(kUnblockedSignals)];
289
// Suspends all threads of the current process, runs |callback| (with
// |argument|) in a separate tracer task while they are stopped, then resumes
// them and restores signal state. Blocks until the tracer task exits.
void StopTheWorld(StopTheWorldCallback callback, void *argument) {
  // Glibc's sigaction() has a side-effect where it copies garbage stack values
  // into oldact, which can cause false negatives in LSan. As a quick workaround
  // we zero some stack space here.
  WipeStack();
  // Block all signals that can be blocked safely, and install default handlers
  // for the remaining signals.
  // We cannot allow user-defined handlers to run while the ThreadSuspender
  // thread is active, because they could conceivably call some libc functions
  // which modify errno (which is shared between the two threads).
  sigfillset(&blocked_sigset);
  for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
       signal_index++) {
    // Remove the signal from the set of blocked signals.
    sigdelset(&blocked_sigset, kUnblockedSignals[signal_index]);
    // Install the default handler, saving the old one for later restoration.
    struct sigaction new_sigaction;
    internal_memset(&new_sigaction, 0, sizeof(new_sigaction));
    new_sigaction.sa_handler = SIG_DFL;
    sigfillset(&new_sigaction.sa_mask);
    sigaction(kUnblockedSignals[signal_index], &new_sigaction,
              &old_sigactions[signal_index]);
  }
  int sigprocmask_status = sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
  CHECK_EQ(sigprocmask_status, 0); // sigprocmask should never fail
  // Make this process dumpable. Processes that are not dumpable cannot be
  // attached to.
  int process_was_dumpable = internal_prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);
  if (!process_was_dumpable)
    internal_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
  // Prepare the arguments for TracerThread.
  struct TracerThreadArgument tracer_thread_argument;
  tracer_thread_argument.callback = callback;
  tracer_thread_argument.callback_argument = argument;
  const uptr kTracerStackSize = 2 * 1024 * 1024;
  ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize);
  // Block the execution of TracerThread until after we have set ptrace
  // permissions.
  tracer_thread_argument.mutex.Lock();
  // Share the address space but not the thread group, so ptrace can attach.
  pid_t tracer_pid = clone(TracerThread, tracer_stack.Bottom(),
                           CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED,
                           &tracer_thread_argument, 0, 0, 0);
  if (tracer_pid < 0) {
    Report("Failed spawning a tracer thread (errno %d).\n", errno);
    tracer_thread_argument.mutex.Unlock();
  } else {
    // On some systems we have to explicitly declare that we want to be traced
    // by the tracer thread.
#ifdef PR_SET_PTRACER
    internal_prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
#endif
    // Allow the tracer thread to start.
    tracer_thread_argument.mutex.Unlock();
    // Since errno is shared between this thread and the tracer thread, we
    // must avoid using errno while the tracer thread is running.
    // At this point, any signal will either be blocked or kill us, so waitpid
    // should never return (and set errno) while the tracer thread is alive.
    uptr waitpid_status = internal_waitpid(tracer_pid, NULL, __WALL);
    int wperrno;
    if (internal_iserror(waitpid_status, &wperrno))
      Report("Waiting on the tracer thread failed (errno %d).\n", wperrno);
  }
  // Restore the dumpable flag.
  if (!process_was_dumpable)
    internal_prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);
  // Restore the signal handlers.
  for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
       signal_index++) {
    sigaction(kUnblockedSignals[signal_index],
              &old_sigactions[signal_index], NULL);
  }
  // Restore the original signal mask.
  sigprocmask(SIG_SETMASK, &old_sigset, &old_sigset);
}
363
// Platform-specific methods from SuspendedThreadsList.
// Per-architecture selection: regs_struct is the register-dump type filled in
// by ptrace(PTRACE_GETREGS), and REG_SP names its stack-pointer field.
#if SANITIZER_ANDROID && defined(__arm__)
typedef pt_regs regs_struct;
#define REG_SP ARM_sp

#elif SANITIZER_LINUX && defined(__arm__)
typedef user_regs regs_struct;
#define REG_SP uregs[13]

#elif defined(__i386__) || defined(__x86_64__)
typedef user_regs_struct regs_struct;
#if defined(__i386__)
#define REG_SP esp
#else
#define REG_SP rsp
#endif

#elif defined(__powerpc__) || defined(__powerpc64__)
typedef pt_regs regs_struct;
#define REG_SP gpr[PT_R1]

#elif defined(__mips__)
typedef struct user regs_struct;
#define REG_SP regs[EF_REG29]

#else
#error "Unsupported architecture"
#endif // SANITIZER_ANDROID && defined(__arm__)
392
Alexander Potapenko53c18d72013-04-01 13:36:42 +0000393int SuspendedThreadsList::GetRegistersAndSP(uptr index,
394 uptr *buffer,
395 uptr *sp) const {
396 pid_t tid = GetThreadID(index);
Alexey Samsonovbb090b52013-04-03 07:06:10 +0000397 regs_struct regs;
Peter Collingbourne9578a3e2013-05-08 14:43:49 +0000398 int pterrno;
399 if (internal_iserror(internal_ptrace(PTRACE_GETREGS, tid, NULL, &regs),
400 &pterrno)) {
Alexander Potapenko53c18d72013-04-01 13:36:42 +0000401 Report("Could not get registers from thread %d (errno %d).\n",
Peter Collingbourne9578a3e2013-05-08 14:43:49 +0000402 tid, pterrno);
Alexander Potapenko53c18d72013-04-01 13:36:42 +0000403 return -1;
404 }
Sergey Matveev115accb2013-05-13 10:35:20 +0000405
406 *sp = regs.REG_SP;
Alexander Potapenko53c18d72013-04-01 13:36:42 +0000407 internal_memcpy(buffer, &regs, sizeof(regs));
408 return 0;
409}
410
411uptr SuspendedThreadsList::RegisterCount() {
Alexey Samsonovbb090b52013-04-03 07:06:10 +0000412 return sizeof(regs_struct) / sizeof(uptr);
Alexander Potapenko53c18d72013-04-01 13:36:42 +0000413}
Alexander Potapenko3614c162013-03-15 14:37:21 +0000414} // namespace __sanitizer
415
Alexey Samsonov46f93952013-04-03 07:24:35 +0000416#endif // SANITIZER_LINUX