blob: 1f51cccbcc820dc7955792f3da48f71569d2c89f [file] [log] [blame]
//===-- scudo_tls_linux.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo thread local structure implementation for platforms supporting
/// thread_local.
///
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"

// This file is only for Linux proper: Android uses a different TLS scheme
// (no ELF TLS guaranteed), handled in a separate implementation.
#if SANITIZER_LINUX && !SANITIZER_ANDROID

#include "scudo_tls.h"

#include <pthread.h>

namespace __scudo {

// Guards the one-time global initialization performed by initOnce().
static pthread_once_t GlobalInitialized = PTHREAD_ONCE_INIT;
// pthread key used only to obtain a destructor callback (teardownThread) at
// thread exit; the value stored under the key is a destructor-iteration
// countdown, not a data pointer.
static pthread_key_t PThreadKey;

// Per-thread initialization state. The initial-exec TLS model avoids the
// dynamic TLS resolver on the allocator fast path.
__attribute__((tls_model("initial-exec")))
THREADLOCAL ThreadState ScudoThreadState = ThreadNotInitialized;
// Per-thread allocator data (see ScudoTSD in scudo_tls.h).
__attribute__((tls_model("initial-exec")))
THREADLOCAL ScudoTSD TSD;

// Fallback TSD for when the thread isn't initialized yet or is torn down. It
// can be shared between multiple threads and as such must be locked.
ScudoTSD FallbackTSD;

Kostya Kortchinsky36b34342017-04-27 20:21:16 +000037static void teardownThread(void *Ptr) {
Kostya Kortchinskydb18e4d2017-05-26 15:39:22 +000038 uptr I = reinterpret_cast<uptr>(Ptr);
Kostya Kortchinsky36b34342017-04-27 20:21:16 +000039 // The glibc POSIX thread-local-storage deallocation routine calls user
40 // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
41 // We want to be called last since other destructors might call free and the
42 // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
43 // quarantine and swallowing the cache.
Kostya Kortchinskydb18e4d2017-05-26 15:39:22 +000044 if (I > 1) {
45 // If pthread_setspecific fails, we will go ahead with the teardown.
46 if (LIKELY(pthread_setspecific(PThreadKey,
47 reinterpret_cast<void *>(I - 1)) == 0))
48 return;
Kostya Kortchinsky36b34342017-04-27 20:21:16 +000049 }
Kostya Kortchinsky39248092017-09-22 15:35:37 +000050 TSD.commitBack();
Kostya Kortchinsky36b34342017-04-27 20:21:16 +000051 ScudoThreadState = ThreadTornDown;
52}
53
54
// One-time global setup, run under GlobalInitialized by pthread_once.
// Registers teardownThread as the key destructor (called at thread exit for
// any thread that stored a non-null value under PThreadKey), initializes the
// allocator globals, and prepares the shared fallback TSD.
static void initOnce() {
  CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread), 0);
  initScudo();
  FallbackTSD.init(/*Shared=*/true);
}
60
Kostya Kortchinsky040c2112017-09-11 19:59:40 +000061void initThread(bool MinimalInit) {
Kostya Kortchinskydb18e4d2017-05-26 15:39:22 +000062 CHECK_EQ(pthread_once(&GlobalInitialized, initOnce), 0);
Kostya Kortchinsky040c2112017-09-11 19:59:40 +000063 if (UNLIKELY(MinimalInit))
64 return;
Kostya Kortchinskydb18e4d2017-05-26 15:39:22 +000065 CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>(
66 GetPthreadDestructorIterations())), 0);
Kostya Kortchinsky22396c22017-09-25 15:12:08 +000067 TSD.init(/*Shared=*/false);
Kostya Kortchinsky36b34342017-04-27 20:21:16 +000068 ScudoThreadState = ThreadInitialized;
69}
70
71} // namespace __scudo
72
Kostya Kortchinskyee0695762017-05-05 21:38:22 +000073#endif // SANITIZER_LINUX && !SANITIZER_ANDROID