//===-- scudo_tsd_exclusive.cpp ---------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo exclusive TSD implementation.
///
//===----------------------------------------------------------------------===//
13
Kostya Kortchinskyb59abb22017-09-26 17:20:02 +000014#include "scudo_tsd.h"
Kostya Kortchinsky36b34342017-04-27 20:21:16 +000015
Kostya Kortchinskyb59abb22017-09-26 17:20:02 +000016#if SCUDO_TSD_EXCLUSIVE
Kostya Kortchinsky36b34342017-04-27 20:21:16 +000017
Kostya Kortchinsky36b34342017-04-27 20:21:16 +000018namespace __scudo {
19
// Guards the one-time process-wide initialization performed by initOnce.
static pthread_once_t GlobalInitialized = PTHREAD_ONCE_INIT;
// TLS key whose destructor (teardownThread) runs at thread exit.
static pthread_key_t PThreadKey;

// Per-thread state flag; initial-exec TLS model avoids the dynamic TLS
// resolver on the fast path.
__attribute__((tls_model("initial-exec")))
THREADLOCAL ThreadState ScudoThreadState = ThreadNotInitialized;
// The thread's exclusive TSD (cache, quarantine, etc.).
__attribute__((tls_model("initial-exec")))
THREADLOCAL ScudoTSD TSD;

// Fallback TSD for when the thread isn't initialized yet or is torn down. It
// can be shared between multiple threads and as such must be locked.
ScudoTSD FallbackTSD;
31
Kostya Kortchinsky36b34342017-04-27 20:21:16 +000032static void teardownThread(void *Ptr) {
Kostya Kortchinskydb18e4d2017-05-26 15:39:22 +000033 uptr I = reinterpret_cast<uptr>(Ptr);
Kostya Kortchinsky36b34342017-04-27 20:21:16 +000034 // The glibc POSIX thread-local-storage deallocation routine calls user
35 // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
36 // We want to be called last since other destructors might call free and the
37 // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
38 // quarantine and swallowing the cache.
Kostya Kortchinskydb18e4d2017-05-26 15:39:22 +000039 if (I > 1) {
40 // If pthread_setspecific fails, we will go ahead with the teardown.
41 if (LIKELY(pthread_setspecific(PThreadKey,
42 reinterpret_cast<void *>(I - 1)) == 0))
43 return;
Kostya Kortchinsky36b34342017-04-27 20:21:16 +000044 }
Kostya Kortchinsky39248092017-09-22 15:35:37 +000045 TSD.commitBack();
Kostya Kortchinsky36b34342017-04-27 20:21:16 +000046 ScudoThreadState = ThreadTornDown;
47}
48
49
// One-time process-wide initialization, invoked via pthread_once from
// initThread: registers the TLS destructor, initializes the allocator, and
// sets up the shared fallback TSD.
static void initOnce() {
  // teardownThread will run at exit for every thread that set PThreadKey.
  CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread), 0);
  initScudo();
  // The fallback TSD may be used by several threads at once, so it must be
  // initialized as shared (lockable).
  FallbackTSD.init(/*Shared=*/true);
}
55
Kostya Kortchinsky040c2112017-09-11 19:59:40 +000056void initThread(bool MinimalInit) {
Kostya Kortchinskydb18e4d2017-05-26 15:39:22 +000057 CHECK_EQ(pthread_once(&GlobalInitialized, initOnce), 0);
Kostya Kortchinsky040c2112017-09-11 19:59:40 +000058 if (UNLIKELY(MinimalInit))
59 return;
Kostya Kortchinskydb18e4d2017-05-26 15:39:22 +000060 CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>(
61 GetPthreadDestructorIterations())), 0);
Kostya Kortchinsky22396c22017-09-25 15:12:08 +000062 TSD.init(/*Shared=*/false);
Kostya Kortchinsky36b34342017-04-27 20:21:16 +000063 ScudoThreadState = ThreadInitialized;
64}
65
66} // namespace __scudo
67
Kostya Kortchinskyb59abb22017-09-26 17:20:02 +000068#endif // SCUDO_TSD_EXCLUSIVE