//===-- asan_thread.cc ----------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Thread-related code.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan/lsan_common.h"

namespace __asan {

// AsanThreadContext implementation.

struct CreateThreadContextArgs {
  AsanThread *thread;
  StackTrace *stack;
};

void AsanThreadContext::OnCreated(void *arg) {
  CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
  if (args->stack)
    stack_id = StackDepotPut(*args->stack);
  thread = args->thread;
  thread->set_context(this);
}

void AsanThreadContext::OnFinished() {
  // Drop the link to the AsanThread object.
  thread = nullptr;
}

// MIPS requires an aligned address.
static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadRegistry *asan_thread_registry;

static BlockingMutex mu_for_thread_context(LINKER_INITIALIZED);
static LowLevelAllocator allocator_for_thread_context;

static ThreadContextBase *GetAsanThreadContext(u32 tid) {
  BlockingMutexLock lock(&mu_for_thread_context);
  return new(allocator_for_thread_context) AsanThreadContext(tid);
}

ThreadRegistry &asanThreadRegistry() {
  static bool initialized;
  // Don't worry about thread safety -- this should be called when there is
  // only a single thread.
  if (!initialized) {
    // Never reuse ASan threads: we store a pointer to the AsanThreadContext
    // in TSD and can't reliably tell when no more TSD destructors will
    // be called. It would be wrong to reuse an AsanThreadContext for another
    // thread before all TSD destructors have been called for it.
69 asan_thread_registry = new(thread_registry_placeholder) ThreadRegistry(
70 GetAsanThreadContext, kMaxNumberOfThreads, kMaxNumberOfThreads);
71 initialized = true;
72 }
73 return *asan_thread_registry;
74}
75
76AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
77 return static_cast<AsanThreadContext *>(
78 asanThreadRegistry().GetThreadLocked(tid));
79}
80
81// AsanThread implementation.
82
Sergey Matveeveba518b2014-12-05 17:31:13 +000083AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
84 u32 parent_tid, StackTrace *stack,
85 bool detached) {
Kostya Serebryanyf22c6972012-11-23 15:38:49 +000086 uptr PageSize = GetPageSizeCached();
87 uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
Joerg Sonnenberger9d09e2f2014-02-26 20:33:22 +000088 AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
Alexey Samsonov2d3a67b2012-01-17 06:35:31 +000089 thread->start_routine_ = start_routine;
90 thread->arg_ = arg;
Sergey Matveeveba518b2014-12-05 17:31:13 +000091 CreateThreadContextArgs args = { thread, stack };
92 asanThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread), detached,
93 parent_tid, &args);
Alexey Samsonov2d3a67b2012-01-17 06:35:31 +000094
95 return thread;
Kostya Serebryany019b76f2011-11-30 01:07:02 +000096}
97
Alexey Samsonov54afba82013-03-21 11:23:41 +000098void AsanThread::TSDDtor(void *tsd) {
99 AsanThreadContext *context = (AsanThreadContext*)tsd;
Sergey Matveev9be70fb2013-12-05 12:04:51 +0000100 VReport(1, "T%d TSDDtor\n", context->tid);
Alexey Samsonov54afba82013-03-21 11:23:41 +0000101 if (context->thread)
102 context->thread->Destroy();
Kostya Serebryanyb5eb5a72012-02-07 00:27:15 +0000103}
104
Kostya Serebryany3f4b9bb2012-01-06 19:44:11 +0000105void AsanThread::Destroy() {
Kostya Serebryany7a3a93f2013-12-11 13:54:01 +0000106 int tid = this->tid();
107 VReport(1, "T%d exited\n", tid);
Kostya Serebryanyb5eb5a72012-02-07 00:27:15 +0000108
Kostya Serebryany04a17672013-11-13 13:27:44 +0000109 malloc_storage().CommitBack();
Alexander Potapenkocf4bef32014-01-28 09:28:57 +0000110 if (common_flags()->use_sigaltstack) UnsetAlternateSignalStack();
Kostya Serebryany7a3a93f2013-12-11 13:54:01 +0000111 asanThreadRegistry().FinishThread(tid);
Alexey Samsonov4b168852013-09-02 08:39:07 +0000112 FlushToDeadThreadStats(&stats_);
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000113 // We also clear the shadow on thread destruction because
114 // some code may still be executing in later TSD destructors
115 // and we don't want it to have any poisoned stack.
Sergey Matveev09886cd2013-05-29 13:09:44 +0000116 ClearShadowForThreadStackAndTLS();
Kostya Serebryany7a3a93f2013-12-11 13:54:01 +0000117 DeleteFakeStack(tid);
Kostya Serebryanyf22c6972012-11-23 15:38:49 +0000118 uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
Alexey Samsonov40d5b772012-06-06 16:15:07 +0000119 UnmapOrDie(this, size);
Kostya Serebryany71788fa2014-01-29 09:29:16 +0000120 DTLS_Destroy();
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000121}
122
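// Fiber support. StartSwitchFiber()/FinishSwitchFiber() back the public
// __sanitizer_start_switch_fiber()/__sanitizer_finish_switch_fiber() entry
// points defined at the bottom of this file: the former records the bounds of
// the stack we are about to switch to and saves (or drops) the current fake
// stack, the latter installs those bounds, restores the saved fake stack and
// optionally reports the bounds of the stack we came from.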
void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
                                  uptr size) {
  if (atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: starting fiber switch while in fiber switch\n");
    Die();
  }

  next_stack_bottom_ = bottom;
  next_stack_top_ = bottom + size;
  atomic_store(&stack_switching_, 1, memory_order_release);

  FakeStack *current_fake_stack = fake_stack_;
  if (fake_stack_save)
    *fake_stack_save = fake_stack_;
  fake_stack_ = nullptr;
  SetTLSFakeStack(nullptr);
  // If fake_stack_save is null, the fiber will die; delete its fake stack.
  if (!fake_stack_save && current_fake_stack)
    current_fake_stack->Destroy(this->tid());
}

void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
                                   uptr *bottom_old,
                                   uptr *size_old) {
  if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: finishing a fiber switch that has not started\n");
    Die();
  }

  if (fake_stack_save) {
    SetTLSFakeStack(fake_stack_save);
    fake_stack_ = fake_stack_save;
  }

  if (bottom_old)
    *bottom_old = stack_bottom_;
  if (size_old)
    *size_old = stack_top_ - stack_bottom_;
  stack_bottom_ = next_stack_bottom_;
  stack_top_ = next_stack_top_;
  atomic_store(&stack_switching_, 0, memory_order_release);
  next_stack_top_ = 0;
  next_stack_bottom_ = 0;
}

inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
  if (!atomic_load(&stack_switching_, memory_order_acquire))
    return StackBounds{stack_bottom_, stack_top_};  // NOLINT
  char local;
  const uptr cur_stack = (uptr)&local;
  // Note: we need to check the next stack first, because FinishSwitchFiber
  // may be in the process of overwriting stack_top_/stack_bottom_. But in
  // that case we are already on the next stack.
  if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
    return StackBounds{next_stack_bottom_, next_stack_top_};  // NOLINT
  return StackBounds{stack_bottom_, stack_top_};  // NOLINT
}

uptr AsanThread::stack_top() {
  return GetStackBounds().top;
}

uptr AsanThread::stack_bottom() {
  return GetStackBounds().bottom;
}

uptr AsanThread::stack_size() {
  const auto bounds = GetStackBounds();
  return bounds.top - bounds.bottom;
}

// We want to create the FakeStack lazily on the first use, but not earlier
// than the stack size is known, and the procedure has to be async-signal safe.
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
  uptr stack_size = this->stack_size();
  if (stack_size == 0)  // stack_size is not yet available, don't use FakeStack.
    return nullptr;
  uptr old_val = 0;
  // fake_stack_ has 3 states:
  // 0   -- not initialized
  // 1   -- being initialized
  // ptr -- initialized
  // This CAS checks if the state was 0 and if so changes it to state 1,
  // if that was successful, it initializes the pointer.
  if (atomic_compare_exchange_strong(
      reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
      memory_order_relaxed)) {
    uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
    CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
    stack_size_log =
        Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
    stack_size_log =
        Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
    fake_stack_ = FakeStack::Create(stack_size_log);
    SetTLSFakeStack(fake_stack_);
    return fake_stack_;
  }
  return nullptr;
}

void AsanThread::Init() {
  next_stack_top_ = next_stack_bottom_ = 0;
  atomic_store(&stack_switching_, false, memory_order_release);
  fake_stack_ = nullptr;  // Will be initialized lazily if needed.
  CHECK_EQ(this->stack_size(), 0U);
  SetThreadStackAndTls();
  CHECK_GT(this->stack_size(), 0U);
  CHECK(AddrIsInMem(stack_bottom_));
  CHECK(AddrIsInMem(stack_top_ - 1));
  ClearShadowForThreadStackAndTLS();
  int local = 0;
  VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
          (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
          &local);
}

thread_return_t AsanThread::ThreadStart(
    uptr os_id, atomic_uintptr_t *signal_thread_is_registered) {
  Init();
  asanThreadRegistry().StartThread(tid(), os_id, /*workerthread*/ false,
                                   nullptr);
  if (signal_thread_is_registered)
    atomic_store(signal_thread_is_registered, 1, memory_order_release);

  if (common_flags()->use_sigaltstack) SetAlternateSignalStack();

  if (!start_routine_) {
    // start_routine_ == 0 if we're on the main thread or on one of the
    // OS X libdispatch worker threads. But nobody is supposed to call
    // ThreadStart() for the worker threads.
    CHECK_EQ(tid(), 0);
    return 0;
  }

  thread_return_t res = start_routine_(arg_);

  // On POSIX systems we defer this to the TSD destructor. LSan will consider
  // the thread's memory as non-live from the moment we call Destroy(), even
  // though that memory might contain pointers to heap objects which will be
  // cleaned up by a user-defined TSD destructor. Thus, calling Destroy() before
  // the TSD destructors have run might cause false positives in LSan.
  if (!SANITIZER_POSIX)
    this->Destroy();

  return res;
}

void AsanThread::SetThreadStackAndTls() {
  uptr tls_size = 0;
  uptr stack_size = 0;
  GetThreadStackAndTls(tid() == 0, const_cast<uptr *>(&stack_bottom_),
                       const_cast<uptr *>(&stack_size), &tls_begin_, &tls_size);
  stack_top_ = stack_bottom_ + stack_size;
  tls_end_ = tls_begin_ + tls_size;
  dtls_ = DTLS_Get();

  int local;
  CHECK(AddrIsInStack((uptr)&local));
}

void AsanThread::ClearShadowForThreadStackAndTLS() {
  PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
  if (tls_begin_ != tls_end_)
    PoisonShadow(tls_begin_, tls_end_ - tls_begin_, 0);
}

bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
                                           StackFrameAccess *access) {
  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (has_fake_stack()) {
    bottom = fake_stack()->AddrIsInFakeStack(addr);
    CHECK(bottom);
    access->offset = addr - bottom;
    access->frame_pc = ((uptr*)bottom)[2];
    access->frame_descr = (const char *)((uptr*)bottom)[1];
    return true;
  }
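  // The address is on the real stack: walk the shadow downwards to find the
  // left redzone of the enclosing frame, then read the frame header (magic,
  // frame description, pc) that the instrumentation stores at the frame start.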
  uptr aligned_addr = addr & ~(SANITIZER_WORDSIZE/8 - 1);  // align addr.
  u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8*)MemToShadow(bottom);

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr != kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
  }

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr == kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
  }

  if (shadow_ptr < shadow_bottom) {
    return false;
  }

  uptr* ptr = (uptr*)SHADOW_TO_MEM((uptr)(shadow_ptr + 1));
  CHECK(ptr[0] == kCurrentStackFrameMagic);
  access->offset = addr - (uptr)ptr;
  access->frame_pc = ptr[2];
  access->frame_descr = (const char*)ptr[1];
  return true;
}

bool AsanThread::AddrIsInStack(uptr addr) {
  const auto bounds = GetStackBounds();
  return addr >= bounds.bottom && addr < bounds.top;
}

static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
                                       void *addr) {
  AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base);
  AsanThread *t = tctx->thread;
  if (!t) return false;
  if (t->AddrIsInStack((uptr)addr)) return true;
  if (t->has_fake_stack() && t->fake_stack()->AddrIsInFakeStack((uptr)addr))
    return true;
  return false;
}

AsanThread *GetCurrentThread() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (!context) {
    if (SANITIZER_ANDROID) {
      // On Android, the libc constructor is called _after_ asan_init and
      // cleans up TSD. Try to figure out whether this is still the main thread
      // by the stack address. We are not entirely sure that we have the
      // correct main thread limits, so only do this magic on Android, and only
      // if the found thread is the main thread.
      AsanThreadContext *tctx = GetThreadContextByTidLocked(0);
      if (tctx && ThreadStackContainsAddress(tctx, &context)) {
        SetCurrentThread(tctx->thread);
        return tctx->thread;
      }
    }
    return nullptr;
  }
  return context->thread;
}

void SetCurrentThread(AsanThread *t) {
  CHECK(t->context());
  VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(),
          (void *)GetThreadSelf());
  // Make sure we do not reset the current AsanThread.
  CHECK_EQ(0, AsanTSDGet());
  AsanTSDSet(t->context());
  CHECK_EQ(t->context(), AsanTSDGet());
}

u32 GetCurrentTidOrInvalid() {
  AsanThread *t = GetCurrentThread();
  return t ? t->tid() : kInvalidTid;
}

AsanThread *FindThreadByStackAddress(uptr addr) {
  asanThreadRegistry().CheckLocked();
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(
      asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress,
                                                   (void *)addr));
  return tctx ? tctx->thread : nullptr;
}

void EnsureMainThreadIDIsCorrect() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (context && (context->tid == 0))
    context->os_id = GetTid();
}

__asan::AsanThread *GetAsanThreadByOsIDLocked(uptr os_id) {
  __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
      __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
  if (!context) return nullptr;
  return context->thread;
}
}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (!t) return false;
  *stack_begin = t->stack_bottom();
  *stack_end = t->stack_top();
  *tls_begin = t->tls_begin();
  *tls_end = t->tls_end();
  // ASan doesn't keep allocator caches in TLS, so these are unused.
  *cache_begin = 0;
  *cache_end = 0;
  *dtls = t->dtls();
  return true;
}

void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
                            void *arg) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (t && t->has_fake_stack())
    t->fake_stack()->ForEachFakeFrame(callback, arg);
}

void LockThreadRegistry() {
  __asan::asanThreadRegistry().Lock();
}

void UnlockThreadRegistry() {
  __asan::asanThreadRegistry().Unlock();
}

void EnsureMainThreadIDIsCorrect() {
  __asan::EnsureMainThreadIDIsCorrect();
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

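// A sketch of how the annotations below are typically used when switching to
// another fiber/coroutine (illustrative only; swapcontext() and the *_ctx and
// *_stack variables stand in for the embedder's own coroutine machinery):
//
//   void *fake_stack_save = nullptr;
//   __sanitizer_start_switch_fiber(&fake_stack_save, next_stack_bottom,
//                                  next_stack_size);
//   swapcontext(&this_ctx, &next_ctx);  // the actual stack switch
//   // Runs again once control comes back to this fiber:
//   __sanitizer_finish_switch_fiber(fake_stack_save, &peer_stack_bottom,
//                                   &peer_stack_size);
//
// Passing nullptr as fake_stack_save to __sanitizer_start_switch_fiber makes
// ASan destroy the current fake stack, which is what a fiber that will never
// run again should do.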
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
                                    uptr size) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__sanitizer_start_switch_fiber called from unknown thread\n");
    return;
  }
  t->StartSwitchFiber((FakeStack**)fakestacksave, (uptr)bottom, size);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_finish_switch_fiber(void* fakestack,
                                     const void **bottom_old,
                                     uptr *size_old) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__sanitizer_finish_switch_fiber called from unknown thread\n");
    return;
  }
  t->FinishSwitchFiber((FakeStack*)fakestack,
                       (uptr*)bottom_old,
                       (uptr*)size_old);
}
}