//===-- tsan_mman.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by front-end.
extern "C" void WEAK __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

extern "C" void WEAK __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}

namespace __tsan {

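// Callback plugged into the sanitizer allocator: when a chunk of user memory
// is returned to the OS, the corresponding shadow is released as well.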
struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
  }
};

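// The allocator lives in aligned static storage and is initialized explicitly
// via InitializeAllocator(), so it does not depend on the order of global
// constructors.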
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

void InitializeAllocator() {
  allocator()->Init(common_flags()->allocator_may_return_null);
}

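// Per-thread allocator caches are created when a thread starts and destroyed
// when it finishes.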
void AllocatorThreadStart(ThreadState *thr) {
  allocator()->InitCache(&thr->alloc_cache);
  internal_allocator()->InitCache(&thr->internal_alloc_cache);
}

void AllocatorThreadFinish(ThreadState *thr) {
  allocator()->DestroyCache(&thr->alloc_cache);
  internal_allocator()->DestroyCache(&thr->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

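// Reports an allocation or deallocation performed inside a signal handler,
// where malloc/free are not async-signal-safe.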
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load(&thr->in_signal_handler, memory_order_relaxed) == 0 ||
      !flags()->report_signal_unsafe)
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  if (!IsFiredSuppression(ctx, rep, stack)) {
    rep.AddStack(stack, true);
    OutputReport(thr, rep);
  }
}

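// Main entry point for malloc and friends. Oversized requests are rejected
// up front; on success the new block is registered with the race detector
// state, unless the runtime is not yet fully initialized.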
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return allocator()->ReturnNullOrDie();
  void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
  if (p == 0)
    return 0;
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

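// calloc is malloc plus zeroing, with an explicit overflow check on n * size
// before the product is used.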
void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (CallocShouldReturnNullDueToOverflow(size, n))
    return allocator()->ReturnNullOrDie();
  void *p = user_alloc(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return p;
}

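// Shadow and metadata are updated before the block is handed back to the
// allocator, while the block is still valid.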
void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

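// Common bookkeeping for all allocation paths: record the block in the
// metadata map and imitate a write to the whole range, so that unsynchronized
// concurrent accesses to a fresh block are reported as races.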
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

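// Marking the freed range in shadow turns any subsequent access into a
// reportable heap-use-after-free.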
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr, pc, p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

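// realloc is implemented as allocate-copy-free; realloc(p, 0) degenerates to
// free(p) and returns 0.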
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  void *p2 = 0;
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (sz) {
    p2 = user_alloc(thr, pc, sz);
    if (p2 == 0)
      return 0;
    if (p) {
      uptr oldsz = user_alloc_usable_size(p);
      internal_memcpy(p2, p, min(oldsz, sz));
    }
  }
  if (p)
    user_free(thr, pc, p);
  return p2;
}

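// The usable size is recovered from the block metadata rather than asked of
// the allocator.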
uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  return b ? b->siz : 0;
}

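// The hooks below are invoked from the interceptors; they are suppressed
// until the runtime is initialized and whenever interceptors are ignored.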
void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
}

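// internal_alloc()/internal_free() serve the runtime's own data structures
// and bypass user-visible bookkeeping. thr->nomalloc marks contexts where
// allocation is forbidden; it is reset before CHECK(0) fires because the
// failing CHECK itself allocates.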
void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
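// __sanitizer_* allocator statistics interface, shared with the other
// sanitizers.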
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

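// Free and unmapped byte counts are not tracked here; return a non-zero
// placeholder.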
uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

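// Called when a thread goes idle: return the thread's cached memory to the
// central allocator so it can be reused.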
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->alloc_cache);
  internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
  ctx->metamap.OnThreadIdle(thr);
}
}  // extern "C"