//===-- tsan_mman.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by front-end.
SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
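
// A tool or test that links with the TSan runtime may observe allocations by
// providing strong definitions of these hooks. A minimal sketch (hypothetical
// user code, not part of this file) could look like:
//   void __sanitizer_malloc_hook(void *ptr, uptr size) {
//     Printf("allocated %zu bytes at %p\n", size, ptr);
//   }
//   void __sanitizer_free_hook(void *ptr) {
//     Printf("freeing %p\n", ptr);
//   }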

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
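    // Trim the range to whole pages: only meta shadow pages that are fully
    // covered by the unmapped block can be safely released.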
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    FlushUnneededShadowMemory((uptr)MemToMeta(p), size / kMetaRatio);
  }
};

static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

void InitializeAllocator() {
  allocator()->Init(common_flags()->allocator_may_return_null);
}

void AllocatorThreadStart(ThreadState *thr) {
  allocator()->InitCache(&thr->alloc_cache);
  internal_allocator()->InitCache(&thr->internal_alloc_cache);
}

void AllocatorThreadFinish(ThreadState *thr) {
  allocator()->DestroyCache(&thr->alloc_cache);
  internal_allocator()->DestroyCache(&thr->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !flags()->report_signal_unsafe)
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return allocator()->ReturnNullOrDie();
  void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
  if (p == 0)
    return 0;
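  // ctx may not exist or be fully initialized for allocations made very early
  // during process startup; skip shadow/meta bookkeeping in that case.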
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (CallocShouldReturnNullDueToOverflow(size, n))
    return allocator()->ReturnNullOrDie();
  void *p = user_alloc(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
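  // Pretend the new block was written by this thread, so that unsynchronized
  // accesses racing with the allocation itself are reported (unless the
  // thread currently ignores accesses).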
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr, pc, p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  void *p2 = 0;
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (sz) {
    p2 = user_alloc(thr, pc, sz);
    if (p2 == 0)
      return 0;
    if (p) {
      uptr oldsz = user_alloc_usable_size(p);
      internal_memcpy(p2, p, min(oldsz, sz));
    }
  }
  if (p)
    user_free(thr, pc, p);
  return p2;
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  return b ? b->siz : 0;
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
}

void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

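// Free/unmapped byte counts are not tracked here; the two functions below are
// stubs returning a placeholder value.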
uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->alloc_cache);
  internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
  ctx->metamap.OnThreadIdle(thr);
}
}  // extern "C"