//=-- lsan_allocator.cc ---------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

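// memset is declared directly rather than via <string.h>, presumably to keep
// this file free of libc header dependencies.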
extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {
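// Hard cap on a single allocation. Larger requests are reported with a
// warning and handed to the allocator's failure handler instead of being
// attempted. The cap is smaller on targets with constrained address spaces.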
#if defined(__i386__) || defined(__arm__)
static const uptr kMaxAllowedMallocSize = 1UL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
#endif
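// Standard two-level sanitizer allocator: the size-classed PrimaryAllocator
// (defined per-platform in lsan_allocator.h) serves small requests, and
// LargeMmapAllocator mmaps everything that does not fit a size class.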
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

static Allocator allocator;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms);
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(GetAllocatorCache());
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

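// Fills in the chunk's metadata (tag, stack trace id, requested size), then
// publishes the chunk by flipping its `allocated` flag with a relaxed atomic
// store. The reinterpret_cast below targets exactly that flag, which is why
// `allocated` must stay the first byte of ChunkMetadata.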
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

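// Central allocation path: rejects oversized requests, allocates, registers
// the chunk for leak tracking and notifies the malloc hooks. Zeroing is done
// by hand, and only for primary chunks: secondary chunks come straight from
// mmap and are already zero.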
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return Allocator::FailureHandler::OnBadRequest();
  }
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  RunMallocHooks(p, size);
  return p;
}

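// calloc-style allocation: verifies that nmemb * size cannot overflow before
// multiplying, and always returns zeroed memory.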
static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
    return Allocator::FailureHandler::OnBadRequest();
  size *= nmemb;
  return Allocate(stack, size, 1, true);
}

void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RunFreeHooks(p);
  RegisterDeallocation(p);
  allocator.Deallocate(GetAllocatorCache(), p);
}

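// Note that on an oversized request the old chunk is freed before the
// failure handler runs, so unlike C realloc the original block does not
// survive a failed reallocation.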
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(GetAllocatorCache(), p);
    return Allocator::FailureHandler::OnBadRequest();
  }
  p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)GetAllocatorCache();
  *end = *begin + sizeof(AllocatorCache);
}

uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

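// Shared by the libc-facing entry points below: malloc-family functions are
// expected to set errno to ENOMEM on failure, so do that whenever an
// allocation came back null.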
inline void *check_ptr(void *ptr) {
  if (UNLIKELY(!ptr))
    errno = errno_ENOMEM;
  return ptr;
}

void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    return Allocator::FailureHandler::OnBadRequest();
  }
  return check_ptr(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

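// Thin wrappers called from the interceptors (see lsan_interceptors.cc);
// each receives the stack trace captured at the interception point.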
void *lsan_malloc(uptr size, const StackTrace &stack) {
  return check_ptr(Allocate(stack, size, 1, kAlwaysClearMemory));
}

void lsan_free(void *p) {
  Deallocate(p);
}

void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
  return check_ptr(Reallocate(stack, p, size, 1));
}

void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  return check_ptr(Calloc(nmemb, size, stack));
}

void *lsan_valloc(uptr size, const StackTrace &stack) {
  return check_ptr(
      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}

uptr lsan_mz_size(const void *p) {
  return GetMallocUsableSize(p);
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

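// Returns the chunk's start address if p points into the user region of a
// live chunk, and 0 otherwise. IsSpecialCaseOfOperatorNew0 additionally
// accepts the one-past-the-size-word pointer produced by `new T[0]` when T
// has a destructor.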
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

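// LSan stores no header inside the chunk, so user data starts at the chunk
// itself.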
uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

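// Tags the chunk containing p as kIgnored so that the leak checker skips it;
// used to implement __lsan_ignore_object.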
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
} // namespace __lsan

using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

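// LSan does not track free or unmapped byte counts; these return trivial
// values for interface compatibility.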
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}

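// Tools can observe allocations either by overriding these weak hooks at
// link time or, at run time, via __sanitizer_install_malloc_and_free_hooks
// (declared in the public <sanitizer/allocator_interface.h>). A minimal
// sketch of a link-time override, in client code rather than this file:
//
//   extern "C" void __sanitizer_malloc_hook(void *ptr, uptr size) {
//     // Called after each successful allocation with its address and size.
//   }
//   extern "C" void __sanitizer_free_hook(void *ptr) {
//     // Called at each deallocation.
//   }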
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
#endif
} // extern "C"