//=-- lsan_allocator.cc ---------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {
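// Per-architecture cap on the size of a single allocation: 1GB on 32-bit
// x86/ARM, 4GB on MIPS64 and AArch64, 8GB elsewhere.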
#if defined(__i386__) || defined(__arm__)
static const uptr kMaxAllowedMallocSize = 1UL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
#endif
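// LSan's heap: the platform-specific PrimaryAllocator (see lsan_allocator.h)
// for ordinary sizes, combined with LargeMmapAllocator for oversized chunks.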
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

static Allocator allocator;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms);
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(GetAllocatorCache());
}

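// Returns the metadata block the allocator keeps for chunk p, stored out of
// line from the user memory.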
static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

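// Records the size, stack trace and tag for a fresh chunk, then atomically
// sets its 'allocated' flag (the leading byte of ChunkMetadata) last, so a
// concurrent leak scan never observes half-initialized metadata.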
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

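// Atomically clears the 'allocated' flag before the chunk is handed back to
// the allocator, taking it out of consideration for leak scans.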
static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

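// Common allocation path for all malloc-family entry points. Zero-sized
// requests are bumped to one byte so every allocation gets a distinct address;
// over-large requests go to the allocator's configured failure handler.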
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return Allocator::FailureHandler::OnBadRequest();
  }
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
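  // Run both the weak __sanitizer_malloc_hook and any hooks installed via
  // __sanitizer_install_malloc_and_free_hooks.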
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  RunMallocHooks(p, size);
  return p;
}

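// calloc() backend: checks nmemb * size for overflow up front, so a huge
// request fails cleanly instead of silently wrapping around.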
static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
    return Allocator::FailureHandler::OnBadRequest();
  size *= nmemb;
  return Allocate(stack, size, 1, true);
}

void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RunFreeHooks(p);
  RegisterDeallocation(p);
  allocator.Deallocate(GetAllocatorCache(), p);
}

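// Note that when new_size exceeds the limit, the old chunk is freed before
// reporting failure, so the caller must not touch p afterwards.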
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(GetAllocatorCache(), p);
    return Allocator::FailureHandler::OnBadRequest();
  }
  p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)GetAllocatorCache();
  *end = *begin + sizeof(AllocatorCache);
}

uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

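// Entry points called from the interceptors (see lsan_interceptors.cc). They
// translate allocation failures into the appropriate errno value via
// SetErrnoOnNull().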
void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    return Allocator::FailureHandler::OnBadRequest();
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

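// A typical caller looks like this (sketch only; the real interceptors live
// in lsan_interceptors.cc):
//
//   INTERCEPTOR(void*, malloc, uptr size) {
//     ENSURE_LSAN_INITED;
//     GET_STACK_TRACE_MALLOC;
//     return lsan_malloc(size, stack);
//   }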
void *lsan_malloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
}

void lsan_free(void *p) {
  Deallocate(p);
}

void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
}

void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Calloc(nmemb, size, stack));
}

void *lsan_valloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(
      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}

uptr lsan_mz_size(const void *p) {
  return GetMallocUsableSize(p);
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

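// If p points into a live chunk's user area, returns the chunk's start
// address; otherwise returns 0. Used by the leak scanner when tracing
// reachable memory.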
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

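// LSan keeps no header in front of user memory (metadata lives out of line),
// so the user pointer and the chunk start coincide.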
uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

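// Marks the chunk containing p as ignored, excluding it from leak reports;
// this backs __lsan_ignore_object().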
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
} // namespace __lsan

using namespace __lsan;

extern "C" {
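// Allocator statistics exported through the common sanitizer interface.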
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
#endif
} // extern "C"