//=-- lsan_allocator.cc ---------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

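// memset is declared by hand so the runtime does not pull in libc headers;
// Allocate() below uses it to zero primary chunks.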
extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {
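// Hard cap on a single allocation. Requests above it either return null (if
// allocator_may_return_null is set) or abort with an error report. The cap is
// smaller on targets with smaller virtual address spaces.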
#if defined(__i386__) || defined(__arm__)
static const uptr kMaxAllowedMallocSize = 1UL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
#endif
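// The heap: a size-classed primary allocator (PrimaryAllocator is chosen
// per-platform in lsan_allocator.h) for small requests, with an mmap-based
// secondary allocator as the fallback for large ones.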
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

static Allocator allocator;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms);
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(GetAllocatorCache());
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
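  // The first byte of ChunkMetadata is the 'allocated' flag (it must come
  // first; see lsan_allocator.h). Fill in the other fields above, then
  // publish the chunk with an atomic store so a concurrent leak scan never
  // sees a half-initialized chunk.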
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
  if (AllocatorMayReturnNull()) {
    Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
    return nullptr;
  }
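  // The three-argument overload (sanitizer_allocator_report.h) is NORETURN,
  // so control never falls off the end of this function.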
  ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, &stack);
}

void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize)
    return ReportAllocationSizeTooBig(size, stack);
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, &stack);
  }
  // Do not rely on the allocator to clear the memory (it's slow).
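  // Secondary (LargeMmapAllocator) chunks come straight from mmap and are
  // already zero, so only primary chunks need an explicit memset.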
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
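  // Run the legacy single-function hook first, then any hooks registered via
  // __sanitizer_install_malloc_and_free_hooks().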
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  RunMallocHooks(p, size);
  return p;
}

static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, &stack);
  }
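  // CheckForCallocOverflow() ruled out overflow above, so the multiplication
  // is safe.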
  size *= nmemb;
  return Allocate(stack, size, 1, true);
}

void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RunFreeHooks(p);
  RegisterDeallocation(p);
  allocator.Deallocate(GetAllocatorCache(), p);
}

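// The chunk is unregistered up front because allocator.Reallocate() may free
// it or move the data to a new chunk; whatever chunk results is re-registered
// with the new size.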
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    allocator.Deallocate(GetAllocatorCache(), p);
    return ReportAllocationSizeTooBig(new_size, stack);
  }
  p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

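// Exposes the per-thread allocator cache range to the common LSan module,
// which excludes it from the pointer scan: the cache holds pointers to freed
// chunks, which must not make those chunks look reachable.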
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)GetAllocatorCache();
  *end = *begin + sizeof(AllocatorCache);
}

uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

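// The lsan_* entry points below implement the libc allocation API on top of
// Allocate()/Deallocate(). They are called from the interceptors and handle
// errno and the corner cases each libc function requires.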
int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, &stack);
  }
  void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_malloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
}

void lsan_free(void *p) {
  Deallocate(p);
}

void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
}

void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Calloc(nmemb, size, stack));
}

void *lsan_valloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(
      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}

void *lsan_pvalloc(uptr size, const StackTrace &stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, &stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
}

uptr lsan_mz_size(const void *p) {
  return GetMallocUsableSize(p);
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

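// Given an arbitrary word encountered during the memory scan, return the
// address of the live chunk it points into, or 0. This is the core predicate
// the common LSan module uses to decide reachability.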
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
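  // new T[0] for T with a destructor allocates a one-word array cookie, and
  // the user pointer points just past it, i.e. exactly to the chunk's end;
  // accept that pointer as valid too.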
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

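// User-visible begin coincides with the chunk begin: LSan keeps its metadata
// in the allocator's out-of-line metadata region rather than in a header
// inside the chunk.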
uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

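// Marks the chunk containing p as ignored (the __lsan_ignore_object API);
// called with the allocator locked.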
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
} // namespace __lsan

using namespace __lsan;

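// Public allocator introspection API; declarations live in
// <sanitizer/allocator_interface.h>.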
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

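// Free and unmapped byte counts are not tracked separately, so the next two
// queries return trivial values.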
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}
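// Example (hypothetical client code): ownership and size can be queried for
// any heap pointer, e.g.
//   void *p = malloc(16);
//   if (__sanitizer_get_ownership(p))
//     printf("allocated size: %zu\n", __sanitizer_get_allocated_size(p));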

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
#endif
} // extern "C"