//=-- lsan_allocator.cc ---------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

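// The runtime avoids pulling in libc headers, so the one libc routine used
// below is declared by hand (uptr matches size_t on all supported targets).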
extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {
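// Hard cap on a single allocation; larger requests are reported and rejected
// up front: 1 GiB on 32-bit targets, 4 GiB on mips64/aarch64, 8 GiB elsewhere,
// presumably tracking the usable virtual address space of each target.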
#if defined(__i386__) || defined(__arm__)
static const uptr kMaxAllowedMallocSize = 1UL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
#endif
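// LSan allocates through the sanitizer_common CombinedAllocator: a
// size-class-based primary allocator for small blocks plus the mmap-based
// LargeMmapAllocator fallback for everything else. PrimaryAllocator and
// AllocatorCache are selected per-platform in lsan_allocator.h.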
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

static Allocator allocator;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms);
}

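// Called on thread teardown: drain the dying thread's allocator cache back
// into the global free lists so its blocks can be reused.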
void AllocatorThreadFinish() {
  allocator.SwallowCache(GetAllocatorCache());
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

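// Chunk bookkeeping. The 'allocated' flag is the first byte of ChunkMetadata
// (lsan_allocator.h requires it to be the field at offset zero), which lets
// these helpers flip it with a relaxed one-byte atomic store: it is set last
// on allocation, after tag/stack/size are filled in, and cleared first on
// deallocation, so a chunk is never observed as live with stale metadata.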
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

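// Common allocation path behind malloc, calloc, memalign, etc. Zero-byte
// requests are rounded up to one byte so every live allocation has a unique,
// registerable address. Clearing is done by hand for primary blocks only:
// secondary blocks come straight from mmap and are already zeroed.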
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return ReturnNullOrDieOnFailure::OnBadRequest();
  }
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
  if (UNLIKELY(!p))
    return ReturnNullOrDieOnFailure::OnOOM();
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  RunMallocHooks(p, size);
  return p;
}

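// calloc() back end. CheckForCallocOverflow rejects requests where
// nmemb * size would overflow uptr (e.g. calloc((uptr)-1, 4)) before the
// multiplication below is performed.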
static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
    return ReturnNullOrDieOnFailure::OnBadRequest();
  size *= nmemb;
  return Allocate(stack, size, 1, true);
}

void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RunFreeHooks(p);
  RegisterDeallocation(p);
  allocator.Deallocate(GetAllocatorCache(), p);
}

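// realloc() back end. The old chunk is unregistered up front because the
// allocator may move or free it, reusing its metadata slot before
// RegisterAllocation runs on the new address. Oversized requests release the
// old block and fail with a null return.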
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(GetAllocatorCache(), p);
    return ReturnNullOrDieOnFailure::OnBadRequest();
  }
  p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)GetAllocatorCache();
  *end = *begin + sizeof(AllocatorCache);
}

uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

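// Entry points called from the interceptors in lsan_interceptors.cc; they add
// the errno behavior libc callers expect on top of the internal routines. As
// a rough sketch (the exact macros live in the interceptor file), the malloc
// interceptor forwards here like this:
//
//   INTERCEPTOR(void *, malloc, uptr size) {
//     ENSURE_LSAN_INITED;
//     GET_STACK_TRACE_MALLOC;
//     return lsan_malloc(size, stack);
//   }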
int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    ReturnNullOrDieOnFailure::OnBadRequest();
    return errno_EINVAL;
  }
  void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

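// memalign() back end: alignments that are not a power of two are rejected
// with EINVAL (matching glibc behavior).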
void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    return ReturnNullOrDieOnFailure::OnBadRequest();
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_malloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
}

void lsan_free(void *p) {
  Deallocate(p);
}

void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
}

void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Calloc(nmemb, size, stack));
}

void *lsan_valloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(
      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}

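// Size query used by the Darwin malloc zone glue (hence the "mz" prefix).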
uptr lsan_mz_size(const void *p) {
  return GetMallocUsableSize(p);
}

///// Interface to the common LSan module. /////

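// The callbacks below are used by the leak-checking engine in lsan_common.cc:
// it locks the allocator, stops the world, and then walks every chunk through
// ForEachChunk while resolving candidate pointers with PointsIntoChunk.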
void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

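// If p points into a live chunk, return the chunk's begin address, else 0.
// The IsSpecialCaseOfOperatorNew0 check (see lsan_common.h) keeps certain
// zero-size operator new allocations from being falsely reported as leaked.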
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

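// LSan stores no header inside the user block, so the user pointer coincides
// with the chunk begin. LsanMetadata is the opaque accessor lsan_common.cc
// uses to read chunk state without knowing the allocator's metadata layout.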
uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

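// Back end of __lsan_ignore_object(): if p points into a live chunk, tag it
// kIgnored so it is skipped during leak reporting. Called with the global
// LSan lock held, as the "Locked" suffix indicates.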
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

using namespace __lsan;

extern "C" {
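// Allocator introspection interface (see sanitizer_allocator_interface.h).
// LSan tracks only allocated and mapped totals; free and unmapped byte
// counts are not maintained and always report as zero.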
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}

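// Fallback for platforms where weak symbols are unavailable: the hook
// references above must bind to real definitions there, so no-op versions
// are provided.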
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
#endif
}  // extern "C"
Vedant Kumar59ba7b82015-10-01 00:22:21 +0000307} // extern "C"