//=-- lsan_allocator.cc ---------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

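// The sanitizer runtimes are built without libc headers; memset() is the only
// libc routine this file needs, so its prototype is declared directly.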
extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {
#if defined(__i386__) || defined(__arm__)
static const uptr kMaxAllowedMallocSize = 1UL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
#endif
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

static Allocator allocator;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms);
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(GetAllocatorCache());
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

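// Fills in the out-of-line metadata for a freshly allocated chunk: the leak
// tag, the allocation stack (interned in the stack depot) and the requested
// size. The 'allocated' flag, which occupies the first byte of ChunkMetadata,
// is set last via a relaxed atomic store: if a thread is suspended mid-way
// through an allocation, the leak scanner sees the chunk as not yet allocated
// instead of reading half-written metadata.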
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

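// Back end for all of the lsan_* entry points below. Oversized requests fail
// through the allocator's failure handler; chunks served by the primary
// allocator are cleared by hand, while secondary (LargeMmapAllocator) chunks
// come straight from mmap and are therefore already zeroed.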
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return Allocator::FailureHandler::OnBadRequest();
  }
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  RunMallocHooks(p, size);
  return p;
}

void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RunFreeHooks(p);
  RegisterDeallocation(p);
  allocator.Deallocate(GetAllocatorCache(), p);
}

void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(GetAllocatorCache(), p);
    return Allocator::FailureHandler::OnBadRequest();
  }
  p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

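// Exposes the per-thread allocator cache range so the leak scanner can
// exclude it when scanning TLS: pointers kept in the free-list cache must not
// make freed chunks appear reachable.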
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)GetAllocatorCache();
  *end = *begin + sizeof(AllocatorCache);
}

uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

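// Entry points for the malloc family of interceptors. Each takes the stack
// trace captured at the interception site; the callers look roughly like this
// (see lsan_interceptors.cc):
//
//   INTERCEPTOR(void*, malloc, uptr size) {
//     ENSURE_LSAN_INITED;
//     GET_STACK_TRACE_MALLOC;
//     return lsan_malloc(size, stack);
//   }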
void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
  return Allocate(stack, size, alignment, kAlwaysClearMemory);
}

void *lsan_malloc(uptr size, const StackTrace &stack) {
  return Allocate(stack, size, 1, kAlwaysClearMemory);
}

void lsan_free(void *p) {
  Deallocate(p);
}

void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
  return Reallocate(stack, p, size, 1);
}

void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  if (CheckForCallocOverflow(size, nmemb))
    return Allocator::FailureHandler::OnBadRequest();
  size *= nmemb;
  return Allocate(stack, size, 1, true);
}

void *lsan_valloc(uptr size, const StackTrace &stack) {
  if (size == 0)
    size = GetPageSizeCached();
  return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
}

uptr lsan_mz_size(const void *p) {
  return GetMallocUsableSize(p);
}

///// Interface to the common LSan module. /////

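// The leak checker locks the allocator before suspending all other threads,
// so that no suspended thread can be holding the allocator's internal mutex
// while the scan walks its data structures.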
void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

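// Returns the beginning of the chunk that p points into, or 0 if p does not
// point into a live user chunk. This runs for every candidate pointer found
// during the scan; GetBlockBeginFastLocked() is safe here because the
// allocator stays locked for the duration of the leak check.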
uptr PointsIntoChunk(void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

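// User data starts at the very beginning of a chunk; metadata lives out of
// line (see Metadata() above), so this is the identity function.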
uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

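// Backs __lsan_ignore_object(): if p points into a live chunk, tag it
// kIgnored so it is excluded from leak reports. The "Locked" suffix means the
// caller is expected to hold the appropriate LSan lock.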
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

using namespace __lsan;

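// Allocator statistics, exported through the public __sanitizer_* interface
// declared in sanitizer_common/sanitizer_allocator_interface.h.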
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}

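// Where weak symbols are supported, clients may override these hooks to
// observe every allocation and deallocation. A minimal sketch (user code;
// 'total_allocated' is a hypothetical counter):
//
//   extern "C" void __sanitizer_malloc_hook(void *ptr, uptr size) {
//     total_allocated += size;
//   }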
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementations of the malloc hooks.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
#endif
}  // extern "C"