//=-- lsan_allocator.cc ---------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {

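// Per-chunk bookkeeping stored in the allocator's metadata space. The
// |allocated| flag must occupy the first byte so that
// RegisterAllocation/RegisterDeallocation can flip it atomically through an
// atomic_uint8_t cast.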
struct ChunkMetadata {
  u8 allocated : 8;  // Must be first.
  ChunkTag tag : 2;
  uptr requested_size : 54;
  u32 stack_trace_id;
};

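// MIPS64 and AArch64 have virtual address spaces too small for the fixed 4T
// region reserved below, so they use the 32-bit-style primary allocator over
// the whole mmap range; other 64-bit targets use SizeClassAllocator64 at a
// fixed address.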
#if defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
    sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
    PrimaryAllocator;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
        sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

static Allocator allocator;
static THREADLOCAL AllocatorCache cache;

void InitializeAllocator() {
  allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null);
}

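// Called when a thread is being destroyed: drain the thread's allocator
// cache back into the global allocator.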
void AllocatorThreadFinish() {
  allocator.SwallowCache(&cache);
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

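// Metadata is filled in before the chunk is marked as allocated: the
// |allocated| flag lives in the first byte of ChunkMetadata and is set with
// an atomic store, so a chunk is only ever observed either fully registered
// or not registered at all.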
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

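// Mark the chunk as not allocated (again via an atomic store to the first
// metadata byte) before it is actually returned to the allocator.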
static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

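// Zero-sized requests are bumped to one byte so that every allocation gets a
// distinct address; requests above kMaxAllowedMallocSize are rejected with a
// warning.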
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return nullptr;
  }
  void *p = allocator.Allocate(&cache, size, alignment, false);
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  RunMallocHooks(p, size);
  return p;
}

void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RunFreeHooks(p);
  RegisterDeallocation(p);
  allocator.Deallocate(&cache, p);
}

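// The chunk is unregistered while it is being resized, because Reallocate
// may move it; on success it is re-registered with the new size and stack.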
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(&cache, p);
    return nullptr;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

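// Expose the address range of this thread's allocator cache; the common
// module carves this range out of the TLS scan so the allocator's internal
// bookkeeping pointers are not treated as live references.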
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)&cache;
  *end = *begin + sizeof(cache);
}

uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

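// Likewise expose the range occupied by the global allocator object so the
// scanner can skip its internal bookkeeping as well.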
void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

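// Map an arbitrary address to the start of the chunk it points into, or
// return 0 if it does not point into a live (allocated) chunk. Addresses past
// the user-requested size do not count, except for the operator new with
// size 0 special case.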
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

using namespace __lsan;

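// Implementation of the public allocator introspection interface declared in
// sanitizer_common/sanitizer_allocator_interface.h. LSan keeps no
// free/unmapped-byte accounting, so those queries report 0.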
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}
}  // extern "C"
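// A minimal usage sketch (hypothetical client code, not part of this file),
// assuming the program is linked with LSan and the public
// <sanitizer/allocator_interface.h> header is available:
//
//   #include <sanitizer/allocator_interface.h>
//   #include <stdio.h>
//   #include <stdlib.h>
//
//   int main() {
//     void *p = malloc(100);
//     if (__sanitizer_get_ownership(p))
//       printf("allocated size: %zu\n", __sanitizer_get_allocated_size(p));
//     printf("heap bytes in use: %zu\n",
//            __sanitizer_get_current_allocated_bytes());
//     free(p);
//     return 0;
//   }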