//=-- lsan_allocator.cc ---------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {

static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize  = 0x40000000000ULL;  // 4T.

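// Explanatory note on the constants above: kMaxAllowedMallocSize caps a single
// allocation at 8 GB (8UL << 30); larger requests are reported and rejected in
// Allocate()/Reallocate() below. The primary allocator owns a fixed 4 TB
// region starting at kAllocatorSpace.
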
struct ChunkMetadata {
  bool allocated : 8;  // Must be first.
  ChunkTag tag : 2;
  uptr requested_size : 54;
  u32 stack_trace_id;
};

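// The bitfields above add up to 64 bits; `allocated` occupies the first byte
// (hence "Must be first") so that RegisterAllocation()/RegisterDeallocation()
// below can flip it atomically by storing through the metadata address
// reinterpreted as an atomic_uint8_t.
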
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
        sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

static Allocator allocator;
static THREADLOCAL AllocatorCache cache;

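// The combined allocator serves small requests from the size-class-based
// primary allocator through the per-thread `cache`, and falls back to the
// mmap-based LargeMmapAllocator for requests the primary cannot satisfy.
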
void InitializeAllocator() {
  allocator.Init();
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(&cache);
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return 0;
  }
  void *p = allocator.Allocate(&cache, size, alignment, false);
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  return p;
}

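// Illustrative sketch (not part of this file): the malloc-family interceptors
// in lsan_interceptors.cc are the expected callers of Allocate()/Deallocate().
// A hypothetical caller, with names and arguments assumed for illustration:
//
//   void *my_malloc(uptr size) {
//     GET_STACK_TRACE_MALLOC;  // defines `stack` for the allocation site
//     return Allocate(stack, size, /*alignment=*/8, /*cleared=*/false);
//   }
//
// Passing cleared=true additionally zeroes chunks from the primary allocator;
// chunks from the mmap-based secondary allocator are presumably zero already,
// hence the FromPrimary() check above.
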
void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RegisterDeallocation(p);
  allocator.Deallocate(&cache, p);
}

void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(&cache, p);
    return 0;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

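// Note that when new_size exceeds kMaxAllowedMallocSize, Reallocate() frees
// the old chunk and returns 0, so a caller must not keep using the original
// pointer after such a failure.
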
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)&cache;
  *end = *begin + sizeof(cache);
}

uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

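// GetMallocUsableSize() reports the originally requested size, not the
// (possibly larger) size class the allocator actually reserved for the chunk.
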
///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

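// PointsIntoChunk() is the query the leak checker's pointer scan relies on:
// given an arbitrary word found in scanned memory, it returns the chunk start
// if the word points into the user region of a live chunk, and 0 otherwise.
// The IsSpecialCaseOfOperatorNew0() call covers, as its name suggests,
// zero-sized allocations (e.g. operator new(0)), which would otherwise never
// pass the `addr < chunk + m->requested_size` check.
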
uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

using namespace __lsan;

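// The exported functions below implement the generic __sanitizer_* allocator
// introspection interface (see sanitizer_allocator_interface.h). LSan only
// tracks allocated and mapped byte counts; free/unmapped statistics are not
// maintained and are reported as 0.
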
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}
}  // extern "C"