//=-- lsan_allocator.cc ---------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

namespace __lsan {

static const uptr kMaxAllowedMallocSize =
    FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x10000000000ULL;  // 1T.

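// Per-chunk bookkeeping stored in the allocator's metadata area. The fields
// are packed into bit-fields; the allocated flag must occupy the first byte so
// that it can be read and written atomically (see RegisterAllocation below).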
struct ChunkMetadata {
  bool allocated : 8;  // Must be first.
  ChunkTag tag : 2;
  uptr requested_size : 54;
  u32 stack_trace_id;
};
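
// The allocator stack: small requests are served from a fixed virtual address
// range by SizeClassAllocator64 (which also stores the per-chunk
// ChunkMetadata); requests that do not fit go to the mmap-based
// LargeMmapAllocator. CombinedAllocator picks between the two, using a
// thread-local AllocatorCache for the size-class path.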
37
38typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
39 sizeof(ChunkMetadata), CompactSizeClassMap> PrimaryAllocator;
40typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
41typedef LargeMmapAllocator<> SecondaryAllocator;
42typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
43 SecondaryAllocator> Allocator;
44
45static Allocator allocator;
46static THREADLOCAL AllocatorCache cache;
47
48void InitializeAllocator() {
49 allocator.Init();
50}
51
52void AllocatorThreadFinish() {
53 allocator.SwallowCache(&cache);
54}
55
56static ChunkMetadata *Metadata(void *p) {
57 return (ChunkMetadata *)allocator.GetMetaData(p);
58}
59
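// The allocated flag lives in the first byte of ChunkMetadata. It is written
// atomically: set to 1 only after the tag, stack trace id and size have been
// filled in, and cleared as the first step of deallocation. The leak checker
// reads it back through LsanMetadata::allocated().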
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
  m->requested_size = size;
  atomic_store((atomic_uint8_t*)m, 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store((atomic_uint8_t*)m, 0, memory_order_relaxed);
}

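// Allocation entry points used by the interceptors. A zero-size request is
// treated as a one-byte allocation; requests larger than kMaxAllowedMallocSize
// are refused with a warning and return null.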
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }
  void *p = allocator.Allocate(&cache, size, alignment, cleared);
  RegisterAllocation(stack, p, size);
  return p;
}

void Deallocate(void *p) {
  RegisterDeallocation(p);
  allocator.Deallocate(&cache, p);
}

void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %p bytes\n",
           (void*)new_size);
    allocator.Deallocate(&cache, p);
    return 0;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

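// Reports the address range occupied by this thread's allocator cache, so the
// common LSan module can treat it specially (it holds pointers to free chunks)
// when scanning thread memory for live references.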
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)&cache;
  *end = *begin + sizeof(cache);
}

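// Note that this returns the originally requested size, not the possibly
// larger size the allocator actually reserved for the chunk.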
uptr GetMallocUsableSize(void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

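// Resolves an arbitrary (possibly interior) pointer to the beginning of the
// chunk it points into, or returns null if it does not point into a live heap
// allocation.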
void *PointsIntoChunk(void *p) {
  void *chunk = allocator.GetBlockBeginFastLocked(p);
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (p < chunk) return 0;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size)
    return chunk;
  return 0;
}

void *GetUserBegin(void *p) {
  return p;
}

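// LsanMetadata is the opaque per-chunk view used by the common LSan module;
// here it is a thin wrapper around ChunkMetadata.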
LsanMetadata::LsanMetadata(void *chunk) {
  metadata_ = Metadata(chunk);
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

template<typename Callable>
void ForEachChunk(Callable const &callback) {
  allocator.ForEachChunk(callback);
}

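// Explicit instantiations for the callback types used by the common module,
// so the template definition can stay out of the header.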
template void ForEachChunk<ProcessPlatformSpecificAllocationsCb>(
    ProcessPlatformSpecificAllocationsCb const &callback);
template void ForEachChunk<PrintLeakedCb>(PrintLeakedCb const &callback);
template void ForEachChunk<CollectLeaksCb>(CollectLeaksCb const &callback);
template void ForEachChunk<MarkIndirectlyLeakedCb>(
    MarkIndirectlyLeakedCb const &callback);
template void ForEachChunk<CollectIgnoredCb>(
    CollectIgnoredCb const &callback);

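// Backend for __lsan_ignore_object(): tags the chunk containing p as kIgnored
// so it is not reported as a leak. Invoked with the required locks already
// held.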
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}

}  // namespace __lsan