//=-- lsan_allocator.cc ---------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

namespace __lsan {

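// Requests larger than kMaxAllowedMallocSize (8G) are refused outright (see
// Allocate() below); kAllocatorSpace and kAllocatorSize describe the fixed
// virtual address range backing the primary allocator.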
static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.

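// Per-chunk bookkeeping kept in the allocator's metadata area (see Metadata()
// below). |allocated| must be the first field so that RegisterAllocation()
// and RegisterDeallocation() can flip it with a single atomic byte store.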
struct ChunkMetadata {
  bool allocated : 8;  // Must be first.
  ChunkTag tag : 2;
  uptr requested_size : 54;
  u32 stack_trace_id;
};

typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
        sizeof(ChunkMetadata), CompactSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

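// One global allocator instance plus a per-thread cache of free chunks; each
// thread's cache is drained back into the global allocator in
// AllocatorThreadFinish().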
static Allocator allocator;
static THREADLOCAL AllocatorCache cache;

void InitializeAllocator() {
  allocator.Init();
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(&cache);
}

static ChunkMetadata *Metadata(void *p) {
  return (ChunkMetadata *)allocator.GetMetaData(p);
}

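// Records a new allocation in its chunk metadata. The final atomic store sets
// |allocated| (the first byte of the metadata), marking the chunk as live for
// leak scanning; relaxed ordering is enough because scanning is performed
// with the world stopped.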
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
  m->requested_size = size;
  atomic_store((atomic_uint8_t*)m, 1, memory_order_relaxed);
}

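// Clears |allocated| so the chunk is no longer treated as live by the leak
// scanner.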
static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store((atomic_uint8_t*)m, 0, memory_order_relaxed);
}

void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return 0;
  }
  void *p = allocator.Allocate(&cache, size, alignment, cleared);
  RegisterAllocation(stack, p, size);
  return p;
}

void Deallocate(void *p) {
  RegisterDeallocation(p);
  allocator.Deallocate(&cache, p);
}

void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(&cache, p);
    return 0;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)&cache;
  *end = *begin + sizeof(cache);
}

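// Returns the size the user originally requested, not the (possibly larger)
// size of the underlying allocator chunk.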
uptr GetMallocUsableSize(void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

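// If p points into a currently allocated chunk, returns the chunk's start
// address; otherwise returns 0.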
void *PointsIntoChunk(void* p) {
  void *chunk = allocator.GetBlockBeginFastLocked(p);
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (p < chunk) return 0;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size)
    return chunk;
  return 0;
}

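// Chunk metadata lives out of line (see Metadata()), so the pointer handed to
// the user coincides with the chunk start.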
void *GetUserBegin(void *p) {
  return p;
}

LsanMetadata::LsanMetadata(void *chunk) {
  metadata_ = Metadata(chunk);
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

template<typename Callable>
void ForEachChunk(Callable const &callback) {
  allocator.ForEachChunk(callback);
}

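// ForEachChunk() is defined here rather than in a header, so it has to be
// explicitly instantiated for every callback type used by the common LSan
// code.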
template void ForEachChunk<ProcessPlatformSpecificAllocationsCb>(
    ProcessPlatformSpecificAllocationsCb const &callback);
template void ForEachChunk<PrintLeakedCb>(PrintLeakedCb const &callback);
template void ForEachChunk<CollectLeaksCb>(CollectLeaksCb const &callback);
template void ForEachChunk<MarkIndirectlyLeakedCb>(
    MarkIndirectlyLeakedCb const &callback);
template void ForEachChunk<CollectIgnoredCb>(
    CollectIgnoredCb const &callback);

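// Tags the chunk containing p as kIgnored so it is excluded from leak
// reports. As the "Locked" suffix suggests, the allocator is expected to be
// locked by the caller.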
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan