//=-- lsan_allocator.cc ---------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

namespace __lsan {

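// Hard cap on a single allocation (8 GB) and the fixed virtual address range
// backing the primary allocator (4 TB starting at kAllocatorSpace).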
static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.

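// Per-chunk bookkeeping kept in the allocator's metadata area. The `allocated`
// flag occupies the first byte so that RegisterAllocation / RegisterDeallocation
// below can flip it atomically through an atomic_uint8_t cast.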
struct ChunkMetadata {
  bool allocated : 8;  // Must be first.
  ChunkTag tag : 2;
  uptr requested_size : 54;
  u32 stack_trace_id;
};

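// Allocator composition: a 64-bit size-class allocator serves small requests
// from the fixed region above, LargeMmapAllocator handles requests the primary
// cannot serve, and CombinedAllocator dispatches between them using a
// per-thread cache.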
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
        sizeof(ChunkMetadata), CompactSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

static Allocator allocator;
static THREADLOCAL AllocatorCache cache;

void InitializeAllocator() {
  allocator.Init();
}

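// Returns the contents of the finishing thread's local cache to the global
// allocator.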
void AllocatorThreadFinish() {
  allocator.SwallowCache(&cache);
}

static ChunkMetadata *Metadata(void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

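// Fills in the chunk's metadata (tag, stack trace id, requested size) and then
// publishes it by atomically setting the leading `allocated` byte.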
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

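// Atomically clears the `allocated` byte so the chunk is no longer treated as
// live.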
static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

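// Zero-byte requests are rounded up to one byte; requests above
// kMaxAllowedMallocSize are refused with a warning. Successful allocations are
// registered so the leak checker can track them.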
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return 0;
  }
  void *p = allocator.Allocate(&cache, size, alignment, cleared);
  RegisterAllocation(stack, p, size);
  return p;
}

void Deallocate(void *p) {
  RegisterDeallocation(p);
  allocator.Deallocate(&cache, p);
}

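// The old chunk is deregistered up front. An oversized request frees the old
// block and returns 0; otherwise the block is reallocated and re-registered
// with its new size.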
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(&cache, p);
    return 0;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)&cache;
  *end = *begin + sizeof(cache);
}

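// Reports the size originally requested by the user, not the possibly larger
// size of the underlying chunk.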
uptr GetMallocUsableSize(void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

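// If p points into a currently allocated chunk (anywhere inside its user
// region, not just at its start), returns the chunk's begin address;
// otherwise returns 0.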
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (m->allocated && addr < chunk + m->requested_size)
    return chunk;
  return 0;
}

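// The user-visible pointer coincides with the chunk begin, since the metadata
// lives out of line (see Metadata() above) rather than in a header preceding
// the chunk.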
uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

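// Tags the chunk containing p as kIgnored so it is excluded from leak reports.
// Fails if p does not point into a live chunk.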
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan