//=-- lsan_allocator.cc ---------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {

static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.

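// Per-chunk bookkeeping, stored in the allocator's metadata region.
// |allocated| must occupy the first byte: RegisterAllocation() and
// RegisterDeallocation() below flip the chunk's live/dead state with an
// atomic store to that leading byte.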
struct ChunkMetadata {
  bool allocated : 8;  // Must be first.
  ChunkTag tag : 2;
  uptr requested_size : 54;
  u32 stack_trace_id;
};

typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
        sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

static Allocator allocator;
static THREADLOCAL AllocatorCache cache;

void InitializeAllocator() {
  allocator.Init();
}

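// Called on thread shutdown: returns the chunks cached by this thread to
// the global allocator.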
void AllocatorThreadFinish() {
  allocator.SwallowCache(&cache);
}

static ChunkMetadata *Metadata(void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

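// Fills in the chunk's metadata and only then sets the leading |allocated|
// byte with an atomic store, so a concurrent reader never sees a chunk
// marked allocated while its metadata is still half-written.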
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

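// Zero-sized requests are bumped to one byte; oversized requests are
// reported and denied. Zeroing is done by hand for primary chunks only:
// secondary chunks come directly from mmap and are presumably already
// zero-filled by the kernel.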
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return 0;
  }
  void *p = allocator.Allocate(&cache, size, alignment, false);
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  return p;
}

void Deallocate(void *p) {
  RegisterDeallocation(p);
  allocator.Deallocate(&cache, p);
}

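// Note that, unlike realloc(3), this frees the original block when new_size
// exceeds kMaxAllowedMallocSize, so the caller must not reuse the old
// pointer after a failed reallocation.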
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(&cache, p);
    return 0;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

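// Exposes the address range of the thread-local allocator cache so that the
// common LSan module can treat it specially (e.g. skip it) when scanning
// thread memory.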
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)&cache;
  *end = *begin + sizeof(cache);
}

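// Returns the size the user originally requested, not the (possibly larger)
// size class the allocator rounded it up to.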
uptr GetMallocUsableSize(void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

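// If p points into a live chunk, returns the chunk's start address,
// otherwise 0. GetBlockBeginFastLocked(), as the name suggests, expects the
// allocator to have been locked by the caller (see LockAllocator() above).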
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (m->allocated && addr < chunk + m->requested_size)
    return chunk;
  return 0;
}

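// Metadata lives in a separate region (see Metadata() above), so user data
// starts at the very beginning of the chunk.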
uptr GetUserBegin(uptr chunk) {
  return chunk;
}

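// LsanMetadata wraps a chunk's ChunkMetadata so the common LSan module can
// query chunks without knowing this allocator's layout.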
LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

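// Chunk iteration is delegated wholesale to the combined allocator.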
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

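// Tags the chunk containing p as kIgnored so it is excluded from leak
// reports. Per the "Locked" suffix, the caller is expected to already hold
// the allocator lock.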
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan