//=-- lsan_common.cc ------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and
// __lsan_ignore_object.
BlockingMutex global_mutex(LINKER_INITIALIZED);

Flags lsan_flags;

static void InitializeFlags() {
  Flags *f = flags();
  // Default values.
  f->report_blocks = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->exitcode = 23;
  f->use_registers = true;
  f->use_globals = true;
  f->use_stacks = true;
  f->use_tls = true;
  f->use_unaligned = false;
  f->verbosity = 0;
  f->log_pointers = false;
  f->log_threads = false;

  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    ParseFlag(options, &f->use_registers, "use_registers");
    ParseFlag(options, &f->use_globals, "use_globals");
    ParseFlag(options, &f->use_stacks, "use_stacks");
    ParseFlag(options, &f->use_tls, "use_tls");
    ParseFlag(options, &f->use_unaligned, "use_unaligned");
    ParseFlag(options, &f->report_blocks, "report_blocks");
    ParseFlag(options, &f->resolution, "resolution");
    CHECK_GE(f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks");
    CHECK_GE(f->max_leaks, 0);
    ParseFlag(options, &f->verbosity, "verbosity");
    ParseFlag(options, &f->log_pointers, "log_pointers");
    ParseFlag(options, &f->log_threads, "log_threads");
    ParseFlag(options, &f->exitcode, "exitcode");
  }
}
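
// Illustrative only: with the defaults above, a run such as
//   LSAN_OPTIONS="report_blocks=1:max_leaks=5:log_threads=1" ./a.out
// enables per-block reporting, caps the report at the five largest leaks and
// logs thread processing; flags not mentioned in LSAN_OPTIONS keep their
// default values.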

void InitCommonLsan() {
  InitializeFlags();
  InitializePlatformSpecificModules();
}

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // boundary on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#ifdef __x86_64__
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#else
  return true;
#endif
}
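
// Illustrative values for the checks above: 0x1000 is rejected by the
// kMinAddress test (4 * 4096 == 0x4000); on x86_64, 0x00007f1234567890 passes
// (bits 47-63 are zero) while 0xffff880012345678 is rejected because its upper
// bits are set.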

// Scan the memory range, looking for byte patterns that point into allocator
// chunks. Mark those chunks with tag and add them to the frontier.
// There are two usage modes for this function: finding reachable or suppressed
// chunks (tag = kReachable or kSuppressed) and finding indirectly leaked chunks
// (tag = kIndirectlyLeaked). In the second case, there's no flood fill,
// so frontier = 0.
void ScanRangeForPointers(uptr begin, uptr end, InternalVector<uptr> *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  if (flags()->log_pointers)
    Report("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(uptr) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void**>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    void *chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    LsanMetadata m(chunk);
    // Reachable beats suppressed beats leaked.
    if (m.tag() == kReachable) continue;
    if (m.tag() == kSuppressed && tag != kReachable) continue;
    m.set_tag(tag);
    if (flags()->log_pointers)
      Report("%p: found %p pointing into chunk %p-%p of size %llu.\n", pp, p,
             chunk, reinterpret_cast<uptr>(chunk) + m.requested_size(),
             m.requested_size());
    if (frontier)
      frontier->push_back(reinterpret_cast<uptr>(chunk));
  }
}
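
// A worked example of the round-up above (values are illustrative): with
// begin == 0x7f0003 and alignment == 8, pp starts at 0x7f0008 and advances in
// 8-byte steps, so every load reads an aligned word. With use_unaligned set,
// pointer_alignment() is expected to return 1 and every byte offset is probed.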

// Scan thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           InternalVector<uptr> *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    if (flags()->log_threads) Report("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      if (flags()->log_threads)
        Report("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
141 Report("Unable to get registers from thread %d.\n");
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      if (flags()->log_threads)
        Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack). Again, consider the entire stack
        // range to be reachable.
        if (flags()->log_threads)
          Report("WARNING: stack_pointer not in stack_range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
    }

    if (flags()->use_tls) {
      if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that allocator cache will be part of static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}

static void FloodFillTag(InternalVector<uptr> *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(reinterpret_cast<void *>(next_chunk));
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}
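
// Sketch of the propagation above: if a frontier chunk A contains a pointer
// into chunk B, ScanRangeForPointers() retags B and pushes it onto the
// frontier, and the loop later scans B in turn, until no newly tagged chunks
// remain. The frontier is used as a stack (back()/pop_back()), so the
// traversal is depth-first; the visiting order does not affect the final tags.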

// Mark leaked chunks which are reachable from other leaked chunks.
void MarkIndirectlyLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(reinterpret_cast<uptr>(p),
                         reinterpret_cast<uptr>(p) + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

void CollectSuppressedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (m.allocated() && m.tag() == kSuppressed)
    frontier_->push_back(reinterpret_cast<uptr>(p));
}

// Set the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  InternalVector<uptr> frontier(GetPageSizeCached());

  if (flags()->use_globals)
    ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  FloodFillTag(&frontier, kReachable);
  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  if (flags()->log_pointers)
    Report("Scanning suppressed blocks.\n");
  CHECK_EQ(0, frontier.size());
  ForEachChunk(CollectSuppressedCb(&frontier));
  FloodFillTag(&frontier, kSuppressed);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  if (flags()->log_pointers)
    Report("Scanning leaked blocks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb());
}
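
// Assuming chunks start out tagged as directly leaked when allocated, after
// ClassifyAllChunks() every allocated chunk is kReachable, kSuppressed,
// kIndirectlyLeaked, or still kDirectlyLeaked; the last two are what the leak
// collection below reports.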

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  StackTrace::PrintStack(trace, size, common_flags()->symbolize,
                         common_flags()->strip_path_prefix, 0);
}

void CollectLeaksCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    uptr resolution = flags()->resolution;
    if (resolution > 0) {
      uptr size = 0;
      const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
      size = Min(size, resolution);
      leak_report_->Add(StackDepotPut(trace, size), m.requested_size(),
                        m.tag());
    } else {
      leak_report_->Add(m.stack_trace_id(), m.requested_size(), m.tag());
    }
  }
}

static void CollectLeaks(LeakReport *leak_report) {
  ForEachChunk(CollectLeaksCb(leak_report));
}

void PrintLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    Printf("%s leaked %llu byte block at %p\n",
           m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
           m.requested_size(), p);
  }
}

static void PrintLeaked() {
  Printf("Reporting individual blocks:\n");
  Printf("============================\n");
  ForEachChunk(PrintLeakedCb());
  Printf("\n");
}
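
// With report_blocks=1 the report contains one line per leaked block, e.g.
// (address is illustrative):
//   Directly leaked 32 byte block at 0x7f5e4c800010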
289
Sergey Matveev969b5292013-05-24 13:16:02 +0000290enum LeakCheckResult {
291 kFatalError,
292 kLeaksFound,
293 kNoLeaks
294};

static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  LeakCheckResult *result = reinterpret_cast<LeakCheckResult *>(arg);
  CHECK_EQ(*result, kFatalError);
  ClassifyAllChunks(suspended_threads);
  LeakReport leak_report;
  CollectLeaks(&leak_report);
  if (leak_report.IsEmpty()) {
    *result = kNoLeaks;
    return;
  }
  Printf("\n");
  Printf("=================================================================\n");
  Report("ERROR: LeakSanitizer: detected memory leaks\n");
  leak_report.PrintLargest(flags()->max_leaks);
  if (flags()->report_blocks)
    PrintLeaked();
  leak_report.PrintSummary();
  Printf("\n");
  *result = kLeaksFound;
}

void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  CHECK(!already_done);
  already_done = true;
  LeakCheckResult result = kFatalError;
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(DoLeakCheckCallback, &result);
  UnlockAllocator();
  UnlockThreadRegistry();
  if (result == kFatalError) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  } else if (result == kLeaksFound) {
    if (flags()->exitcode)
      internal__exit(flags()->exitcode);
  }
}
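
// DoLeakCheck() is meant to run exactly once, normally near process exit
// (tool-specific code is expected to register it, e.g. via Atexit()); a second
// call trips the CHECK(!already_done) above.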

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::Add(). We don't expect to ever see this many leaks in
// real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 1000;

void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  for (uptr i = 0; i < leaks_.size(); i++)
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      return;
    }
  if (leaks_.size() == kMaxLeaksConsidered) return;
  Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id,
                is_directly_leaked };
  leaks_.push_back(leak);
}
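
// Example of the deduplication above: two 16-byte directly leaked blocks
// sharing a stack trace id collapse into a single Leak with hit_count == 2 and
// total_size == 32, while an indirectly leaked block with the same stack id
// goes into a separate entry.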

static bool IsLarger(const Leak &leak1, const Leak &leak2) {
  return leak1.total_size > leak2.total_size;
}

void LeakReport::PrintLargest(uptr max_leaks) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %llu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);
  if (max_leaks > 0 && max_leaks < leaks_.size())
    Printf("The %llu largest leak(s):\n", max_leaks);
  InternalSort(&leaks_, leaks_.size(), IsLarger);
  max_leaks = max_leaks > 0 ? Min(max_leaks, leaks_.size()) : leaks_.size();
  for (uptr i = 0; i < max_leaks; i++) {
    Printf("%s leak of %llu byte(s) in %llu object(s) allocated from:\n",
           leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
           leaks_[i].total_size, leaks_[i].hit_count);
    PrintStackTraceById(leaks_[i].stack_trace_id);
    Printf("\n");
  }
  if (max_leaks < leaks_.size()) {
    uptr remaining = leaks_.size() - max_leaks;
    Printf("Omitting %llu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  Printf("SUMMARY: LeakSanitizer: %llu byte(s) leaked in %llu allocation(s).\n",
         bytes, allocations);
}
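
// Example summary line (sizes are illustrative):
//   SUMMARY: LeakSanitizer: 208 byte(s) leaked in 4 allocation(s).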

}  // namespace __lsan

using namespace __lsan;  // NOLINT

extern "C" {
void __lsan_ignore_object(const void *p) {
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid && flags()->verbosity >= 1)
413 Report("__lsan_ignore_object(): no heap object found at %p", p);
  if (res == kIgnoreObjectAlreadyIgnored && flags()->verbosity >= 1)
    Report("__lsan_ignore_object(): "
           "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess && flags()->verbosity >= 2)
    Report("__lsan_ignore_object(): ignoring heap object at %p\n", p);
}
}  // extern "C"
#endif  // CAN_SANITIZE_LEAKS