//=-- lsan_common.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"

namespace __lsan {
#if CAN_SANITIZE_LEAKS

// This mutex is used to prevent races between DoLeakCheck and
// __lsan_ignore_object.
BlockingMutex global_mutex(LINKER_INITIALIZED);

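// Number of nested __lsan_disable() calls made by the current thread; see
// DisabledInThisThread() below.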
THREADLOCAL int disable_counter;

Flags lsan_flags;

static void InitializeFlags() {
  Flags *f = flags();
  // Default values.
  f->report_objects = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->exitcode = 23;
  f->use_registers = true;
  f->use_globals = true;
  f->use_stacks = true;
  f->use_tls = true;
  f->use_unaligned = false;
  f->verbosity = 0;
  f->log_pointers = false;
  f->log_threads = false;

  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    ParseFlag(options, &f->use_registers, "use_registers");
    ParseFlag(options, &f->use_globals, "use_globals");
    ParseFlag(options, &f->use_stacks, "use_stacks");
    ParseFlag(options, &f->use_tls, "use_tls");
    ParseFlag(options, &f->use_unaligned, "use_unaligned");
    ParseFlag(options, &f->report_objects, "report_objects");
    ParseFlag(options, &f->resolution, "resolution");
    CHECK_GE(f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks");
    CHECK_GE(f->max_leaks, 0);
    ParseFlag(options, &f->verbosity, "verbosity");
    ParseFlag(options, &f->log_pointers, "log_pointers");
    ParseFlag(options, &f->log_threads, "log_threads");
    ParseFlag(options, &f->exitcode, "exitcode");
  }
}

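// One-time initialization of flags and platform-specific modules.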
void InitCommonLsan() {
  InitializeFlags();
  InitializePlatformSpecificModules();
}

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible
  // lower boundary on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#ifdef __x86_64__
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#else
  return true;
#endif
}

// Scan the memory range, looking for byte patterns that point into allocator
// chunks. Mark those chunks with tag and add them to the frontier.
// There are two usage modes for this function: finding reachable or ignored
// chunks (tag = kReachable or kIgnored) and finding indirectly leaked chunks
// (tag = kIndirectlyLeaked). In the second case, there's no flood fill,
// so frontier = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  if (flags()->log_pointers)
    Report("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void**>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    void *chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    LsanMetadata m(chunk);
    // Reachable beats ignored beats leaked.
    if (m.tag() == kReachable) continue;
    if (m.tag() == kIgnored && tag != kReachable) continue;
    m.set_tag(tag);
    if (flags()->log_pointers)
      Report("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
             chunk, reinterpret_cast<uptr>(chunk) + m.requested_size(),
             m.requested_size());
    if (frontier)
      frontier->push_back(reinterpret_cast<uptr>(chunk));
  }
}

// Scan thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    if (flags()->log_threads) Report("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in
      // the process of destruction. Log this event and move on.
      if (flags()->log_threads)
        Report("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      if (flags()->log_threads)
        Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack). Again, consider the entire stack
        // range to be reachable.
        if (flags()->log_threads)
          Report("WARNING: stack_pointer not in stack_range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
    }

    if (flags()->use_tls) {
      if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that the allocator cache will be part of the static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}

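// Flood fill out from the frontier: pop chunks off the worklist and scan
// their contents for pointers to further chunks, tagging everything reached
// with the given tag.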
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(reinterpret_cast<void *>(next_chunk));
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// Mark leaked chunks which are reachable from other leaked chunks.
void MarkIndirectlyLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(reinterpret_cast<uptr>(p),
                         reinterpret_cast<uptr>(p) + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

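// Collect already-ignored chunks into the frontier, so that anything reachable
// from them can be tagged as ignored too.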
void CollectIgnoredCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (m.allocated() && m.tag() == kIgnored)
    frontier_->push_back(reinterpret_cast<uptr>(p));
}

// Set the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(GetPageSizeCached());

  if (flags()->use_globals)
    ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  FloodFillTag(&frontier, kReachable);
  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  if (flags()->log_pointers)
    Report("Scanning ignored chunks.\n");
  CHECK_EQ(0, frontier.size());
  ForEachChunk(CollectIgnoredCb(&frontier));
  FloodFillTag(&frontier, kIgnored);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  if (flags()->log_pointers)
    Report("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb());
}

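// Fetch a stack trace from the stack depot by id and print it.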
static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  StackTrace::PrintStack(trace, size, common_flags()->symbolize,
                         common_flags()->strip_path_prefix, 0);
}

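// Add a leaked chunk to the report. If the resolution flag is set, the stack
// trace is truncated to that many frames first, so that leaks sharing the
// same truncated trace are merged.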
void CollectLeaksCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    uptr resolution = flags()->resolution;
    if (resolution > 0) {
      uptr size = 0;
      const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
      size = Min(size, resolution);
      leak_report_->Add(StackDepotPut(trace, size), m.requested_size(),
                        m.tag());
    } else {
      leak_report_->Add(m.stack_trace_id(), m.requested_size(), m.tag());
    }
  }
}

static void CollectLeaks(LeakReport *leak_report) {
  ForEachChunk(CollectLeaksCb(leak_report));
}

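// Print a one-line description of each leaked object; enabled by the
// report_objects flag.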
void PrintLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    Printf("%s leaked %zu byte object at %p.\n",
           m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
           m.requested_size(), p);
  }
}

static void PrintLeaked() {
  Printf("\n");
  Printf("Reporting individual objects:\n");
  ForEachChunk(PrintLeakedCb());
}

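// Parameters and results passed between DoLeakCheck() and the StopTheWorld
// callback.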
struct DoLeakCheckParam {
  bool success;
  LeakReport leak_report;
};

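// Runs while all other threads are suspended: classifies every chunk and
// collects the leaked ones into the report.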
static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  CHECK(param->leak_report.IsEmpty());
  ClassifyAllChunks(suspended_threads);
  CollectLeaks(&param->leak_report);
  if (!param->leak_report.IsEmpty() && flags()->report_objects)
    PrintLeaked();
  param->success = true;
}

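// Entry point for the leak check: stops the world, classifies chunks, and
// reports any leaks found. May only be called once per process.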
void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  CHECK(!already_done);
  already_done = true;

  DoLeakCheckParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(DoLeakCheckCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  }
  if (!param.leak_report.IsEmpty()) {
    Printf("\n================================================================="
           "\n");
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    param.leak_report.PrintLargest(flags()->max_leaks);
    param.leak_report.PrintSummary();
    if (flags()->exitcode)
      internal__exit(flags()->exitcode);
  }
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::Add(). We don't expect to ever see this many leaks in
// real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 1000;

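// Merge the allocation into an existing leak record with the same stack trace
// and tag, or append a new record. The search is linear, hence the
// kMaxLeaksConsidered cap.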
void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  for (uptr i = 0; i < leaks_.size(); i++)
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      return;
    }
  if (leaks_.size() == kMaxLeaksConsidered) return;
  Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id,
                is_directly_leaked };
  leaks_.push_back(leak);
}

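// Comparator used to sort leaks by total leaked size, largest first.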
static bool IsLarger(const Leak &leak1, const Leak &leak2) {
  return leak1.total_size > leak2.total_size;
}

void LeakReport::PrintLargest(uptr max_leaks) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);
  if (max_leaks > 0 && max_leaks < leaks_.size())
    Printf("The %zu largest leak(s):\n", max_leaks);
  InternalSort(&leaks_, leaks_.size(), IsLarger);
  max_leaks = max_leaks > 0 ? Min(max_leaks, leaks_.size()) : leaks_.size();
  for (uptr i = 0; i < max_leaks; i++) {
    Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
           leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
           leaks_[i].total_size, leaks_[i].hit_count);
    PrintStackTraceById(leaks_[i].stack_trace_id);
    Printf("\n");
  }
  if (max_leaks < leaks_.size()) {
    uptr remaining = leaks_.size() - max_leaks;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  Printf(
      "SUMMARY: LeakSanitizer: %zu byte(s) leaked in %zu allocation(s).\n\n",
      bytes, allocations);
}
#endif  // CAN_SANITIZE_LEAKS

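// Returns true if leak checking is disabled in the current thread via
// __lsan_disable().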
bool DisabledInThisThread() {
#if CAN_SANITIZE_LEAKS
  return disable_counter > 0;
#endif
  return false;
}
}  // namespace __lsan

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is
  // not locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid && flags()->verbosity >= 1)
    Report("__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored && flags()->verbosity >= 1)
    Report("__lsan_ignore_object(): "
           "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess && flags()->verbosity >= 2)
    Report("__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::disable_counter++;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  if (!__lsan::disable_counter) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  __lsan::disable_counter--;
#endif
}
}  // extern "C"