//=-- lsan_common.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }

Flags lsan_flags;

void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0);

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0);

ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder) // NOLINT
      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
  suppression_ctx->ParseFromFile(flags()->suppressions);
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
}

static SuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

struct RootRegion {
  const void *begin;
  uptr size;
};

InternalMmapVector<RootRegion> *root_regions;

void InitializeRootRegions() {
  CHECK(!root_regions);
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
}

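// One-time initialization of common LSan state: root regions and, if leak
// detection is enabled, suppressions and platform-specific modules.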
void InitCommonLsan() {
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

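// Terminal colors used when printing the leak report.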
class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
  const char *End() { return Default(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#elif defined(__mips64)
  return ((p >> 40) == 0);
#else
  return true;
#endif
}

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
                 chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack). Again, consider the entire stack
        // range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that allocator cache will be part of static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}

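// Scans the readable, mapped portions of a single root region
// [root_begin, root_end) for heap pointers.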
static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
                              uptr root_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr begin, end, prot;
  while (proc_maps.Next(&begin, &end,
                        /*offset*/ 0, /*filename*/ 0, /*filename_size*/ 0,
                        &prot)) {
    uptr intersection_begin = Max(root_begin, begin);
    uptr intersection_end = Min(end, root_end);
    if (intersection_begin >= intersection_end) continue;
    bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
    LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
                 root_begin, root_end, begin, end,
                 is_readable ? "readable" : "unreadable");
    if (is_readable)
      ScanRangeForPointers(intersection_begin, intersection_end, frontier,
                           "ROOT", kReachable);
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  CHECK(root_regions);
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    uptr begin_addr = reinterpret_cast<uptr>(region.begin);
    ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
  }
}

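// Flood-fills |tag| over the chunk graph: repeatedly pops a chunk off
// |frontier| and scans it for pointers to further chunks.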
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
                 chunk, chunk + m.requested_size(), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(1);

  ForEachChunk(CollectIgnoredCb, &frontier);
  ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  CHECK_EQ(0, frontier.size());
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  StackDepotGet(stack_trace_id).Print();
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    u32 resolution = flags()->resolution;
    u32 stack_trace_id = 0;
    if (resolution > 0) {
      StackTrace stack = StackDepotGet(m.stack_trace_id());
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    } else {
      stack_trace_id = m.stack_trace_id();
    }
    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
                                m.tag());
  }
}

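// Prints a summary table of the suppressions that matched at least one leak.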
static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  GetSuppressionContext()->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count),
           matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

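// Parameters and results shared between CheckForLeaks and the
// stop-the-world callback.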
struct CheckForLeaksParam {
  bool success;
  LeakReport leak_report;
};

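// Runs while all other threads are suspended: tags every chunk, collects the
// leaked ones into the report, then resets tags for any subsequent checks.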
static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}

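// Performs one leak check under the thread registry and allocator locks.
// Returns true if any unsuppressed leaks were found.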
static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return false;
  EnsureMainThreadIDIsCorrect();
  CheckForLeaksParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  DoStopTheWorld(CheckForLeaksCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  }
  param.leak_report.ApplySuppressions();
  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
  if (unsuppressed_count > 0) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.End());
    param.leak_report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    return true;
  }
  return false;
}

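// The at-exit leak check. Runs at most once per process; if leaks were found
// and flags()->exitcode is non-zero, terminates the process with that code.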
void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  bool have_leaks = CheckForLeaks();
  if (!have_leaks) {
    return;
  }
  if (flags()->exitcode) {
    if (common_flags()->coverage)
      __sanitizer_cov_dump();
    internal__exit(flags()->exitcode);
  }
}

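// A leak check that may be invoked repeatedly at run time. Returns 1 if leaks
// were found, 0 otherwise, and never terminates the process.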
static int DoRecoverableLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

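// Looks up a suppression for |addr|: first by the name of the module the
// address belongs to, then by the symbolized function and file names.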
static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  SuppressionContext *suppressions = GetSuppressionContext();
  if (const char *module_name =
          Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
    if (suppressions->Match(module_name, kSuppressionLeak, &s))
      return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
        suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

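// Returns the first suppression matching any frame of the given stack trace,
// or null if nothing matches.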
static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  StackTrace stack = StackDepotGet(stack_trace_id);
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) return s;
  }
  return 0;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

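// Records a leaked chunk: either merges it into an existing Leak entry with
// the same stack trace id and leak kind, or starts a new one.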
void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}

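// Sort order for reporting: direct leaks before indirect ones; within each
// kind, larger total size first.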
static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  InternalSort(&leaks_, leaks_.size(), LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.End());

  PrintStackTraceById(leaks_[index].stack_trace_id);

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary(kMaxSummaryLength);
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}

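// Matches each recorded leak against the suppression rules, marks the
// suppressed ones and accumulates hit counts and weights on the rules.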
void LeakReport::ApplySuppressions() {
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      s->hit_count += leaks_[i].hit_count;
      leaks_[i].is_suppressed = true;
    }
  }
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
               "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  RootRegion region = {begin, size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %llu\n", begin, size);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == begin && region.size == size) {
      removed = true;
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
      root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %llu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::disable_counter++;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  if (!__lsan::disable_counter && common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  __lsan::disable_counter--;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
  return 0;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}
#endif
}  // extern "C"