//=-- lsan_common.cc ------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

Flags lsan_flags;

static void InitializeFlags() {
  Flags *f = flags();
  // Default values.
  f->sources = kSourceAllAligned;
  f->report_blocks = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->exitcode = 23;
  f->log_pointers = false;
  f->log_threads = false;

  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    bool aligned = true;
    ParseFlag(options, &aligned, "aligned");
    if (!aligned) f->sources |= kSourceUnaligned;
    ParseFlag(options, &f->report_blocks, "report_blocks");
    ParseFlag(options, &f->resolution, "resolution");
    CHECK_GE(f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks");
    CHECK_GE(f->max_leaks, 0);
    ParseFlag(options, &f->log_pointers, "log_pointers");
    ParseFlag(options, &f->log_threads, "log_threads");
    ParseFlag(options, &f->exitcode, "exitcode");
  }
}

void InitCommonLsan() {
  InitializeFlags();
  InitializePlatformSpecificModules();
}

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // boundary on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#ifdef __x86_64__
  // Accept only canonical form user-space addresses.
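  // (On x86-64, bits 48-63 of a canonical address replicate bit 47, and
  // user-space addresses have bit 47 clear, so the top 17 bits must be zero.)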
  return ((p >> 47) == 0);
#else
  return true;
#endif
}

// Scan the memory range, looking for byte patterns that point into allocator
// chunks. Mark those chunks with tag and add them to the frontier.
// There are two usage modes for this function: finding non-leaked chunks
// (tag = kReachable) and finding indirectly leaked chunks
// (tag = kIndirectlyLeaked). In the second case, there's no flood fill,
// so frontier = 0.
void ScanRangeForPointers(uptr begin, uptr end, InternalVector<uptr> *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  if (flags()->log_pointers)
    Report("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
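  // Round pp up to the nearest address that satisfies the pointer alignment.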
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(uptr) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void**>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    // FIXME: PointsIntoChunk is SLOW because GetBlockBegin() in
    // LargeMmapAllocator involves a lock and a linear search.
    void *chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable) continue;
    m.set_tag(tag);
    if (flags()->log_pointers)
      Report("%p: found %p pointing into chunk %p-%p of size %llu.\n", pp, p,
             chunk, reinterpret_cast<uptr>(chunk) + m.requested_size(),
             m.requested_size());
    if (frontier)
      frontier->push_back(reinterpret_cast<uptr>(chunk));
  }
}

// Scan thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           InternalVector<uptr> *frontier) {
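  // Scratch buffer that receives each suspended thread's register values;
  // it is scanned as an ordinary memory range below.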
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    if (flags()->log_threads) Report("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      if (flags()->log_threads)
        Report("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers() && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks()) {
      if (flags()->log_threads)
        Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on an alternate stack). Again, consider the entire
        // stack range to be reachable.
        if (flags()->log_threads)
          Report("WARNING: stack_pointer not in stack_range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
    }

    if (flags()->use_tls()) {
      if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that the allocator cache will be part of the static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}

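// Flood fill over the frontier: pop a chunk, scan its payload for pointers to
// other allocator chunks, and let ScanRangeForPointers push any newly
// reachable chunks back onto the frontier until it is empty.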
static void FloodFillReachable(InternalVector<uptr> *frontier) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(reinterpret_cast<void *>(next_chunk));
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", kReachable);
  }
}

// Mark leaked chunks which are reachable from other leaked chunks.
void MarkIndirectlyLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(reinterpret_cast<uptr>(p),
                         reinterpret_cast<uptr>(p) + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

// Set the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  InternalVector<uptr> frontier(GetPageSizeCached());

  if (flags()->use_globals())
    ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  FloodFillReachable(&frontier);
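  // Platform-specific hooks may add further roots to the frontier, so run a
  // second flood fill pass over whatever they report.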
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillReachable(&frontier);

  // Now all reachable chunks are marked. Iterate over leaked chunks and mark
  // those that are reachable from other leaked chunks.
  if (flags()->log_pointers)
    Report("Now scanning leaked blocks for pointers.\n");
  ForEachChunk(MarkIndirectlyLeakedCb());
}

void ClearTagCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  m.set_tag(kDirectlyLeaked);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  StackTrace::PrintStack(trace, size, common_flags()->symbolize,
                         common_flags()->strip_path_prefix, 0);
}

static void LockAndSuspendThreads(StopTheWorldCallback callback, void *arg) {
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(callback, arg);
  // Allocator must be unlocked by the callback.
  UnlockThreadRegistry();
}

///// Normal leak checking. /////

void CollectLeaksCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() != kReachable) {
    uptr resolution = flags()->resolution;
    if (resolution > 0) {
      uptr size = 0;
      const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
      size = Min(size, resolution);
      leak_report_->Add(StackDepotPut(trace, size), m.requested_size(),
                        m.tag());
    } else {
      leak_report_->Add(m.stack_trace_id(), m.requested_size(), m.tag());
    }
  }
}

static void CollectLeaks(LeakReport *leak_report) {
  ForEachChunk(CollectLeaksCb(leak_report));
}

void PrintLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() != kReachable) {
    CHECK(m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked);
    Printf("%s leaked %llu byte block at %p\n",
           m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
           m.requested_size(), p);
  }
}

static void PrintLeaked() {
  Printf("Reporting individual blocks:\n");
  Printf("============================\n");
  ForEachChunk(PrintLeakedCb());
  Printf("\n");
}

enum LeakCheckResult {
  kFatalError,
  kLeaksFound,
  kNoLeaks
};

static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  LeakCheckResult *result = reinterpret_cast<LeakCheckResult *>(arg);
  CHECK_EQ(*result, kFatalError);
  // Allocator must not be locked when we call GetRegionBegin().
  UnlockAllocator();
  ClassifyAllChunks(suspended_threads);
  LeakReport leak_report;
  CollectLeaks(&leak_report);
  if (leak_report.IsEmpty()) {
    *result = kNoLeaks;
    return;
  }
  Printf("\n");
  Printf("=================================================================\n");
  Report("ERROR: LeakSanitizer: detected leaks.\n");
  leak_report.PrintLargest(flags()->max_leaks);
  if (flags()->report_blocks)
    PrintLeaked();
  leak_report.PrintSummary();
  Printf("\n");
  ForEachChunk(ClearTagCb());
  *result = kLeaksFound;
}

void DoLeakCheck() {
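  // |result| starts out as kFatalError and is overwritten by the callback, so
  // a value that is still kFatalError afterwards means the callback did not
  // run (e.g. StopTheWorld failed).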
  LeakCheckResult result = kFatalError;
  LockAndSuspendThreads(DoLeakCheckCallback, &result);
  if (result == kFatalError) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  } else if (result == kLeaksFound) {
    if (flags()->exitcode)
      internal__exit(flags()->exitcode);
  }
}

///// Reporting of leaked blocks' addresses (for testing). /////

void ReportLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (m.allocated() && m.tag() != kReachable)
    leaked_->push_back(p);
}

struct ReportLeakedParam {
  InternalVector<void *> *leaked;
  uptr sources;
  bool success;
};

static void ReportLeakedCallback(const SuspendedThreadsList &suspended_threads,
                                 void *arg) {
  // Allocator must not be locked when we call GetRegionBegin().
  UnlockAllocator();
  ReportLeakedParam *param = reinterpret_cast<ReportLeakedParam *>(arg);
  flags()->sources = param->sources;
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(ReportLeakedCb(param->leaked));
  ForEachChunk(ClearTagCb());
  param->success = true;
}

void ReportLeaked(InternalVector<void *> *leaked, uptr sources) {
  CHECK_EQ(0, leaked->size());
  ReportLeakedParam param;
  param.leaked = leaked;
  param.success = false;
  param.sources = sources;
  LockAndSuspendThreads(ReportLeakedCallback, &param);
  CHECK(param.success);
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::Add(). We don't expect to ever see this many leaks in
// real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 1000;

void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
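  // Coalesce with an existing entry that has the same allocation stack and the
  // same leak kind; otherwise record a new leak, up to kMaxLeaksConsidered.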
  for (uptr i = 0; i < leaks_.size(); i++)
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      return;
    }
  if (leaks_.size() == kMaxLeaksConsidered) return;
  Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id,
                is_directly_leaked };
  leaks_.push_back(leak);
}

static bool IsLarger(const Leak &leak1, const Leak &leak2) {
  return leak1.total_size > leak2.total_size;
}

void LeakReport::PrintLargest(uptr max_leaks) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %llu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);
  if (max_leaks > 0 && max_leaks < leaks_.size())
    Printf("The %llu largest leak(s):\n", max_leaks);
  InternalSort(&leaks_, leaks_.size(), IsLarger);
  max_leaks = max_leaks > 0 ? Min(max_leaks, leaks_.size()) : leaks_.size();
  for (uptr i = 0; i < max_leaks; i++) {
    Printf("%s leak of %llu byte(s) in %llu object(s) allocated from:\n",
           leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
           leaks_[i].total_size, leaks_[i].hit_count);
    PrintStackTraceById(leaks_[i].stack_trace_id);
    Printf("\n");
  }
  if (max_leaks < leaks_.size()) {
    uptr remaining = leaks_.size() - max_leaks;
    Printf("Omitting %llu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  Printf("SUMMARY: LeakSanitizer: %llu byte(s) leaked in %llu allocation(s).\n",
         bytes, allocations);
}
}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS