//=-- lsan_common.cc ------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"

namespace __lsan {
#if CAN_SANITIZE_LEAKS
Flags lsan_flags;

static void InitializeFlags() {
  Flags *f = flags();
  // Default values.
  f->sources = kSourceAllAligned;
  f->report_blocks = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->log_pointers = false;
  f->log_threads = false;

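  // Defaults can be overridden via the LSAN_OPTIONS environment variable.
  // An illustrative (not exhaustive) setting, using the flag names parsed
  // below, would be:
  //   LSAN_OPTIONS="report_blocks=1 log_pointers=1 log_threads=1"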
  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    bool aligned = true;
    ParseFlag(options, &aligned, "aligned");
    if (!aligned) f->sources |= kSourceUnaligned;
    ParseFlag(options, &f->report_blocks, "report_blocks");
    ParseFlag(options, &f->resolution, "resolution");
    CHECK_GE(f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks");
    CHECK_GE(f->max_leaks, 0);
    ParseFlag(options, &f->log_pointers, "log_pointers");
    ParseFlag(options, &f->log_threads, "log_threads");
  }
}

void InitCommonLsan() {
  InitializeFlags();
  InitializePlatformSpecificModules();
}

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // boundary on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#ifdef __x86_64__
  // Accept only canonical form user-space addresses.
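  // In the canonical lower (user-space) half of the address space, bits 47-63
  // are all zero, so a single shift is enough to reject everything else.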
  return ((p >> 47) == 0);
#else
  return true;
#endif
}

// Scan the memory range, looking for byte patterns that point into allocator
// chunks. Mark those chunks with tag and add them to the frontier.
// There are two usage modes for this function: finding non-leaked chunks
// (tag = kReachable) and finding indirectly leaked chunks
// (tag = kIndirectlyLeaked). In the second case, there's no flood fill,
// so frontier = 0.
void ScanRangeForPointers(uptr begin, uptr end, InternalVector<uptr> *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  if (flags()->log_pointers)
    Report("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
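  // Round the start address up to the required pointer alignment.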
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(uptr) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void**>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    // FIXME: PointsIntoChunk is SLOW because GetBlockBegin() in
    // LargeMmapAllocator involves a lock and a linear search.
    void *chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable) continue;
    m.set_tag(tag);
    if (flags()->log_pointers)
      Report("%p: found %p pointing into chunk %p-%p of size %llu.\n", pp, p,
             chunk, reinterpret_cast<uptr>(chunk) + m.requested_size(),
             m.requested_size());
    if (frontier)
      frontier->push_back(reinterpret_cast<uptr>(chunk));
  }
}

// Scan thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           InternalVector<uptr> *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    if (flags()->log_threads) Report("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      if (flags()->log_threads)
        Report("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
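    // A return value of 0 from GetRegistersAndSP() is treated as success.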
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers() && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks()) {
      if (flags()->log_threads)
        Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running
        // a signal handler on an alternate stack). Again, consider the entire
        // stack range to be reachable.
        if (flags()->log_threads)
          Report("WARNING: stack_pointer not in stack_range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
    }

    if (flags()->use_tls()) {
      if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
      // Because LSan should not be loaded with dlopen(), we can assume that
      // the allocator cache will be part of the static TLS image.
      CHECK_LE(tls_begin, cache_begin);
      CHECK_GE(tls_end, cache_end);
      if (tls_begin < cache_begin)
        ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                             kReachable);
      if (tls_end > cache_end)
        ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
    }
  }
}

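// Propagate the kReachable tag: repeatedly pop a chunk off the frontier and
// scan it for pointers, pushing any newly discovered chunks back onto the
// frontier, until no unvisited reachable chunks remain.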
static void FloodFillReachable(InternalVector<uptr> *frontier) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(reinterpret_cast<void *>(next_chunk));
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", kReachable);
  }
}

// Mark leaked chunks which are reachable from other leaked chunks.
void MarkIndirectlyLeakedCb::operator()(void *p) const {
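  // Translate the chunk pointer supplied by ForEachChunk() into the
  // user-visible block address before reading the metadata.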
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(reinterpret_cast<uptr>(p),
                         reinterpret_cast<uptr>(p) + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

// Set the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  InternalVector<uptr> frontier(GetPageSizeCached());

  if (flags()->use_globals())
    ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  FloodFillReachable(&frontier);
  ProcessPlatformSpecificAllocations(&frontier);
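  // Platform-specific roots may have added new entries to the frontier, so
  // flood fill again from those.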
  FloodFillReachable(&frontier);

  // Now all reachable chunks are marked. Iterate over leaked chunks and mark
  // those that are reachable from other leaked chunks.
  if (flags()->log_pointers)
    Report("Now scanning leaked blocks for pointers.\n");
  ForEachChunk(MarkIndirectlyLeakedCb());
}

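// Reset a chunk's tag back to kDirectlyLeaked so that a subsequent leak check
// starts from a clean state.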
void ClearTagCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  m.set_tag(kDirectlyLeaked);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  StackTrace::PrintStack(trace, size, common_flags()->symbolize,
                         common_flags()->strip_path_prefix, 0);
}

static void LockAndSuspendThreads(StopTheWorldCallback callback, void *arg) {
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(callback, arg);
  // Allocator must be unlocked by the callback.
  UnlockThreadRegistry();
}

///// Normal leak checking. /////

void CollectLeaksCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() != kReachable) {
    uptr resolution = flags()->resolution;
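    // A non-zero resolution truncates the allocation stack to its top frames,
    // so that leaks differing only in deeper frames get merged into a single
    // report.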
    if (resolution > 0) {
      uptr size = 0;
      const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
      size = Min(size, resolution);
      leak_report_->Add(StackDepotPut(trace, size), m.requested_size(),
                        m.tag());
    } else {
      leak_report_->Add(m.stack_trace_id(), m.requested_size(), m.tag());
    }
  }
}

static void CollectLeaks(LeakReport *leak_report) {
  ForEachChunk(CollectLeaksCb(leak_report));
}

void PrintLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() != kReachable) {
    CHECK(m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked);
    Printf("%s leaked %llu byte block at %p\n",
           m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
           m.requested_size(), p);
  }
}

static void PrintLeaked() {
  Printf("\nReporting individual blocks:\n");
  ForEachChunk(PrintLeakedCb());
}

static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  // Allocator must not be locked when we call GetRegionBegin().
  UnlockAllocator();
  bool *success = reinterpret_cast<bool *>(arg);
  ClassifyAllChunks(suspended_threads);
  LeakReport leak_report;
  CollectLeaks(&leak_report);
  if (!leak_report.IsEmpty()) {
    leak_report.PrintLargest(flags()->max_leaks);
    if (flags()->report_blocks)
      PrintLeaked();
  }
  ForEachChunk(ClearTagCb());
  *success = true;
}

void DoLeakCheck() {
  bool success = false;
  LockAndSuspendThreads(DoLeakCheckCallback, &success);
  if (!success)
    Report("Leak check failed!\n");
}

///// Reporting of leaked blocks' addresses (for testing). /////

void ReportLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (m.allocated() && m.tag() != kReachable)
    leaked_->push_back(p);
}

struct ReportLeakedParam {
  InternalVector<void *> *leaked;
  uptr sources;
  bool success;
};

static void ReportLeakedCallback(const SuspendedThreadsList &suspended_threads,
                                 void *arg) {
  // Allocator must not be locked when we call GetRegionBegin().
  UnlockAllocator();
  ReportLeakedParam *param = reinterpret_cast<ReportLeakedParam *>(arg);
  flags()->sources = param->sources;
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(ReportLeakedCb(param->leaked));
  ForEachChunk(ClearTagCb());
  param->success = true;
}

void ReportLeaked(InternalVector<void *> *leaked, uptr sources) {
  CHECK_EQ(0, leaked->size());
  ReportLeakedParam param;
  param.leaked = leaked;
  param.success = false;
  param.sources = sources;
  LockAndSuspendThreads(ReportLeakedCallback, &param);
  CHECK(param.success);
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::Add(). We don't expect to ever see this many leaks in
// real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 1000;

void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
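  // Try to merge with an existing record that has the same allocation stack
  // and leak kind. This linear scan is what makes Add() quadratic overall and
  // motivates the kMaxLeaksConsidered limit above.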
  for (uptr i = 0; i < leaks_.size(); i++)
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      return;
    }
  if (leaks_.size() == kMaxLeaksConsidered) return;
  Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id,
                is_directly_leaked };
  leaks_.push_back(leak);
}

static bool IsLarger(const Leak &leak1, const Leak &leak2) {
  return leak1.total_size > leak2.total_size;
}

void LeakReport::PrintLargest(uptr max_leaks) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %llu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);
  if (max_leaks > 0 && max_leaks < leaks_.size())
    Printf("The %llu largest leak%s:\n", max_leaks, max_leaks > 1 ? "s" : "");
  InternalSort(&leaks_, leaks_.size(), IsLarger);
  max_leaks = max_leaks > 0 ? Min(max_leaks, leaks_.size()) : leaks_.size();
  for (uptr i = 0; i < max_leaks; i++) {
    Printf("\n%s leak of %llu bytes in %llu objects allocated from:\n",
           leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
           leaks_[i].total_size, leaks_[i].hit_count);
    PrintStackTraceById(leaks_[i].stack_trace_id);
  }
  if (max_leaks < leaks_.size()) {
    uptr remaining = leaks_.size() - max_leaks;
    Printf("\nOmitting %llu more leak%s.\n", remaining,
           remaining > 1 ? "s" : "");
  }
}
#else // CAN_SANITIZE_LEAKS
void InitCommonLsan() {}
void DoLeakCheck() {}
#endif // CAN_SANITIZE_LEAKS
}  // namespace __lsan