//=-- lsan_common.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"

namespace __lsan {
#if CAN_SANITIZE_LEAKS

// This mutex is used to prevent races between DoLeakCheck and
// __lsan_ignore_object.
BlockingMutex global_mutex(LINKER_INITIALIZED);

THREADLOCAL int disable_counter;

Flags lsan_flags;

static void InitializeFlags() {
  Flags *f = flags();
  // Default values.
  f->report_objects = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->exitcode = 23;
  f->use_registers = true;
  f->use_globals = true;
  f->use_stacks = true;
  f->use_tls = true;
  f->use_unaligned = false;
  f->verbosity = 0;
  f->log_pointers = false;
  f->log_threads = false;

  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    ParseFlag(options, &f->use_registers, "use_registers");
    ParseFlag(options, &f->use_globals, "use_globals");
    ParseFlag(options, &f->use_stacks, "use_stacks");
    ParseFlag(options, &f->use_tls, "use_tls");
    ParseFlag(options, &f->use_unaligned, "use_unaligned");
    ParseFlag(options, &f->report_objects, "report_objects");
    ParseFlag(options, &f->resolution, "resolution");
    CHECK_GE(f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks");
    CHECK_GE(f->max_leaks, 0);
    ParseFlag(options, &f->verbosity, "verbosity");
    ParseFlag(options, &f->log_pointers, "log_pointers");
    ParseFlag(options, &f->log_threads, "log_threads");
    ParseFlag(options, &f->exitcode, "exitcode");
  }
}

void InitCommonLsan() {
  InitializeFlags();
  InitializePlatformSpecificModules();
}

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible
  // lower boundary on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#ifdef __x86_64__
  // Accept only canonical form user-space addresses.
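  // E.g. 0x00007f1234567890 (top 17 bits clear) is accepted below, while
  // 0xffff880012345678 (a typical kernel-space pattern) is rejected.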
  return ((p >> 47) == 0);
#else
  return true;
#endif
}

// Scan the memory range, looking for byte patterns that point into allocator
// chunks. Mark those chunks with tag and add them to the frontier.
// There are two usage modes for this function: finding reachable or ignored
// chunks (tag = kReachable or kIgnored) and finding indirectly leaked chunks
// (tag = kIndirectlyLeaked). In the second case, there's no flood fill,
// so frontier = 0.
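// Illustrative calls matching the two modes, as they appear elsewhere in
// this file:
//   ScanRangeForPointers(begin, end, &frontier, "TLS", kReachable);
//   ScanRangeForPointers(begin, end, /* frontier */ 0, "HEAP",
//                        kIndirectlyLeaked);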
void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  if (flags()->log_pointers)
    Report("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void**>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    void *chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    LsanMetadata m(chunk);
    // Reachable beats ignored beats leaked.
    if (m.tag() == kReachable) continue;
    if (m.tag() == kIgnored && tag != kReachable) continue;
    m.set_tag(tag);
    if (flags()->log_pointers)
      Report("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
             chunk, reinterpret_cast<uptr>(chunk) + m.requested_size(),
             m.requested_size());
    if (frontier)
      frontier->push_back(reinterpret_cast<uptr>(chunk));
  }
}

// Scan thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    if (flags()->log_threads) Report("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in
      // the process of destruction. Log this event and move on.
      if (flags()->log_threads)
        Report("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      if (flags()->log_threads)
        Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running
        // a signal handler on an alternate stack). Again, consider the entire
        // stack range to be reachable.
        if (flags()->log_threads)
          Report("WARNING: stack pointer not in stack range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
    }

    if (flags()->use_tls) {
      if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that the allocator cache will be part of the static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                               kReachable);
      }
    }
  }
}

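// Flood fill: repeatedly pop a chunk off the frontier and scan its contents
// for pointers, tagging every newly discovered chunk with the given tag,
// until the frontier is empty.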
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(reinterpret_cast<void *>(next_chunk));
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// Mark leaked chunks which are reachable from other leaked chunks.
void MarkIndirectlyLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(reinterpret_cast<uptr>(p),
                         reinterpret_cast<uptr>(p) + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

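// Collect chunks explicitly tagged kIgnored (via __lsan_ignore_object()) into
// the frontier, so that everything reachable from them can also be tagged
// kIgnored by the subsequent flood fill.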
void CollectIgnoredCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (m.allocated() && m.tag() == kIgnored)
    frontier_->push_back(reinterpret_cast<uptr>(p));
}

// Set the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(GetPageSizeCached());

  if (flags()->use_globals)
    ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  FloodFillTag(&frontier, kReachable);
  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  if (flags()->log_pointers)
    Report("Scanning ignored chunks.\n");
  CHECK_EQ(0, frontier.size());
  ForEachChunk(CollectIgnoredCb(&frontier));
  FloodFillTag(&frontier, kIgnored);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  if (flags()->log_pointers)
    Report("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb());
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  StackTrace::PrintStack(trace, size, common_flags()->symbolize,
                         common_flags()->strip_path_prefix, 0);
}

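// Add a leaked chunk to the leak report. If the "resolution" flag is set,
// truncate the allocation stack trace to that many frames first, so that
// leaks differing only below that depth are merged into one report entry.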
void CollectLeaksCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    uptr resolution = flags()->resolution;
    if (resolution > 0) {
      uptr size = 0;
      const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
      size = Min(size, resolution);
      leak_report_->Add(StackDepotPut(trace, size), m.requested_size(),
                        m.tag());
    } else {
      leak_report_->Add(m.stack_trace_id(), m.requested_size(), m.tag());
    }
  }
}

static void CollectLeaks(LeakReport *leak_report) {
  ForEachChunk(CollectLeaksCb(leak_report));
}

void PrintLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    Printf("%s leaked %zu byte object at %p.\n",
           m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
           m.requested_size(), p);
  }
}

static void PrintLeaked() {
  Printf("\n");
  Printf("Reporting individual objects:\n");
  ForEachChunk(PrintLeakedCb());
}

struct DoLeakCheckParam {
  bool success;
  LeakReport leak_report;
};

static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  CHECK(param->leak_report.IsEmpty());
  ClassifyAllChunks(suspended_threads);
  CollectLeaks(&param->leak_report);
  if (!param->leak_report.IsEmpty() && flags()->report_objects)
    PrintLeaked();
  param->success = true;
}

void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  CHECK(!already_done);
  already_done = true;

  DoLeakCheckParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(DoLeakCheckCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  }
  if (!param.leak_report.IsEmpty()) {
    Printf("\n================================================================="
           "\n");
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    param.leak_report.PrintLargest(flags()->max_leaks);
    param.leak_report.PrintSummary();
    if (flags()->exitcode)
      internal__exit(flags()->exitcode);
  }
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::Add(). We don't expect to ever see this many leaks in
// real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 1000;

void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  for (uptr i = 0; i < leaks_.size(); i++)
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      return;
    }
  if (leaks_.size() == kMaxLeaksConsidered) return;
  Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id,
                is_directly_leaked };
  leaks_.push_back(leak);
}

static bool IsLarger(const Leak &leak1, const Leak &leak2) {
  return leak1.total_size > leak2.total_size;
}

void LeakReport::PrintLargest(uptr max_leaks) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);
  if (max_leaks > 0 && max_leaks < leaks_.size())
    Printf("The %zu largest leak(s):\n", max_leaks);
  InternalSort(&leaks_, leaks_.size(), IsLarger);
  max_leaks = max_leaks > 0 ? Min(max_leaks, leaks_.size()) : leaks_.size();
  for (uptr i = 0; i < max_leaks; i++) {
    Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
           leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
           leaks_[i].total_size, leaks_[i].hit_count);
    PrintStackTraceById(leaks_[i].stack_trace_id);
    Printf("\n");
  }
  if (max_leaks < leaks_.size()) {
    uptr remaining = leaks_.size() - max_leaks;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  Printf(
      "SUMMARY: LeakSanitizer: %zu byte(s) leaked in %zu allocation(s).\n\n",
      bytes, allocations);
}
#endif  // CAN_SANITIZE_LEAKS

bool DisabledInThisThread() {
#if CAN_SANITIZE_LEAKS
  return disable_counter > 0;
#else
  // disable_counter is only declared when CAN_SANITIZE_LEAKS is nonzero, so
  // this must be #if rather than #ifdef.
  return false;
#endif
}
}  // namespace __lsan

using namespace __lsan;  // NOLINT

extern "C" {
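// Tells LSan to treat the heap object at p, and everything reachable from it,
// as ignored rather than leaked. A minimal usage sketch (hypothetical user
// code):
//   void *p = malloc(16);
//   __lsan_ignore_object(p);  // Never reported, even if the pointer is lost.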
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is
  // not locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid && flags()->verbosity >= 1)
    Report("__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored && flags()->verbosity >= 1)
    Report("__lsan_ignore_object(): "
           "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess && flags()->verbosity >= 2)
    Report("__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}

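// __lsan_disable()/__lsan_enable() adjust a per-thread counter; while it is
// nonzero, DisabledInThisThread() returns true. A minimal usage sketch
// (hypothetical user code; calls may nest, but must be balanced):
//   __lsan_disable();
//   leak_me = malloc(1);  // Assumed to be excluded from leak reports while
//                         // the current thread is in the disabled state.
//   __lsan_enable();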
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::disable_counter++;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  if (!__lsan::disable_counter) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  __lsan::disable_counter--;
#endif
}
}  // extern "C"