/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <inttypes.h>

#include <functional>
#include <iomanip>
#include <mutex>
#include <sstream>
#include <string>

#include <backtrace.h>
#include <android-base/macros.h>

#include "Allocator.h"
#include "HeapWalker.h"
#include "LeakFolding.h"
#include "LeakPipe.h"
#include "ProcessMappings.h"
#include "PtracerThread.h"
#include "ScopedDisableMalloc.h"
#include "Semaphore.h"
#include "ThreadCapture.h"

#include "memunreachable/memunreachable.h"
#include "bionic.h"
#include "log.h"

const size_t Leak::contents_length;

using namespace std::chrono_literals;

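// Collects allocations from the target process's heap and anonymous mappings,
// treats globals, thread stacks and registers as roots, and reports any
// allocation the heap walker cannot reach from those roots.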
class MemUnreachable {
 public:
  MemUnreachable(pid_t pid, Allocator<void> allocator) : pid_(pid), allocator_(allocator),
      heap_walker_(allocator_) {}
  bool CollectAllocations(const allocator::vector<ThreadInfo>& threads,
      const allocator::vector<Mapping>& mappings);
  bool GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
      size_t* num_leaks, size_t* leak_bytes);
  size_t Allocations() { return heap_walker_.Allocations(); }
  size_t AllocationBytes() { return heap_walker_.AllocationBytes(); }

 private:
  bool ClassifyMappings(const allocator::vector<Mapping>& mappings,
      allocator::vector<Mapping>& heap_mappings,
      allocator::vector<Mapping>& anon_mappings,
      allocator::vector<Mapping>& globals_mappings,
      allocator::vector<Mapping>& stack_mappings);

  DISALLOW_COPY_AND_ASSIGN(MemUnreachable);

  pid_t pid_;
  Allocator<void> allocator_;
  HeapWalker heap_walker_;
};

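// Bridges malloc_iterate's C-style callback to a std::function: the function
// object is passed through the void* user argument and invoked once per
// allocation in the mapping.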
static void HeapIterate(const Mapping& heap_mapping,
    const std::function<void(uintptr_t, size_t)>& func) {
  malloc_iterate(heap_mapping.begin, heap_mapping.end - heap_mapping.begin,
      [](uintptr_t base, size_t size, void* arg) {
        auto f = reinterpret_cast<const std::function<void(uintptr_t, size_t)>*>(arg);
        (*f)(base, size);
      }, const_cast<void*>(reinterpret_cast<const void*>(&func)));
}

bool MemUnreachable::CollectAllocations(const allocator::vector<ThreadInfo>& threads,
    const allocator::vector<Mapping>& mappings) {
  ALOGI("searching process %d for allocations", pid_);
  allocator::vector<Mapping> heap_mappings{mappings};
  allocator::vector<Mapping> anon_mappings{mappings};
  allocator::vector<Mapping> globals_mappings{mappings};
  allocator::vector<Mapping> stack_mappings{mappings};
  if (!ClassifyMappings(mappings, heap_mappings, anon_mappings,
      globals_mappings, stack_mappings)) {
    return false;
  }

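  // Heap chunks and anonymous mappings become candidate allocations; globals,
  // thread stacks and registers become roots for the reachability scan.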
  for (auto it = heap_mappings.begin(); it != heap_mappings.end(); it++) {
    ALOGV("Heap mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
    HeapIterate(*it, [&](uintptr_t base, size_t size) {
      heap_walker_.Allocation(base, base + size);
    });
  }

  for (auto it = anon_mappings.begin(); it != anon_mappings.end(); it++) {
    ALOGV("Anon mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
    heap_walker_.Allocation(it->begin, it->end);
  }

  for (auto it = globals_mappings.begin(); it != globals_mappings.end(); it++) {
    ALOGV("Globals mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
    heap_walker_.Root(it->begin, it->end);
  }

  for (auto thread_it = threads.begin(); thread_it != threads.end(); thread_it++) {
    for (auto it = stack_mappings.begin(); it != stack_mappings.end(); it++) {
      if (thread_it->stack.first >= it->begin && thread_it->stack.first <= it->end) {
        ALOGV("Stack %" PRIxPTR "-%" PRIxPTR " %s", thread_it->stack.first, it->end, it->name);
        heap_walker_.Root(thread_it->stack.first, it->end);
      }
    }
    heap_walker_.Root(thread_it->regs);
  }

  ALOGI("searching done");

  return true;
}

bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
    size_t* num_leaks, size_t* leak_bytes) {
  ALOGI("sweeping process %d for unreachable memory", pid_);
  leaks.clear();

  if (!heap_walker_.DetectLeaks()) {
    return false;
  }

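  // Fold the leaks so that allocations referenced only by other leaks are
  // counted against the leak that keeps them alive (the referenced_count and
  // referenced_size fields copied below).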
  LeakFolding folding(allocator_, heap_walker_);
  if (!folding.FoldLeaks()) {
    return false;
  }

  allocator::vector<LeakFolding::Leak> leaked{allocator_};

  if (!folding.Leaked(leaked, limit, num_leaks, leak_bytes)) {
    return false;
  }

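  // Copy each folded leak into the fixed-size Leak record that will be sent
  // over the pipe: its range, the fold counts, the first contents_length bytes
  // of data, and the allocation backtrace if one is available.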
  for (auto it = leaked.begin(); it != leaked.end(); it++) {
    Leak leak{};
    leak.begin = it->range.begin;
    leak.size = it->range.size();
    leak.referenced_count = it->referenced_count;
    leak.referenced_size = it->referenced_size;
    memcpy(leak.contents, reinterpret_cast<void*>(it->range.begin),
        std::min(leak.size, Leak::contents_length));
    ssize_t num_backtrace_frames = malloc_backtrace(reinterpret_cast<void*>(it->range.begin),
        leak.backtrace_frames, leak.backtrace_length);
    if (num_backtrace_frames > 0) {
      leak.num_backtrace_frames = num_backtrace_frames;
    }
    leaks.emplace_back(leak);
  }

  ALOGI("sweeping done");

  return true;
}

static bool has_prefix(const allocator::string& s, const char* prefix) {
  int ret = s.compare(0, strlen(prefix), prefix);
  return ret == 0;
}

bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings,
    allocator::vector<Mapping>& heap_mappings,
    allocator::vector<Mapping>& anon_mappings,
    allocator::vector<Mapping>& globals_mappings,
    allocator::vector<Mapping>& stack_mappings) {
  heap_mappings.clear();
  anon_mappings.clear();
  globals_mappings.clear();
  stack_mappings.clear();

  allocator::string current_lib{allocator_};

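  // Assumes /proc/pid/maps lists a library's executable segment before its
  // data segments: remember the most recent executable mapping's name, and
  // classify later mappings with the same name as that library's globals.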
  for (auto it = mappings.begin(); it != mappings.end(); it++) {
    if (it->execute) {
      current_lib = it->name;
      continue;
    }

    if (!it->read) {
      continue;
    }

    const allocator::string mapping_name{it->name, allocator_};
    if (mapping_name == "[anon:.bss]") {
      // named .bss section
      globals_mappings.emplace_back(*it);
    } else if (mapping_name == current_lib) {
      // .rodata or .data section
      globals_mappings.emplace_back(*it);
    } else if (mapping_name == "[anon:libc_malloc]") {
      // named malloc mapping
      heap_mappings.emplace_back(*it);
    } else if (has_prefix(mapping_name, "/dev/ashmem/dalvik")) {
      // named dalvik heap mapping
      globals_mappings.emplace_back(*it);
    } else if (has_prefix(mapping_name, "[stack")) {
      // named stack mapping
      stack_mappings.emplace_back(*it);
    } else if (mapping_name.size() == 0) {
      globals_mappings.emplace_back(*it);
    } else if (has_prefix(mapping_name, "[anon:") && mapping_name != "[anon:leak_detector_malloc]") {
      // TODO(ccross): it would be nice to treat named anonymous mappings as
      // possible leaks, but naming something in a .bss or .data section makes
      // it impossible to distinguish them from mmaped and then named mappings.
      globals_mappings.emplace_back(*it);
    }
  }

  return true;
}

bool GetUnreachableMemory(UnreachableMemoryInfo& info, size_t limit) {
  int parent_pid = getpid();
  int parent_tid = gettid();

  Heap heap;

  Semaphore continue_parent_sem;
  LeakPipe pipe;

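  // The work is split across three contexts: this thread disables malloc and
  // waits, the ptracer thread below attaches to every thread in the process,
  // and a forked child examines the copy-on-write snapshot of the heap and
  // sends its results back over the pipe.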
  PtracerThread thread{[&]() -> int {
    /////////////////////////////////////////////
    // Collection thread
    /////////////////////////////////////////////
    ALOGI("collecting thread info for process %d...", parent_pid);

    ThreadCapture thread_capture(parent_pid, heap);
    allocator::vector<ThreadInfo> thread_info(heap);
    allocator::vector<Mapping> mappings(heap);

    // ptrace all the threads
    if (!thread_capture.CaptureThreads()) {
      continue_parent_sem.Post();
      return 1;
    }

    // collect register contents and stacks
    if (!thread_capture.CapturedThreadInfo(thread_info)) {
      continue_parent_sem.Post();
      return 1;
    }

    // snapshot /proc/pid/maps
    if (!ProcessMappings(parent_pid, mappings)) {
      continue_parent_sem.Post();
      return 1;
    }

    // malloc must be enabled to call fork: at_fork handlers take the same
    // locks as ScopedDisableMalloc.  All threads are paused in ptrace, so
    // memory state is still consistent.  Unfreeze the original thread so it
    // can drop the malloc locks; it will block until the collection thread
    // exits.
    thread_capture.ReleaseThread(parent_tid);
    continue_parent_sem.Post();

    // fork a process to do the heap walking
    int ret = fork();
    if (ret < 0) {
      return 1;
    } else if (ret == 0) {
      /////////////////////////////////////////////
      // Heap walker process
      /////////////////////////////////////////////
      // Examine memory state in the child using the data collected above and
      // the CoW snapshot of the process memory contents.

      if (!pipe.OpenSender()) {
        _exit(1);
      }

      MemUnreachable unreachable{parent_pid, heap};

      if (!unreachable.CollectAllocations(thread_info, mappings)) {
        _exit(2);
      }
      size_t num_allocations = unreachable.Allocations();
      size_t allocation_bytes = unreachable.AllocationBytes();

      allocator::vector<Leak> leaks{heap};

      size_t num_leaks = 0;
      size_t leak_bytes = 0;
      bool ok = unreachable.GetUnreachableMemory(leaks, limit, &num_leaks, &leak_bytes);

      ok = ok && pipe.Sender().Send(num_allocations);
      ok = ok && pipe.Sender().Send(allocation_bytes);
      ok = ok && pipe.Sender().Send(num_leaks);
      ok = ok && pipe.Sender().Send(leak_bytes);
      ok = ok && pipe.Sender().SendVector(leaks);

      if (!ok) {
        _exit(3);
      }

      _exit(0);
    } else {
      // Nothing left to do in the collection thread; return immediately,
      // releasing all the captured threads.
      ALOGI("collection thread done");
      return 0;
    }
  }};

  /////////////////////////////////////////////
  // Original thread
  /////////////////////////////////////////////

  {
    // Disable malloc to get a consistent view of memory
    ScopedDisableMalloc disable_malloc;

    // Start the collection thread
    thread.Start();

    // Wait for the collection thread to signal that it is ready to fork the
    // heap walker process.
    continue_parent_sem.Wait(30s);

    // Re-enable malloc so the collection thread can fork.
  }

  // Wait for the collection thread to exit
  int ret = thread.Join();
  if (ret != 0) {
    return false;
  }

  // Get a pipe from the heap walker process.  Transferring a new pipe fd
  // ensures no other forked processes can have it open, so when the heap
  // walker process dies the remote side of the pipe will close.
  if (!pipe.OpenReceiver()) {
    return false;
  }

  bool ok = true;
  ok = ok && pipe.Receiver().Receive(&info.num_allocations);
  ok = ok && pipe.Receiver().Receive(&info.allocation_bytes);
  ok = ok && pipe.Receiver().Receive(&info.num_leaks);
  ok = ok && pipe.Receiver().Receive(&info.leak_bytes);
  ok = ok && pipe.Receiver().ReceiveVector(info.leaks);
  if (!ok) {
    return false;
  }

  ALOGI("unreachable memory detection done");
  ALOGE("%zu bytes in %zu allocation%s unreachable out of %zu bytes in %zu allocation%s",
      info.leak_bytes, info.num_leaks, info.num_leaks == 1 ? "" : "s",
      info.allocation_bytes, info.num_allocations, info.num_allocations == 1 ? "" : "s");

  return true;
}

std::string Leak::ToString(bool log_contents) const {
  std::ostringstream oss;

  oss << " " << std::dec << size;
  oss << " bytes unreachable at ";
  oss << std::hex << begin;
  if (referenced_count > 0) {
    oss << " referencing " << std::dec << referenced_size << " unreachable bytes";
    oss << " in " << referenced_count;
    oss << " allocation" << ((referenced_count == 1) ? "" : "s");
  }
  oss << std::endl;

  if (log_contents) {
    const int bytes_per_line = 16;
    const size_t bytes = std::min(size, contents_length);

    if (bytes == size) {
      oss << " contents:" << std::endl;
    } else {
      oss << " first " << bytes << " bytes of contents:" << std::endl;
    }

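    // Hex dump of the captured contents, bytes_per_line bytes per row: a hex
    // column padded to constant width, followed by a printable-ASCII column.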
    for (size_t i = 0; i < bytes; i += bytes_per_line) {
      oss << " " << std::hex << begin + i << ": ";
      size_t j;
      oss << std::setfill('0');
      for (j = i; j < bytes && j < i + bytes_per_line; j++) {
        oss << std::setw(2) << static_cast<int>(contents[j]) << " ";
      }
      oss << std::setfill(' ');
      for (; j < i + bytes_per_line; j++) {
        oss << "   ";
      }
      for (j = i; j < bytes && j < i + bytes_per_line; j++) {
        char c = contents[j];
        if (c < ' ' || c >= 0x7f) {
          c = '.';
        }
        oss << c;
      }
      oss << std::endl;
    }
  }
  if (num_backtrace_frames > 0) {
    oss << backtrace_string(backtrace_frames, num_backtrace_frames);
  }

  return oss.str();
}

std::string UnreachableMemoryInfo::ToString(bool log_contents) const {
  std::ostringstream oss;
  oss << " " << leak_bytes << " bytes in ";
  oss << num_leaks << " unreachable allocation" << (num_leaks == 1 ? "" : "s");
  oss << std::endl;

  for (auto it = leaks.begin(); it != leaks.end(); it++) {
    oss << it->ToString(log_contents);
  }

  return oss.str();
}

std::string GetUnreachableMemoryString(bool log_contents, size_t limit) {
  UnreachableMemoryInfo info;
  if (!GetUnreachableMemory(info, limit)) {
    return "Failed to get unreachable memory\n";
  }

  return info.ToString(log_contents);
}

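// Convenience wrapper that collects unreachable memory and logs each leak.
// A typical (hypothetical) caller:
//   if (!LogUnreachableMemory(true /* log_contents */, 100 /* limit */)) {
//     ALOGE("leak detection failed");
//   }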
bool LogUnreachableMemory(bool log_contents, size_t limit) {
  UnreachableMemoryInfo info;
  if (!GetUnreachableMemory(info, limit)) {
    return false;
  }

  for (auto it = info.leaks.begin(); it != info.leaks.end(); it++) {
    ALOGE("%s", it->ToString(log_contents).c_str());
  }
  return true;
}

bool NoLeaks() {
  UnreachableMemoryInfo info;
  if (!GetUnreachableMemory(info, 0)) {
    return false;
  }

  return info.num_leaks == 0;
}