blob: eca26eb6df66981c183d81f2fa5ef1f1d3896ac0 [file] [log] [blame]
Colin Cross7add50d2016-01-14 15:35:40 -08001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <inttypes.h>
18
19#include <functional>
20#include <iomanip>
21#include <mutex>
22#include <string>
23#include <sstream>
24
25#include <backtrace.h>
26#include <android-base/macros.h>
27
28#include "Allocator.h"
29#include "HeapWalker.h"
30#include "LeakPipe.h"
31#include "ProcessMappings.h"
32#include "PtracerThread.h"
33#include "ScopedDisableMalloc.h"
34#include "Semaphore.h"
35#include "ThreadCapture.h"
36
37#include "memunreachable/memunreachable.h"
38#include "bionic.h"
39#include "log.h"
40
// Out-of-line definition of the static constexpr member so it is ODR-usable
// (its address is taken by std::min in GetUnreachableMemory below).
const size_t Leak::contents_length;
42
43using namespace std::chrono_literals;
44
// Drives the unreachable-memory scan for one process: feeds allocations and
// root regions (globals, stacks, registers) into a HeapWalker, then sweeps
// it for leaked ranges.
class MemUnreachable {
 public:
  MemUnreachable(pid_t pid, Allocator<void> allocator) : pid_(pid), allocator_(allocator),
      heap_walker_(allocator_) {}
  // Registers every heap allocation and root region from the captured
  // thread info and process mappings with heap_walker_.
  bool CollectAllocations(const allocator::vector<ThreadInfo>& threads,
      const allocator::vector<Mapping>& mappings);
  // Sweeps heap_walker_ and fills |leaks| with up to |limit| leak records;
  // totals are returned through |num_leaks| and |leak_bytes|.
  bool GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
      size_t* num_leaks, size_t* leak_bytes);
  size_t Allocations() { return heap_walker_.Allocations(); }
  size_t AllocationBytes() { return heap_walker_.AllocationBytes(); }
 private:
  // Partitions |mappings| into the categories the walker treats differently
  // (heap, anonymous, globals, stacks).
  bool ClassifyMappings(const allocator::vector<Mapping>& mappings,
      allocator::vector<Mapping>& heap_mappings,
      allocator::vector<Mapping>& anon_mappings,
      allocator::vector<Mapping>& globals_mappings,
      allocator::vector<Mapping>& stack_mappings);
  DISALLOW_COPY_AND_ASSIGN(MemUnreachable);
  pid_t pid_;
  Allocator<void> allocator_;
  HeapWalker heap_walker_;
};
66
67static void HeapIterate(const Mapping& heap_mapping,
68 const std::function<void(uintptr_t, size_t)>& func) {
69 malloc_iterate(heap_mapping.begin, heap_mapping.end - heap_mapping.begin,
70 [](uintptr_t base, size_t size, void* arg) {
71 auto f = reinterpret_cast<const std::function<void(uintptr_t, size_t)>*>(arg);
72 (*f)(base, size);
73 }, const_cast<void*>(reinterpret_cast<const void*>(&func)));
74}
75
76bool MemUnreachable::CollectAllocations(const allocator::vector<ThreadInfo>& threads,
77 const allocator::vector<Mapping>& mappings) {
78 ALOGI("searching process %d for allocations", pid_);
79 allocator::vector<Mapping> heap_mappings{mappings};
80 allocator::vector<Mapping> anon_mappings{mappings};
81 allocator::vector<Mapping> globals_mappings{mappings};
82 allocator::vector<Mapping> stack_mappings{mappings};
83 if (!ClassifyMappings(mappings, heap_mappings, anon_mappings,
84 globals_mappings, stack_mappings)) {
85 return false;
86 }
87
88 for (auto it = heap_mappings.begin(); it != heap_mappings.end(); it++) {
89 ALOGV("Heap mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
90 HeapIterate(*it, [&](uintptr_t base, size_t size) {
91 heap_walker_.Allocation(base, base + size);
92 });
93 }
94
95 for (auto it = anon_mappings.begin(); it != anon_mappings.end(); it++) {
96 ALOGV("Anon mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
97 heap_walker_.Allocation(it->begin, it->end);
98 }
99
100 for (auto it = globals_mappings.begin(); it != globals_mappings.end(); it++) {
101 ALOGV("Globals mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
102 heap_walker_.Root(it->begin, it->end);
103 }
104
105 for (auto thread_it = threads.begin(); thread_it != threads.end(); thread_it++) {
106 for (auto it = stack_mappings.begin(); it != stack_mappings.end(); it++) {
107 if (thread_it->stack.first >= it->begin && thread_it->stack.first <= it->end) {
108 ALOGV("Stack %" PRIxPTR "-%" PRIxPTR " %s", thread_it->stack.first, it->end, it->name);
109 heap_walker_.Root(thread_it->stack.first, it->end);
110 }
111 }
112 heap_walker_.Root(thread_it->regs);
113 }
114
115 ALOGI("searching done");
116
117 return true;
118}
119
120bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
121 size_t* num_leaks, size_t* leak_bytes) {
122 ALOGI("sweeping process %d for unreachable memory", pid_);
123 leaks.clear();
124
125 allocator::vector<Range> leaked{allocator_};
126 if (!heap_walker_.Leaked(leaked, limit, num_leaks, leak_bytes)) {
127 return false;
128 }
129
130 for (auto it = leaked.begin(); it != leaked.end(); it++) {
131 Leak leak{};
132 leak.begin = it->begin;
133 leak.size = it->end - it->begin;;
134 memcpy(leak.contents, reinterpret_cast<void*>(it->begin),
135 std::min(leak.size, Leak::contents_length));
136 ssize_t num_backtrace_frames = malloc_backtrace(reinterpret_cast<void*>(it->begin),
137 leak.backtrace_frames, leak.backtrace_length);
138 if (num_backtrace_frames > 0) {
139 leak.num_backtrace_frames = num_backtrace_frames;
140 }
141 leaks.emplace_back(leak);
142 }
143
144 ALOGI("sweeping done");
145
146 return true;
147}
148
149static bool has_prefix(const allocator::string& s, const char* prefix) {
150 int ret = s.compare(0, strlen(prefix), prefix);
151 return ret == 0;
152}
153
154bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings,
155 allocator::vector<Mapping>& heap_mappings,
156 allocator::vector<Mapping>& anon_mappings,
157 allocator::vector<Mapping>& globals_mappings,
158 allocator::vector<Mapping>& stack_mappings)
159{
160 heap_mappings.clear();
161 anon_mappings.clear();
162 globals_mappings.clear();
163 stack_mappings.clear();
164
165 allocator::string current_lib{allocator_};
166
167 for (auto it = mappings.begin(); it != mappings.end(); it++) {
168 if (it->execute) {
169 current_lib = it->name;
170 continue;
171 }
172
173 if (!it->read) {
174 continue;
175 }
176
177 const allocator::string mapping_name{it->name, allocator_};
178 if (mapping_name == "[anon:.bss]") {
179 // named .bss section
180 globals_mappings.emplace_back(*it);
181 } else if (mapping_name == current_lib) {
182 // .rodata or .data section
183 globals_mappings.emplace_back(*it);
184 } else if (mapping_name == "[anon:libc_malloc]") {
185 // named malloc mapping
186 heap_mappings.emplace_back(*it);
187 } else if (has_prefix(mapping_name, "/dev/ashmem/dalvik")) {
188 // named dalvik heap mapping
189 globals_mappings.emplace_back(*it);
190 } else if (has_prefix(mapping_name, "[stack")) {
191 // named stack mapping
192 stack_mappings.emplace_back(*it);
193 } else if (mapping_name.size() == 0) {
194 globals_mappings.emplace_back(*it);
195 } else if (has_prefix(mapping_name, "[anon:") && mapping_name != "[anon:leak_detector_malloc]") {
196 // TODO(ccross): it would be nice to treat named anonymous mappings as
197 // possible leaks, but naming something in a .bss or .data section makes
198 // it impossible to distinguish them from mmaped and then named mappings.
199 globals_mappings.emplace_back(*it);
200 }
201 }
202
203 return true;
204}
205
// Detects unreachable memory in the calling process and fills in |info|.
// Orchestrates three contexts:
//   1. the original thread, which freezes malloc and waits;
//   2. a ptracer collection thread, which captures all threads' registers,
//      stacks, and /proc/pid/maps, then forks;
//   3. a forked heap-walker child, which scans the CoW snapshot and sends
//      results back over a pipe.
// Returns false if any stage fails. |limit| caps the number of reported leaks.
bool GetUnreachableMemory(UnreachableMemoryInfo& info, size_t limit) {
  int parent_pid = getpid();
  int parent_tid = gettid();

  Heap heap;

  Semaphore continue_parent_sem;
  LeakPipe pipe;

  PtracerThread thread{[&]() -> int {
    /////////////////////////////////////////////
    // Collection thread
    /////////////////////////////////////////////
    ALOGI("collecting thread info for process %d...", parent_pid);

    ThreadCapture thread_capture(parent_pid, heap);
    allocator::vector<ThreadInfo> thread_info(heap);
    allocator::vector<Mapping> mappings(heap);

    // ptrace all the threads
    if (!thread_capture.CaptureThreads()) {
      // Wake the parent before bailing so it is not left blocked on the
      // semaphore; Join() will then observe the nonzero exit.
      continue_parent_sem.Post();
      return 1;
    }

    // collect register contents and stacks
    if (!thread_capture.CapturedThreadInfo(thread_info)) {
      continue_parent_sem.Post();
      return 1;
    }

    // snapshot /proc/pid/maps
    if (!ProcessMappings(parent_pid, mappings)) {
      continue_parent_sem.Post();
      return 1;
    }

    // malloc must be enabled to call fork, at_fork handlers take the same
    // locks as ScopedDisableMalloc. All threads are paused in ptrace, so
    // memory state is still consistent. Unfreeze the original thread so it
    // can drop the malloc locks, it will block until the collection thread
    // exits.
    thread_capture.ReleaseThread(parent_tid);
    continue_parent_sem.Post();

    // fork a process to do the heap walking
    int ret = fork();
    if (ret < 0) {
      return 1;
    } else if (ret == 0) {
      /////////////////////////////////////////////
      // Heap walker process
      /////////////////////////////////////////////
      // Examine memory state in the child using the data collected above and
      // the CoW snapshot of the process memory contents.

      if (!pipe.OpenSender()) {
        _exit(1);
      }

      MemUnreachable unreachable{parent_pid, heap};

      if (!unreachable.CollectAllocations(thread_info, mappings)) {
        _exit(2);
      }
      size_t num_allocations = unreachable.Allocations();
      size_t allocation_bytes = unreachable.AllocationBytes();

      allocator::vector<Leak> leaks{heap};

      size_t num_leaks = 0;
      size_t leak_bytes = 0;
      bool ok = unreachable.GetUnreachableMemory(leaks, limit, &num_leaks, &leak_bytes);

      // Stream the results to the parent; short-circuits on first failure.
      ok = ok && pipe.Sender().Send(num_allocations);
      ok = ok && pipe.Sender().Send(allocation_bytes);
      ok = ok && pipe.Sender().Send(num_leaks);
      ok = ok && pipe.Sender().Send(leak_bytes);
      ok = ok && pipe.Sender().SendVector(leaks);

      if (!ok) {
        _exit(3);
      }

      // _exit (not exit) so no atexit handlers run in the forked child.
      _exit(0);
    } else {
      // Nothing left to do in the collection thread, return immediately,
      // releasing all the captured threads.
      ALOGI("collection thread done");
      return 0;
    }
  }};

  /////////////////////////////////////////////
  // Original thread
  /////////////////////////////////////////////

  {
    // Disable malloc to get a consistent view of memory
    ScopedDisableMalloc disable_malloc;

    // Start the collection thread
    thread.Start();

    // Wait for the collection thread to signal that it is ready to fork the
    // heap walker process.
    continue_parent_sem.Wait(30s);

    // Re-enable malloc so the collection thread can fork.
  }

  // Wait for the collection thread to exit
  int ret = thread.Join();
  if (ret != 0) {
    return false;
  }

  // Get a pipe from the heap walker process. Transferring a new pipe fd
  // ensures no other forked processes can have it open, so when the heap
  // walker process dies the remote side of the pipe will close.
  if (!pipe.OpenReceiver()) {
    return false;
  }

  // Receive the results in the same order the child sent them.
  bool ok = true;
  ok = ok && pipe.Receiver().Receive(&info.num_allocations);
  ok = ok && pipe.Receiver().Receive(&info.allocation_bytes);
  ok = ok && pipe.Receiver().Receive(&info.num_leaks);
  ok = ok && pipe.Receiver().Receive(&info.leak_bytes);
  ok = ok && pipe.Receiver().ReceiveVector(info.leaks);
  if (!ok) {
    return false;
  }

  ALOGI("unreachable memory detection done");
  ALOGE("%zu bytes in %zu allocation%s unreachable out of %zu bytes in %zu allocation%s",
      info.leak_bytes, info.num_leaks, info.num_leaks == 1 ? "" : "s",
      info.allocation_bytes, info.num_allocations, info.num_allocations == 1 ? "" : "s");

  return true;
}
347
// Formats one leak record: size and address, then (optionally) a hex/ASCII
// dump of the captured contents, then the allocation backtrace if one was
// recorded by malloc_backtrace.
std::string Leak::ToString(bool log_contents) const {

  std::ostringstream oss;

  oss << " " << std::dec << size;
  oss << " bytes unreachable at ";
  oss << std::hex << begin;
  oss << std::endl;

  if (log_contents) {
    const int bytes_per_line = 16;
    // Only the first contents_length bytes were snapshotted at sweep time.
    const size_t bytes = std::min(size, contents_length);

    if (bytes == size) {
      oss << " contents:" << std::endl;
    } else {
      oss << " first " << bytes << " bytes of contents:" << std::endl;
    }

    for (size_t i = 0; i < bytes; i += bytes_per_line) {
      oss << " " << std::hex << begin + i << ": ";
      size_t j;
      // Hex column: zero-padded two-digit bytes.
      oss << std::setfill('0');
      for (j = i; j < bytes && j < i + bytes_per_line; j++) {
        oss << std::setw(2) << static_cast<int>(contents[j]) << " ";
      }
      // Pad out a short final line before the ASCII column.
      oss << std::setfill(' ');
      for (; j < i + bytes_per_line; j++) {
        oss << " ";
      }
      // ASCII column: non-printable bytes rendered as '.'.
      for (j = i; j < bytes && j < i + bytes_per_line; j++) {
        char c = contents[j];
        if (c < ' ' || c >= 0x7f) {
          c = '.';
        }
        oss << c;
      }
      oss << std::endl;
    }
  }
  if (num_backtrace_frames > 0) {
    oss << backtrace_string(backtrace_frames, num_backtrace_frames);
  }

  return oss.str();
}
394
395std::string UnreachableMemoryInfo::ToString(bool log_contents) const {
396 std::ostringstream oss;
397 oss << " " << leak_bytes << " bytes in ";
398 oss << num_leaks << " unreachable allocation" << (num_leaks == 1 ? "" : "s");
399 oss << std::endl;
400
401 for (auto it = leaks.begin(); it != leaks.end(); it++) {
402 oss << it->ToString(log_contents);
403 }
404
405 return oss.str();
406}
407
408std::string GetUnreachableMemoryString(bool log_contents, size_t limit) {
409 UnreachableMemoryInfo info;
410 if (!GetUnreachableMemory(info, limit)) {
Colin Crossde42af02016-01-14 15:35:40 -0800411 return "Failed to get unreachable memory\n";
Colin Cross7add50d2016-01-14 15:35:40 -0800412 }
413
414 return info.ToString(log_contents);
415}
416
417bool LogUnreachableMemory(bool log_contents, size_t limit) {
418 UnreachableMemoryInfo info;
419 if (!GetUnreachableMemory(info, limit)) {
420 return false;
421 }
422
423 for (auto it = info.leaks.begin(); it != info.leaks.end(); it++) {
424 ALOGE("%s", it->ToString(log_contents).c_str());
425 }
426 return true;
427}
428
429
430bool NoLeaks() {
431 UnreachableMemoryInfo info;
432 if (!GetUnreachableMemory(info, 0)) {
433 return false;
434 }
435
436 return info.num_leaks == 0;
437}