/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <inttypes.h>
#include <string.h>  // for strerror()
#include <sys/auxv.h>
#include <sys/mman.h>
#include <unistd.h>

#include <bionic/mte_kernel.h>

#include <map>
#include <utility>

#include "Allocator.h"
#include "HeapWalker.h"
#include "LeakFolding.h"
#include "ScopedSignalHandler.h"
#include "log.h"

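// Rough usage sketch, inferred from the methods defined in this file (the
// exact sequence used by the rest of libmemunreachable may differ):
//
//   walker.Mapping(begin, end);     // record the valid heap mapping ranges
//   walker.Allocation(begin, end);  // record each live allocation
//   walker.Root(begin, end);        // memory ranges to scan for pointers
//   walker.Root(values);            // values to treat directly as pointers
//   walker.DetectLeaks();           // mark everything reachable from roots
//   walker.Leaked(leaked, limit, &num_leaks, &leak_bytes);
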
namespace android {

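// Records a live allocation covering [begin, end). Zero-length allocations
// are widened to one byte so they can still be found by pointer value.
// Aborts if mappings have been recorded and the allocation falls outside
// them. Returns false when the range collides with an already recorded
// allocation, logging an error unless it is an exact duplicate.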
bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
  if (end == begin) {
    end = begin + 1;
  }
  Range range{begin, end};
  if (valid_mappings_range_.end != 0 &&
      (begin < valid_mappings_range_.begin || end > valid_mappings_range_.end)) {
    MEM_LOG_ALWAYS_FATAL("allocation %p-%p is outside mapping range %p-%p",
                         reinterpret_cast<void*>(begin), reinterpret_cast<void*>(end),
                         reinterpret_cast<void*>(valid_mappings_range_.begin),
                         reinterpret_cast<void*>(valid_mappings_range_.end));
  }
  auto inserted = allocations_.insert(std::pair<Range, AllocationInfo>(range, AllocationInfo{}));
  if (inserted.second) {
    valid_allocations_range_.begin = std::min(valid_allocations_range_.begin, begin);
    valid_allocations_range_.end = std::max(valid_allocations_range_.end, end);
    allocation_bytes_ += range.size();
    return true;
  } else {
    Range overlap = inserted.first->first;
    if (overlap != range) {
      MEM_ALOGE("range %p-%p overlaps with existing range %p-%p", reinterpret_cast<void*>(begin),
                reinterpret_cast<void*>(end), reinterpret_cast<void*>(overlap.begin),
                reinterpret_cast<void*>(overlap.end));
    }
    return false;
  }
}

// Sanitizers and MTE may consider certain memory inaccessible through certain pointers.
// With MTE we set PSTATE.TCO during the access to suppress tag checks.
static uintptr_t ReadWordAtAddressUnsafe(uintptr_t word_ptr)
    __attribute__((no_sanitize("address", "hwaddress"))) {
#if defined(__aarch64__)
#if defined(ANDROID_EXPERIMENTAL_MTE)
  static bool mte = getauxval(AT_HWCAP2) & HWCAP2_MTE;
#else
  static bool mte = false;
#endif
  if (mte) {
    // Disable tag checks.
    __asm__ __volatile__(".arch_extension mte; msr tco, #1");
  }
#endif

  // Load a word from memory without ASAN/HWASAN/MTE checks.
  uintptr_t retval = *reinterpret_cast<uintptr_t*>(word_ptr);

#if defined(__aarch64__)
  if (mte) {
    // Re-enable tag checks.
    __asm__ __volatile__(".arch_extension mte; msr tco, #0");
  }
#endif
  return retval;
}

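// Interprets the word stored at word_ptr as a potential pointer: reads it
// without sanitizer/MTE checks and looks the value up in the allocation map.
// walking_ptr_ is set around the read so that HandleSegFault can tell whether
// a fault belongs to this walker. On a hit, fills *range and *info and
// returns true.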
bool HeapWalker::WordContainsAllocationPtr(uintptr_t word_ptr, Range* range, AllocationInfo** info) {
  walking_ptr_ = word_ptr;
  // This access may segfault if the process under test has done something strange,
  // for example mprotect(PROT_NONE) on a native heap page.  If so, it will be
  // caught and handled by mmapping a zero page over the faulting page.
  uintptr_t value = ReadWordAtAddressUnsafe(word_ptr);
  walking_ptr_ = 0;
  if (value >= valid_allocations_range_.begin && value < valid_allocations_range_.end) {
    AllocationMap::iterator it = allocations_.find(Range{value, value + 1});
    if (it != allocations_.end()) {
      *range = it->first;
      *info = &it->second;
      return true;
    }
  }
  return false;
}

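// Marks every allocation reachable from root, following pointers found inside
// each newly marked allocation. Uses an explicit worklist rather than
// recursion, so long reference chains do not exhaust the call stack.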
void HeapWalker::RecurseRoot(const Range& root) {
  allocator::vector<Range> to_do(1, root, allocator_);
  while (!to_do.empty()) {
    Range range = to_do.back();
    to_do.pop_back();

    walking_range_ = range;
    ForEachPtrInRange(range, [&](Range& ref_range, AllocationInfo* ref_info) {
      if (!ref_info->referenced_from_root) {
        ref_info->referenced_from_root = true;
        to_do.push_back(ref_range);
      }
    });
    walking_range_ = Range{0, 0};
  }
}

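// Widens the range of addresses considered valid for allocations.
// Allocation() aborts when a registered allocation falls outside this range.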
void HeapWalker::Mapping(uintptr_t begin, uintptr_t end) {
  valid_mappings_range_.begin = std::min(valid_mappings_range_.begin, begin);
  valid_mappings_range_.end = std::max(valid_mappings_range_.end, end);
}

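// Root ranges are regions of memory that DetectLeaks() scans for pointers;
// root values are individual values that are treated directly as potential
// pointers.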
void HeapWalker::Root(uintptr_t begin, uintptr_t end) {
  roots_.push_back(Range{begin, end});
}

void HeapWalker::Root(const allocator::vector<uintptr_t>& vals) {
  root_vals_.insert(root_vals_.end(), vals.begin(), vals.end());
}

size_t HeapWalker::Allocations() {
  return allocations_.size();
}

size_t HeapWalker::AllocationBytes() {
  return allocation_bytes_;
}

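// Walks all root ranges and root values, marking every reachable allocation.
// Root values are walked by treating the root_vals_ buffer itself as a range.
// Allocations that remain unmarked afterwards are reported by Leaked().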
bool HeapWalker::DetectLeaks() {
  // Recursively walk pointers from roots to mark referenced allocations
  for (auto it = roots_.begin(); it != roots_.end(); it++) {
    RecurseRoot(*it);
  }

  Range vals;
  vals.begin = reinterpret_cast<uintptr_t>(root_vals_.data());
  vals.end = vals.begin + root_vals_.size() * sizeof(uintptr_t);

  RecurseRoot(vals);

  if (segv_page_count_ > 0) {
    MEM_ALOGE("%zu pages skipped due to segfaults", segv_page_count_);
  }

  return true;
}

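// Reports the allocations that were never marked by DetectLeaks(). At most
// `limit` leaked ranges are copied into `leaked`; the total leak count and
// byte size are returned through the optional out-parameters.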
bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit, size_t* num_leaks_out,
                        size_t* leak_bytes_out) {
  leaked.clear();

  size_t num_leaks = 0;
  size_t leak_bytes = 0;
  for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
    if (!it->second.referenced_from_root) {
      num_leaks++;
      leak_bytes += it->first.end - it->first.begin;
    }
  }

  size_t n = 0;
  for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
    if (!it->second.referenced_from_root) {
      if (n++ < limit) {
        leaked.push_back(it->first);
      }
    }
  }

  if (num_leaks_out) {
    *num_leaks_out = num_leaks;
  }
  if (leak_bytes_out) {
    *leak_bytes_out = leak_bytes;
  }

  return true;
}

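// Replaces the page containing addr with a fresh anonymous read-only page
// (which reads as zeroes) so the walk can continue past a faulting page.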
static bool MapOverPage(void* addr) {
  const size_t page_size = sysconf(_SC_PAGE_SIZE);
  void* page = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & ~(page_size - 1));

  void* ret = mmap(page, page_size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
  if (ret == MAP_FAILED) {
    MEM_ALOGE("failed to map page at %p: %s", page, strerror(errno));
    return false;
  }

  return true;
}

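// Handles a fault raised while the walker is reading a word. A fault at any
// address other than the word currently being read is not ours, so the
// handler resets itself and lets the signal be handled normally. A fault on
// the walked pointer is logged (once) and papered over by mapping a zero
// page so the walk can make progress.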
void HeapWalker::HandleSegFault(ScopedSignalHandler& handler, int signal, siginfo_t* si,
                                void* /*uctx*/) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(si->si_addr);
  if (addr != walking_ptr_) {
    handler.reset();
    return;
  }
  if (!segv_logged_) {
    MEM_ALOGW("failed to read page at %p, signal %d", si->si_addr, signal);
    if (walking_range_.begin != 0U) {
      MEM_ALOGW("while walking range %p-%p", reinterpret_cast<void*>(walking_range_.begin),
                reinterpret_cast<void*>(walking_range_.end));
    }
    segv_logged_ = true;
  }
  segv_page_count_++;
  if (!MapOverPage(si->si_addr)) {
    handler.reset();
  }
}

Allocator<ScopedSignalHandler::SignalFnMap>::unique_ptr ScopedSignalHandler::handler_map_;

}  // namespace android