Brian Carlstrom | db4d540 | 2011-08-09 12:18:28 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2008 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
David Sehr | 79e2607 | 2018-04-06 17:58:50 -0700 | [diff] [blame] | 17 | #ifndef ART_LIBARTBASE_BASE_MEM_MAP_H_ |
| 18 | #define ART_LIBARTBASE_BASE_MEM_MAP_H_ |
Brian Carlstrom | db4d540 | 2011-08-09 12:18:28 -0700 | [diff] [blame] | 19 | |
Brian Carlstrom | 27ec961 | 2011-09-19 20:20:38 -0700 | [diff] [blame] | 20 | #include <stddef.h> |
| 21 | #include <sys/types.h> |
Brian Carlstrom | db4d540 | 2011-08-09 12:18:28 -0700 | [diff] [blame] | 22 | |
Andreas Gampe | 0dfc315 | 2017-04-24 07:58:06 -0700 | [diff] [blame] | 23 | #include <map> |
Igor Murashkin | 5573c37 | 2017-11-16 13:34:30 -0800 | [diff] [blame] | 24 | #include <mutex> |
Andreas Gampe | 0dfc315 | 2017-04-24 07:58:06 -0700 | [diff] [blame] | 25 | #include <string> |
| 26 | |
| 27 | #include "android-base/thread_annotations.h" |
David Sehr | 1979c64 | 2018-04-26 14:41:18 -0700 | [diff] [blame] | 28 | #include "macros.h" |
Brian Carlstrom | db4d540 | 2011-08-09 12:18:28 -0700 | [diff] [blame] | 29 | |
| 30 | namespace art { |
| 31 | |
Steve Austin | 882ed6b | 2018-06-08 11:40:38 -0700 | [diff] [blame] | 32 | #if defined(__LP64__) && !defined(__Fuchsia__) && \ |
| 33 | (defined(__aarch64__) || defined(__mips__) || defined(__APPLE__)) |
Ian Rogers | c3ccc10 | 2014-06-25 11:52:14 -0700 | [diff] [blame] | 34 | #define USE_ART_LOW_4G_ALLOCATOR 1 |
| 35 | #else |
Steve Austin | 882ed6b | 2018-06-08 11:40:38 -0700 | [diff] [blame] | 36 | #if defined(__LP64__) && !defined(__Fuchsia__) && !defined(__x86_64__) |
Andreas Gampe | 651ba59 | 2017-06-14 14:41:33 -0700 | [diff] [blame] | 37 | #error "Unrecognized 64-bit architecture." |
| 38 | #endif |
Ian Rogers | c3ccc10 | 2014-06-25 11:52:14 -0700 | [diff] [blame] | 39 | #define USE_ART_LOW_4G_ALLOCATOR 0 |
| 40 | #endif |
| 41 | |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 42 | #ifdef __linux__ |
| 43 | static constexpr bool kMadviseZeroes = true; |
Alex Light | ca97ada | 2018-02-02 09:25:31 -0800 | [diff] [blame] | 44 | #define HAVE_MREMAP_SYSCALL true |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 45 | #else |
| 46 | static constexpr bool kMadviseZeroes = false; |
Alex Light | ca97ada | 2018-02-02 09:25:31 -0800 | [diff] [blame] | 47 | // We cannot ever perform MemMap::ReplaceWith on non-linux hosts since the syscall is not |
| 48 | // present. |
| 49 | #define HAVE_MREMAP_SYSCALL false |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 50 | #endif |
| 51 | |
// Used to keep track of mmap segments.
//
// On 64-bit systems not supporting MAP_32BIT, the implementation of MemMap will do a linear scan
// for free pages. For security, the start of this scan should be randomized. This requires a
// dynamic initializer.
// For this to work, it is paramount that there are no other static initializers that access MemMap.
// Otherwise, calls might see uninitialized values.
class MemMap {
 public:
  // Whether MemMap::ReplaceWith() can work on this platform (requires the mremap syscall,
  // see HAVE_MREMAP_SYSCALL above).
  static constexpr bool kCanReplaceMapping = HAVE_MREMAP_SYSCALL;

  // Creates an invalid mapping.
  MemMap() {}

  // Creates an invalid mapping. Used when we want to be more explicit than MemMap().
  static MemMap Invalid() {
    return MemMap();
  }

  // Move-only type: moving transfers ownership of the underlying mapping and leaves the
  // source invalid (copy operations are implicitly suppressed by the user-declared moves).
  MemMap(MemMap&& other) REQUIRES(!MemMap::mem_maps_lock_);
  MemMap& operator=(MemMap&& other) REQUIRES(!MemMap::mem_maps_lock_) {
    Reset();
    swap(other);
    return *this;
  }

  // Releases the memory mapping.
  ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);

  // Swap two MemMaps.
  void swap(MemMap& other);

  // Releases the mapping (if this MemMap is valid) and returns *this to the invalid state.
  void Reset() {
    if (IsValid()) {
      DoReset();
    }
  }

  // An invalid MemMap (default-constructed or moved-from) is encoded by base_size_ == 0.
  bool IsValid() const {
    return base_size_ != 0u;
  }

  // Replace the data in this memmap with the data in the memmap pointed to by source. The caller
  // relinquishes ownership of the source mmap.
  //
  // For the call to be successful:
  //   * The range [dest->Begin, dest->Begin() + source->Size()] must not overlap with
  //     [source->Begin(), source->End()].
  //   * Neither source nor dest may be 'reused' mappings (they must own all the pages associated
  //     with them).
  //   * kCanReplaceMapping must be true.
  //   * Neither source nor dest may use manual redzones.
  //   * Both source and dest must have the same offset from the nearest page boundary.
  //   * mremap must succeed when called on the mappings.
  //
  // If this call succeeds it will return true and:
  //   * Invalidate *source
  //   * The protection of this will remain the same.
  //   * The size of this will be the size of the source
  //   * The data in this will be the data from source.
  //
  // If this call fails it will return false and make no changes to *source or this. The ownership
  // of the source mmap is returned to the caller.
  bool ReplaceWith(/*in-out*/MemMap* source, /*out*/std::string* error);

  // Set a debug friendly name for a map. It will be prefixed with "dalvik-".
  static void SetDebugName(void* map_ptr, const char* name, size_t size);

  // Request an anonymous region of length 'byte_count' and a requested base address.
  // Use null as the requested base address if you don't care.
  // "reuse" allows re-mapping an address range from an existing mapping.
  //
  // The word "anonymous" in this context means "not backed by a file". The supplied
  // 'name' will be used -- on systems that support it -- to give the mapping
  // a name.
  //
  // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
  static MemMap MapAnonymous(const char* name,
                             uint8_t* addr,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             bool reuse,
                             std::string* error_msg,
                             bool use_debug_name = true);
  // Convenience overload for the common case: no reuse of an existing mapping.
  static MemMap MapAnonymous(const char* name,
                             uint8_t* addr,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             std::string* error_msg) {
    return MapAnonymous(name, addr, byte_count, prot, low_4gb, /* reuse */ false, error_msg);
  }

  // Create placeholder for a region allocated by direct call to mmap.
  // This is useful when we do not have control over the code calling mmap,
  // but when we still want to keep track of it in the list.
  // The region is not considered to be owned and will not be unmapped.
  static MemMap MapDummy(const char* name, uint8_t* addr, size_t byte_count);

  // Map part of a file, taking care of non-page aligned offsets. The
  // "start" offset is absolute, not relative.
  //
  // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
  static MemMap MapFile(size_t byte_count,
                        int prot,
                        int flags,
                        int fd,
                        off_t start,
                        bool low_4gb,
                        const char* filename,
                        std::string* error_msg) {
    return MapFileAtAddress(nullptr,
                            byte_count,
                            prot,
                            flags,
                            fd,
                            start,
                            /*low_4gb*/low_4gb,
                            /*reuse*/false,
                            filename,
                            error_msg);
  }

  // Map part of a file, taking care of non-page aligned offsets. The "start" offset is absolute,
  // not relative. This version allows requesting a specific address for the base of the mapping.
  // "reuse" allows us to create a view into an existing mapping where we do not take ownership of
  // the memory. If error_msg is null then we do not print /proc/maps to the log if
  // MapFileAtAddress fails. This helps improve performance of the fail case since reading and
  // printing /proc/maps takes several milliseconds in the worst case.
  //
  // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
  static MemMap MapFileAtAddress(uint8_t* addr,
                                 size_t byte_count,
                                 int prot,
                                 int flags,
                                 int fd,
                                 off_t start,
                                 bool low_4gb,
                                 bool reuse,
                                 const char* filename,
                                 std::string* error_msg);

  // Name the mapping was created with (see MapAnonymous / MapFileAtAddress / MapDummy).
  const std::string& GetName() const {
    return name_;
  }

  // Flush the mapping to its backing store; returns true on success.
  // NOTE(review): presumably msync()-based for file-backed maps -- confirm against definition.
  bool Sync();

  // Change the protection of the mapping; returns true on success.
  bool Protect(int prot);

  // Advise the kernel that the pages are no longer needed and ensure they read as zero
  // (cf. kMadviseZeroes above for whether madvise alone zeroes on this platform).
  void MadviseDontNeedAndZero();

  // Current protection of the map (as last recorded in prot_).
  int GetProtect() const {
    return prot_;
  }

  // Start of data. May differ from BaseBegin() (e.g. after AlignBy or non-page-aligned offsets).
  uint8_t* Begin() const {
    return begin_;
  }

  // Length of data; may be smaller than BaseSize().
  size_t Size() const {
    return size_;
  }

  // Resize the mem-map by unmapping pages at the end. Currently only supports shrinking.
  void SetSize(size_t new_size);

  // One past the last data byte.
  uint8_t* End() const {
    return Begin() + Size();
  }

  // Page-aligned base address of the underlying mapping.
  void* BaseBegin() const {
    return base_begin_;
  }

  // Length of the underlying mapping.
  size_t BaseSize() const {
    return base_size_;
  }

  // One past the end of the underlying mapping.
  void* BaseEnd() const {
    return reinterpret_cast<uint8_t*>(BaseBegin()) + BaseSize();
  }

  // Returns true if addr lies within [Begin(), End()).
  bool HasAddress(const void* addr) const {
    return Begin() <= addr && addr < End();
  }

  // Unmap the pages at end and remap them to create another memory map.
  MemMap RemapAtEnd(uint8_t* new_end,
                    const char* tail_name,
                    int tail_prot,
                    std::string* error_msg,
                    bool use_debug_name = true);

  // Check there is no gap in the address range covered by the two maps.
  // NOTE(review): exact adjacency semantics live in the definition -- confirm before relying.
  static bool CheckNoGaps(MemMap& begin_map, MemMap& end_map)
      REQUIRES(!MemMap::mem_maps_lock_);
  // Dump all tracked maps to 'os'; 'terse' selects an abbreviated format.
  static void DumpMaps(std::ostream& os, bool terse = false)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Init and Shutdown are NOT thread safe.
  // Both may be called multiple times and MemMap objects may be created any
  // time after the first call to Init and before the first call to Shutdown.
  static void Init() REQUIRES(!MemMap::mem_maps_lock_);
  static void Shutdown() REQUIRES(!MemMap::mem_maps_lock_);

  // If the map is PROT_READ, try to read each page of the map to check it is in fact readable (not
  // faulting). This is used to diagnose a bug b/19894268 where mprotect doesn't seem to be working
  // intermittently.
  void TryReadable();

  // Align the map by unmapping the unaligned parts at the lower and the higher ends.
  void AlignBy(size_t size);

  // For annotation reasons. Returns null; exists only so thread-safety analysis can name
  // the capability guarding the global map state.
  static std::mutex* GetMemMapsLock() RETURN_CAPABILITY(mem_maps_lock_) {
    return nullptr;
  }

 private:
  // Internal constructor used by the factory functions; records the (possibly unaligned) data
  // window [begin, begin + size) inside the page-aligned mapping [base_begin, base_begin +
  // base_size).
  MemMap(const std::string& name,
         uint8_t* begin,
         size_t size,
         void* base_begin,
         size_t base_size,
         int prot,
         bool reuse,
         size_t redzone_size = 0) REQUIRES(!MemMap::mem_maps_lock_);

  // Implementation helpers for Reset(), invalidation, and swap().
  void DoReset();
  void Invalidate();
  void SwapMembers(MemMap& other);

  static void DumpMapsLocked(std::ostream& os, bool terse)
      REQUIRES(MemMap::mem_maps_lock_);
  static bool HasMemMap(MemMap& map)
      REQUIRES(MemMap::mem_maps_lock_);
  static MemMap* GetLargestMemMapAt(void* address)
      REQUIRES(MemMap::mem_maps_lock_);
  static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Internal version of mmap that supports low 4gb emulation.
  static void* MapInternal(void* addr,
                           size_t length,
                           int prot,
                           int flags,
                           int fd,
                           off_t offset,
                           bool low_4gb)
      REQUIRES(!MemMap::mem_maps_lock_);
  static void* MapInternalArtLow4GBAllocator(size_t length,
                                             int prot,
                                             int flags,
                                             int fd,
                                             off_t offset)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Validate that a map request produced the expected address, filling *error_msg otherwise.
  // (Original note: member function to access real_munmap.)
  static bool CheckMapRequest(uint8_t* expected_ptr,
                              void* actual_ptr,
                              size_t byte_count,
                              std::string* error_msg);

  std::string name_;
  uint8_t* begin_ = nullptr;  // Start of data. May be changed by AlignBy.
  size_t size_ = 0u;  // Length of data.

  void* base_begin_ = nullptr;  // Page-aligned base address. May be changed by AlignBy.
  size_t base_size_ = 0u;  // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
  int prot_ = 0;  // Protection of the map.

  // When reuse_ is true, this is just a view of an existing mapping
  // and we do not take ownership and are not responsible for
  // unmapping.
  bool reuse_ = false;

  // When already_unmapped_ is true the destructor will not call munmap.
  bool already_unmapped_ = false;

  // Size of the manual redzone, if any (cf. the ReplaceWith() precondition on redzones).
  size_t redzone_size_ = 0u;

#if USE_ART_LOW_4G_ALLOCATOR
  static uintptr_t next_mem_pos_;  // Next memory location to check for low_4g extent.

  // Attempt a single mapping below 4GB at 'ptr'; used by the linear-scan low-4G allocator.
  static void* TryMemMapLow4GB(void* ptr,
                               size_t page_aligned_byte_count,
                               int prot,
                               int flags,
                               int fd,
                               off_t offset);
#endif

  // Target-specific mmap/munmap hooks; implemented per platform
  // (cf. the __Fuchsia__ guards above -- see the target sources for details).
  static void TargetMMapInit();
  static void* TargetMMap(void* start, size_t len, int prot, int flags, int fd, off_t fd_off);
  static int TargetMUnmap(void* start, size_t len);

  // Guards the global map-tracking state, per the REQUIRES annotations above.
  static std::mutex* mem_maps_lock_;

  friend class MemMapTest;  // To allow access to base_begin_ and base_size_.
};
Mathieu Chartier | 6e6078a | 2016-10-24 15:45:41 -0700 | [diff] [blame] | 353 | |
Vladimir Marko | c34bebf | 2018-08-16 16:12:49 +0100 | [diff] [blame] | 354 | inline void swap(MemMap& lhs, MemMap& rhs) { |
| 355 | lhs.swap(rhs); |
| 356 | } |
| 357 | |
Brian Carlstrom | 0d6adac | 2014-02-05 17:39:16 -0800 | [diff] [blame] | 358 | std::ostream& operator<<(std::ostream& os, const MemMap& mem_map); |
Brian Carlstrom | db4d540 | 2011-08-09 12:18:28 -0700 | [diff] [blame] | 359 | |
Mathieu Chartier | 6e6078a | 2016-10-24 15:45:41 -0700 | [diff] [blame] | 360 | // Zero and release pages if possible, no requirements on alignments. |
| 361 | void ZeroAndReleasePages(void* address, size_t length); |
| 362 | |
Brian Carlstrom | db4d540 | 2011-08-09 12:18:28 -0700 | [diff] [blame] | 363 | } // namespace art |
| 364 | |
David Sehr | 79e2607 | 2018-04-06 17:58:50 -0700 | [diff] [blame] | 365 | #endif // ART_LIBARTBASE_BASE_MEM_MAP_H_ |