/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
#ifndef ART_RUNTIME_MEM_MAP_H_
#define ART_RUNTIME_MEM_MAP_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>

#include <iosfwd>
#include <map>
#include <mutex>
#include <string>

#include "android-base/thread_annotations.h"
namespace art {

// Decide whether ART must emulate allocation below 4GB with its own linear-scan
// allocator: needed on 64-bit targets that lack MAP_32BIT (arm64, mips64, Mac).
#if defined(__LP64__) && (defined(__aarch64__) || defined(__mips__) || defined(__APPLE__))
#define USE_ART_LOW_4G_ALLOCATOR 1
#else
#if defined(__LP64__) && !defined(__x86_64__)
#error "Unrecognized 64-bit architecture."
#endif
#define USE_ART_LOW_4G_ALLOCATOR 0
#endif

#ifdef __linux__
// Whether madvise() can be relied on to zero pages (true on Linux).
static constexpr bool kMadviseZeroes = true;
#define HAVE_MREMAP_SYSCALL true
#else
static constexpr bool kMadviseZeroes = false;
// We cannot ever perform MemMap::ReplaceWith on non-linux hosts since the syscall is not
// present.
#define HAVE_MREMAP_SYSCALL false
#endif
49
Brian Carlstromdb4d5402011-08-09 12:18:28 -070050// Used to keep track of mmap segments.
Andreas Gamped8f26db2014-05-19 17:01:13 -070051//
52// On 64b systems not supporting MAP_32BIT, the implementation of MemMap will do a linear scan
53// for free pages. For security, the start of this scan should be randomized. This requires a
54// dynamic initializer.
55// For this to work, it is paramount that there are no other static initializers that access MemMap.
56// Otherwise, calls might see uninitialized values.
Brian Carlstromdb4d5402011-08-09 12:18:28 -070057class MemMap {
58 public:
Alex Lightca97ada2018-02-02 09:25:31 -080059 static constexpr bool kCanReplaceMapping = HAVE_MREMAP_SYSCALL;
60
61 // Replace the data in this memmmap with the data in the memmap pointed to by source. The caller
62 // relinquishes ownership of the source mmap.
63 //
64 // For the call to be successful:
65 // * The range [dest->Begin, dest->Begin() + source->Size()] must not overlap with
66 // [source->Begin(), source->End()].
67 // * Neither source nor dest may be 'reused' mappings (they must own all the pages associated
68 // with them.
69 // * kCanReplaceMapping must be true.
70 // * Neither source nor dest may use manual redzones.
71 // * Both source and dest must have the same offset from the nearest page boundary.
72 // * mremap must succeed when called on the mappings.
73 //
74 // If this call succeeds it will return true and:
75 // * Deallocate *source
76 // * Sets *source to nullptr
77 // * The protection of this will remain the same.
78 // * The size of this will be the size of the source
79 // * The data in this will be the data from source.
80 //
81 // If this call fails it will return false and make no changes to *source or this. The ownership
82 // of the source mmap is returned to the caller.
83 bool ReplaceWith(/*in-out*/MemMap** source, /*out*/std::string* error);
84
Elliott Hughesecd3a6f2012-06-06 18:16:37 -070085 // Request an anonymous region of length 'byte_count' and a requested base address.
Mathieu Chartier2cebb242015-04-21 16:50:40 -070086 // Use null as the requested base address if you don't care.
Vladimir Marko5c42c292015-02-25 12:02:49 +000087 // "reuse" allows re-mapping an address range from an existing mapping.
Elliott Hughes6c9c06d2011-11-07 16:43:47 -080088 //
89 // The word "anonymous" in this context means "not backed by a file". The supplied
Nicolas Geoffraya25dce92016-01-12 16:41:10 +000090 // 'name' will be used -- on systems that support it -- to give the mapping
Elliott Hughes6c9c06d2011-11-07 16:43:47 -080091 // a name.
Brian Carlstrom4a289ed2011-08-16 17:17:49 -070092 //
Mathieu Chartier2cebb242015-04-21 16:50:40 -070093 // On success, returns returns a MemMap instance. On failure, returns null.
Nicolas Geoffraya25dce92016-01-12 16:41:10 +000094 static MemMap* MapAnonymous(const char* name,
Mathieu Chartier42bddce2015-11-09 15:16:56 -080095 uint8_t* addr,
96 size_t byte_count,
97 int prot,
98 bool low_4gb,
99 bool reuse,
Nicolas Geoffraya25dce92016-01-12 16:41:10 +0000100 std::string* error_msg,
Nicolas Geoffray58a73d22016-11-29 21:49:43 +0000101 bool use_ashmem = true);
Brian Carlstromdb4d5402011-08-09 12:18:28 -0700102
David Srbecky1baabf02015-06-16 17:12:34 +0000103 // Create placeholder for a region allocated by direct call to mmap.
104 // This is useful when we do not have control over the code calling mmap,
105 // but when we still want to keep track of it in the list.
106 // The region is not considered to be owned and will not be unmmaped.
107 static MemMap* MapDummy(const char* name, uint8_t* addr, size_t byte_count);
108
Brian Carlstromdb4d5402011-08-09 12:18:28 -0700109 // Map part of a file, taking care of non-page aligned offsets. The
110 // "start" offset is absolute, not relative.
111 //
Mathieu Chartier2cebb242015-04-21 16:50:40 -0700112 // On success, returns returns a MemMap instance. On failure, returns null.
Mathieu Chartier42bddce2015-11-09 15:16:56 -0800113 static MemMap* MapFile(size_t byte_count,
114 int prot,
115 int flags,
116 int fd,
117 off_t start,
118 bool low_4gb,
119 const char* filename,
120 std::string* error_msg) {
121 return MapFileAtAddress(nullptr,
122 byte_count,
123 prot,
124 flags,
125 fd,
126 start,
127 /*low_4gb*/low_4gb,
128 /*reuse*/false,
129 filename,
130 error_msg);
Brian Carlstrom4a289ed2011-08-16 17:17:49 -0700131 }
132
Mathieu Chartierebe2dfc2015-11-24 13:47:52 -0800133 // Map part of a file, taking care of non-page aligned offsets. The "start" offset is absolute,
134 // not relative. This version allows requesting a specific address for the base of the mapping.
135 // "reuse" allows us to create a view into an existing mapping where we do not take ownership of
136 // the memory. If error_msg is null then we do not print /proc/maps to the log if
137 // MapFileAtAddress fails. This helps improve performance of the fail case since reading and
138 // printing /proc/maps takes several milliseconds in the worst case.
Brian Carlstrom4a289ed2011-08-16 17:17:49 -0700139 //
Mathieu Chartier2cebb242015-04-21 16:50:40 -0700140 // On success, returns returns a MemMap instance. On failure, returns null.
Mathieu Chartier42bddce2015-11-09 15:16:56 -0800141 static MemMap* MapFileAtAddress(uint8_t* addr,
142 size_t byte_count,
143 int prot,
144 int flags,
145 int fd,
146 off_t start,
147 bool low_4gb,
148 bool reuse,
149 const char* filename,
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700150 std::string* error_msg);
Brian Carlstromdb4d5402011-08-09 12:18:28 -0700151
Mathieu Chartier2cebb242015-04-21 16:50:40 -0700152 // Releases the memory mapping.
David Sehr1b14fb82017-02-01 10:42:11 -0800153 ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);
Brian Carlstromdb4d5402011-08-09 12:18:28 -0700154
Brian Carlstrom0d6adac2014-02-05 17:39:16 -0800155 const std::string& GetName() const {
156 return name_;
157 }
158
Vladimir Marko9bdf1082016-01-21 12:15:52 +0000159 bool Sync();
160
Logan Chiend88fa262012-06-06 15:23:32 +0800161 bool Protect(int prot);
162
Ian Rogersc5f17732014-06-05 20:48:42 -0700163 void MadviseDontNeedAndZero();
164
Ian Rogers1c849e52012-06-28 14:00:33 -0700165 int GetProtect() const {
166 return prot_;
167 }
168
Ian Rogers13735952014-10-08 12:43:28 -0700169 uint8_t* Begin() const {
Ian Rogers30fab402012-01-23 15:43:46 -0800170 return begin_;
Brian Carlstromdb4d5402011-08-09 12:18:28 -0700171 }
172
Ian Rogers30fab402012-01-23 15:43:46 -0800173 size_t Size() const {
174 return size_;
Brian Carlstromdb4d5402011-08-09 12:18:28 -0700175 }
176
Mathieu Chartier379d09f2015-01-08 11:28:13 -0800177 // Resize the mem-map by unmapping pages at the end. Currently only supports shrinking.
178 void SetSize(size_t new_size);
179
Ian Rogers13735952014-10-08 12:43:28 -0700180 uint8_t* End() const {
Mathieu Chartier2fde5332012-09-14 14:51:54 -0700181 return Begin() + Size();
182 }
183
Brian Carlstrom0d6adac2014-02-05 17:39:16 -0800184 void* BaseBegin() const {
185 return base_begin_;
186 }
187
188 size_t BaseSize() const {
189 return base_size_;
190 }
191
192 void* BaseEnd() const {
Ian Rogers13735952014-10-08 12:43:28 -0700193 return reinterpret_cast<uint8_t*>(BaseBegin()) + BaseSize();
Brian Carlstrom0d6adac2014-02-05 17:39:16 -0800194 }
195
Mathieu Chartier2fde5332012-09-14 14:51:54 -0700196 bool HasAddress(const void* addr) const {
197 return Begin() <= addr && addr < End();
Brian Carlstromb765be02011-08-17 23:54:10 -0700198 }
199
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700200 // Unmap the pages at end and remap them to create another memory map.
Mathieu Chartier42bddce2015-11-09 15:16:56 -0800201 MemMap* RemapAtEnd(uint8_t* new_end,
202 const char* tail_name,
203 int tail_prot,
Nicolas Geoffraya25dce92016-01-12 16:41:10 +0000204 std::string* error_msg,
Orion Hodsondbd05fe2017-08-10 11:41:35 +0100205 bool use_ashmem = true);
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700206
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700207 static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
David Sehr1b14fb82017-02-01 10:42:11 -0800208 REQUIRES(!MemMap::mem_maps_lock_);
Vladimir Marko17a924a2015-05-08 15:17:32 +0100209 static void DumpMaps(std::ostream& os, bool terse = false)
David Sehr1b14fb82017-02-01 10:42:11 -0800210 REQUIRES(!MemMap::mem_maps_lock_);
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700211
David Sehr1b14fb82017-02-01 10:42:11 -0800212 // Init and Shutdown are NOT thread safe.
213 // Both may be called multiple times and MemMap objects may be created any
214 // time after the first call to Init and before the first call to Shutodwn.
215 static void Init() REQUIRES(!MemMap::mem_maps_lock_);
216 static void Shutdown() REQUIRES(!MemMap::mem_maps_lock_);
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700217
Hiroshi Yamauchi6edb9ae2016-02-08 14:18:21 -0800218 // If the map is PROT_READ, try to read each page of the map to check it is in fact readable (not
219 // faulting). This is used to diagnose a bug b/19894268 where mprotect doesn't seem to be working
220 // intermittently.
221 void TryReadable();
222
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800223 // Align the map by unmapping the unaligned parts at the lower and the higher ends.
224 void AlignBy(size_t size);
225
Andreas Gampe0dfc3152017-04-24 07:58:06 -0700226 // For annotation reasons.
227 static std::mutex* GetMemMapsLock() RETURN_CAPABILITY(mem_maps_lock_) {
228 return nullptr;
229 }
230
Brian Carlstromdb4d5402011-08-09 12:18:28 -0700231 private:
Mathieu Chartier42bddce2015-11-09 15:16:56 -0800232 MemMap(const std::string& name,
233 uint8_t* begin,
234 size_t size,
235 void* base_begin,
236 size_t base_size,
237 int prot,
238 bool reuse,
David Sehr1b14fb82017-02-01 10:42:11 -0800239 size_t redzone_size = 0) REQUIRES(!MemMap::mem_maps_lock_);
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700240
Vladimir Marko17a924a2015-05-08 15:17:32 +0100241 static void DumpMapsLocked(std::ostream& os, bool terse)
David Sehr1b14fb82017-02-01 10:42:11 -0800242 REQUIRES(MemMap::mem_maps_lock_);
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700243 static bool HasMemMap(MemMap* map)
David Sehr1b14fb82017-02-01 10:42:11 -0800244 REQUIRES(MemMap::mem_maps_lock_);
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700245 static MemMap* GetLargestMemMapAt(void* address)
David Sehr1b14fb82017-02-01 10:42:11 -0800246 REQUIRES(MemMap::mem_maps_lock_);
Mathieu Chartiere58991b2015-10-13 07:59:34 -0700247 static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg)
David Sehr1b14fb82017-02-01 10:42:11 -0800248 REQUIRES(!MemMap::mem_maps_lock_);
Brian Carlstromdb4d5402011-08-09 12:18:28 -0700249
Mathieu Chartier42bddce2015-11-09 15:16:56 -0800250 // Internal version of mmap that supports low 4gb emulation.
251 static void* MapInternal(void* addr,
252 size_t length,
253 int prot,
254 int flags,
255 int fd,
256 off_t offset,
Andreas Gampe651ba592017-06-14 14:41:33 -0700257 bool low_4gb)
258 REQUIRES(!MemMap::mem_maps_lock_);
259 static void* MapInternalArtLow4GBAllocator(size_t length,
260 int prot,
261 int flags,
262 int fd,
263 off_t offset)
264 REQUIRES(!MemMap::mem_maps_lock_);
Mathieu Chartier42bddce2015-11-09 15:16:56 -0800265
Jim_Guoa62a5882014-04-28 11:11:57 +0800266 const std::string name_;
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800267 uint8_t* begin_; // Start of data. May be changed by AlignBy.
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700268 size_t size_; // Length of data.
Brian Carlstromdb4d5402011-08-09 12:18:28 -0700269
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800270 void* base_begin_; // Page-aligned base address. May be changed by AlignBy.
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700271 size_t base_size_; // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
Ian Rogers1c849e52012-06-28 14:00:33 -0700272 int prot_; // Protection of the map.
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700273
Jim_Guoa62a5882014-04-28 11:11:57 +0800274 // When reuse_ is true, this is just a view of an existing mapping
275 // and we do not take ownership and are not responsible for
276 // unmapping.
277 const bool reuse_;
278
Alex Lightca97ada2018-02-02 09:25:31 -0800279 // When already_unmapped_ is true the destructor will not call munmap.
280 bool already_unmapped_;
281
Evgenii Stepanov1e133742015-05-20 12:30:59 -0700282 const size_t redzone_size_;
283
Ian Rogersc3ccc102014-06-25 11:52:14 -0700284#if USE_ART_LOW_4G_ALLOCATOR
285 static uintptr_t next_mem_pos_; // Next memory location to check for low_4g extent.
Stuart Monteith8dba5aa2014-03-12 12:44:01 +0000286#endif
287
David Sehr1b14fb82017-02-01 10:42:11 -0800288 static std::mutex* mem_maps_lock_;
289
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700290 friend class MemMapTest; // To allow access to base_begin_ and base_size_.
Brian Carlstromdb4d5402011-08-09 12:18:28 -0700291};
Mathieu Chartier6e6078a2016-10-24 15:45:41 -0700292
Brian Carlstrom0d6adac2014-02-05 17:39:16 -0800293std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);
Brian Carlstromdb4d5402011-08-09 12:18:28 -0700294
Mathieu Chartier6e6078a2016-10-24 15:45:41 -0700295// Zero and release pages if possible, no requirements on alignments.
296void ZeroAndReleasePages(void* address, size_t length);
297
}  // namespace art

#endif  // ART_RUNTIME_MEM_MAP_H_