/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
#ifndef ART_RUNTIME_MEM_MAP_H_
#define ART_RUNTIME_MEM_MAP_H_

#include <stddef.h>
#include <sys/mman.h>  // For the PROT_* and MAP_* constants.
#include <sys/types.h>

#include <map>
#include <mutex>
#include <string>

#include "base/allocator.h"
#include "base/mutex.h"
#include "globals.h"
namespace art {
// On 64-bit platforms that cannot ask the kernel for low addresses directly
// (no MAP_32BIT; i.e. everything except x86-64 Linux), ART emulates low-4GB
// allocation itself by scanning for free pages below 4GB.
#if defined(__LP64__) && (!defined(__x86_64__) || defined(__APPLE__))
#define USE_ART_LOW_4G_ALLOCATOR 1
#else
#define USE_ART_LOW_4G_ALLOCATOR 0
#endif

// On Linux, madvise(MADV_DONTNEED) on an anonymous private mapping guarantees
// subsequent reads observe zero-filled pages; other platforms give no such guarantee.
#ifdef __linux__
static constexpr bool kMadviseZeroes = true;
#else
static constexpr bool kMadviseZeroes = false;
#endif

// Used to keep track of mmap segments.
//
// On 64b systems not supporting MAP_32BIT, the implementation of MemMap will do a linear scan
// for free pages. For security, the start of this scan should be randomized. This requires a
// dynamic initializer.
// For this to work, it is paramount that there are no other static initializers that access MemMap.
// Otherwise, calls might see uninitialized values.
Brian Carlstromdb4d5402011-08-09 12:18:28 -070054class MemMap {
55 public:
Elliott Hughesecd3a6f2012-06-06 18:16:37 -070056 // Request an anonymous region of length 'byte_count' and a requested base address.
Mathieu Chartier2cebb242015-04-21 16:50:40 -070057 // Use null as the requested base address if you don't care.
Vladimir Marko5c42c292015-02-25 12:02:49 +000058 // "reuse" allows re-mapping an address range from an existing mapping.
Elliott Hughes6c9c06d2011-11-07 16:43:47 -080059 //
60 // The word "anonymous" in this context means "not backed by a file". The supplied
Nicolas Geoffraya25dce92016-01-12 16:41:10 +000061 // 'name' will be used -- on systems that support it -- to give the mapping
Elliott Hughes6c9c06d2011-11-07 16:43:47 -080062 // a name.
Brian Carlstrom4a289ed2011-08-16 17:17:49 -070063 //
Mathieu Chartier2cebb242015-04-21 16:50:40 -070064 // On success, returns returns a MemMap instance. On failure, returns null.
Nicolas Geoffraya25dce92016-01-12 16:41:10 +000065 static MemMap* MapAnonymous(const char* name,
Mathieu Chartier42bddce2015-11-09 15:16:56 -080066 uint8_t* addr,
67 size_t byte_count,
68 int prot,
69 bool low_4gb,
70 bool reuse,
Nicolas Geoffraya25dce92016-01-12 16:41:10 +000071 std::string* error_msg,
Nicolas Geoffray58a73d22016-11-29 21:49:43 +000072 bool use_ashmem = true);
Brian Carlstromdb4d5402011-08-09 12:18:28 -070073
David Srbecky1baabf02015-06-16 17:12:34 +000074 // Create placeholder for a region allocated by direct call to mmap.
75 // This is useful when we do not have control over the code calling mmap,
76 // but when we still want to keep track of it in the list.
77 // The region is not considered to be owned and will not be unmmaped.
78 static MemMap* MapDummy(const char* name, uint8_t* addr, size_t byte_count);
79
Brian Carlstromdb4d5402011-08-09 12:18:28 -070080 // Map part of a file, taking care of non-page aligned offsets. The
81 // "start" offset is absolute, not relative.
82 //
Mathieu Chartier2cebb242015-04-21 16:50:40 -070083 // On success, returns returns a MemMap instance. On failure, returns null.
Mathieu Chartier42bddce2015-11-09 15:16:56 -080084 static MemMap* MapFile(size_t byte_count,
85 int prot,
86 int flags,
87 int fd,
88 off_t start,
89 bool low_4gb,
90 const char* filename,
91 std::string* error_msg) {
92 return MapFileAtAddress(nullptr,
93 byte_count,
94 prot,
95 flags,
96 fd,
97 start,
98 /*low_4gb*/low_4gb,
99 /*reuse*/false,
100 filename,
101 error_msg);
Brian Carlstrom4a289ed2011-08-16 17:17:49 -0700102 }
103
Mathieu Chartierebe2dfc2015-11-24 13:47:52 -0800104 // Map part of a file, taking care of non-page aligned offsets. The "start" offset is absolute,
105 // not relative. This version allows requesting a specific address for the base of the mapping.
106 // "reuse" allows us to create a view into an existing mapping where we do not take ownership of
107 // the memory. If error_msg is null then we do not print /proc/maps to the log if
108 // MapFileAtAddress fails. This helps improve performance of the fail case since reading and
109 // printing /proc/maps takes several milliseconds in the worst case.
Brian Carlstrom4a289ed2011-08-16 17:17:49 -0700110 //
Mathieu Chartier2cebb242015-04-21 16:50:40 -0700111 // On success, returns returns a MemMap instance. On failure, returns null.
Mathieu Chartier42bddce2015-11-09 15:16:56 -0800112 static MemMap* MapFileAtAddress(uint8_t* addr,
113 size_t byte_count,
114 int prot,
115 int flags,
116 int fd,
117 off_t start,
118 bool low_4gb,
119 bool reuse,
120 const char* filename,
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700121 std::string* error_msg);
Brian Carlstromdb4d5402011-08-09 12:18:28 -0700122
Mathieu Chartier2cebb242015-04-21 16:50:40 -0700123 // Releases the memory mapping.
David Sehr1b14fb82017-02-01 10:42:11 -0800124 ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);
Brian Carlstromdb4d5402011-08-09 12:18:28 -0700125
Brian Carlstrom0d6adac2014-02-05 17:39:16 -0800126 const std::string& GetName() const {
127 return name_;
128 }
129
Vladimir Marko9bdf1082016-01-21 12:15:52 +0000130 bool Sync();
131
Logan Chiend88fa262012-06-06 15:23:32 +0800132 bool Protect(int prot);
133
Ian Rogersc5f17732014-06-05 20:48:42 -0700134 void MadviseDontNeedAndZero();
135
Ian Rogers1c849e52012-06-28 14:00:33 -0700136 int GetProtect() const {
137 return prot_;
138 }
139
Ian Rogers13735952014-10-08 12:43:28 -0700140 uint8_t* Begin() const {
Ian Rogers30fab402012-01-23 15:43:46 -0800141 return begin_;
Brian Carlstromdb4d5402011-08-09 12:18:28 -0700142 }
143
Ian Rogers30fab402012-01-23 15:43:46 -0800144 size_t Size() const {
145 return size_;
Brian Carlstromdb4d5402011-08-09 12:18:28 -0700146 }
147
Mathieu Chartier379d09f2015-01-08 11:28:13 -0800148 // Resize the mem-map by unmapping pages at the end. Currently only supports shrinking.
149 void SetSize(size_t new_size);
150
Ian Rogers13735952014-10-08 12:43:28 -0700151 uint8_t* End() const {
Mathieu Chartier2fde5332012-09-14 14:51:54 -0700152 return Begin() + Size();
153 }
154
Brian Carlstrom0d6adac2014-02-05 17:39:16 -0800155 void* BaseBegin() const {
156 return base_begin_;
157 }
158
159 size_t BaseSize() const {
160 return base_size_;
161 }
162
163 void* BaseEnd() const {
Ian Rogers13735952014-10-08 12:43:28 -0700164 return reinterpret_cast<uint8_t*>(BaseBegin()) + BaseSize();
Brian Carlstrom0d6adac2014-02-05 17:39:16 -0800165 }
166
Mathieu Chartier2fde5332012-09-14 14:51:54 -0700167 bool HasAddress(const void* addr) const {
168 return Begin() <= addr && addr < End();
Brian Carlstromb765be02011-08-17 23:54:10 -0700169 }
170
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700171 // Unmap the pages at end and remap them to create another memory map.
Mathieu Chartier42bddce2015-11-09 15:16:56 -0800172 MemMap* RemapAtEnd(uint8_t* new_end,
173 const char* tail_name,
174 int tail_prot,
Nicolas Geoffraya25dce92016-01-12 16:41:10 +0000175 std::string* error_msg,
Nicolas Geoffray58a73d22016-11-29 21:49:43 +0000176 bool use_ashmem = true);
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700177
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700178 static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
David Sehr1b14fb82017-02-01 10:42:11 -0800179 REQUIRES(!MemMap::mem_maps_lock_);
Vladimir Marko17a924a2015-05-08 15:17:32 +0100180 static void DumpMaps(std::ostream& os, bool terse = false)
David Sehr1b14fb82017-02-01 10:42:11 -0800181 REQUIRES(!MemMap::mem_maps_lock_);
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700182
Mathieu Chartierbad02672014-08-25 13:08:22 -0700183 typedef AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps> Maps;
184
David Sehr1b14fb82017-02-01 10:42:11 -0800185 // Init and Shutdown are NOT thread safe.
186 // Both may be called multiple times and MemMap objects may be created any
187 // time after the first call to Init and before the first call to Shutodwn.
188 static void Init() REQUIRES(!MemMap::mem_maps_lock_);
189 static void Shutdown() REQUIRES(!MemMap::mem_maps_lock_);
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700190
Hiroshi Yamauchi6edb9ae2016-02-08 14:18:21 -0800191 // If the map is PROT_READ, try to read each page of the map to check it is in fact readable (not
192 // faulting). This is used to diagnose a bug b/19894268 where mprotect doesn't seem to be working
193 // intermittently.
194 void TryReadable();
195
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800196 // Align the map by unmapping the unaligned parts at the lower and the higher ends.
197 void AlignBy(size_t size);
198
Brian Carlstromdb4d5402011-08-09 12:18:28 -0700199 private:
Mathieu Chartier42bddce2015-11-09 15:16:56 -0800200 MemMap(const std::string& name,
201 uint8_t* begin,
202 size_t size,
203 void* base_begin,
204 size_t base_size,
205 int prot,
206 bool reuse,
David Sehr1b14fb82017-02-01 10:42:11 -0800207 size_t redzone_size = 0) REQUIRES(!MemMap::mem_maps_lock_);
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700208
Vladimir Marko17a924a2015-05-08 15:17:32 +0100209 static void DumpMapsLocked(std::ostream& os, bool terse)
David Sehr1b14fb82017-02-01 10:42:11 -0800210 REQUIRES(MemMap::mem_maps_lock_);
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700211 static bool HasMemMap(MemMap* map)
David Sehr1b14fb82017-02-01 10:42:11 -0800212 REQUIRES(MemMap::mem_maps_lock_);
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700213 static MemMap* GetLargestMemMapAt(void* address)
David Sehr1b14fb82017-02-01 10:42:11 -0800214 REQUIRES(MemMap::mem_maps_lock_);
Mathieu Chartiere58991b2015-10-13 07:59:34 -0700215 static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg)
David Sehr1b14fb82017-02-01 10:42:11 -0800216 REQUIRES(!MemMap::mem_maps_lock_);
Brian Carlstromdb4d5402011-08-09 12:18:28 -0700217
Mathieu Chartier42bddce2015-11-09 15:16:56 -0800218 // Internal version of mmap that supports low 4gb emulation.
219 static void* MapInternal(void* addr,
220 size_t length,
221 int prot,
222 int flags,
223 int fd,
224 off_t offset,
225 bool low_4gb);
226
Jim_Guoa62a5882014-04-28 11:11:57 +0800227 const std::string name_;
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800228 uint8_t* begin_; // Start of data. May be changed by AlignBy.
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700229 size_t size_; // Length of data.
Brian Carlstromdb4d5402011-08-09 12:18:28 -0700230
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800231 void* base_begin_; // Page-aligned base address. May be changed by AlignBy.
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700232 size_t base_size_; // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
Ian Rogers1c849e52012-06-28 14:00:33 -0700233 int prot_; // Protection of the map.
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700234
Jim_Guoa62a5882014-04-28 11:11:57 +0800235 // When reuse_ is true, this is just a view of an existing mapping
236 // and we do not take ownership and are not responsible for
237 // unmapping.
238 const bool reuse_;
239
Evgenii Stepanov1e133742015-05-20 12:30:59 -0700240 const size_t redzone_size_;
241
Ian Rogersc3ccc102014-06-25 11:52:14 -0700242#if USE_ART_LOW_4G_ALLOCATOR
243 static uintptr_t next_mem_pos_; // Next memory location to check for low_4g extent.
Stuart Monteith8dba5aa2014-03-12 12:44:01 +0000244#endif
245
David Sehr1b14fb82017-02-01 10:42:11 -0800246 static std::mutex* mem_maps_lock_;
247
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700248 // All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()).
David Sehr1b14fb82017-02-01 10:42:11 -0800249 static Maps* maps_ GUARDED_BY(MemMap::mem_maps_lock_);
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700250
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700251 friend class MemMapTest; // To allow access to base_begin_ and base_size_.
Brian Carlstromdb4d5402011-08-09 12:18:28 -0700252};
Mathieu Chartier6e6078a2016-10-24 15:45:41 -0700253
Brian Carlstrom0d6adac2014-02-05 17:39:16 -0800254std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);
Andreas Gampe277ccbd2014-11-03 21:36:10 -0800255std::ostream& operator<<(std::ostream& os, const MemMap::Maps& mem_maps);
Brian Carlstromdb4d5402011-08-09 12:18:28 -0700256
Mathieu Chartier6e6078a2016-10-24 15:45:41 -0700257// Zero and release pages if possible, no requirements on alignments.
258void ZeroAndReleasePages(void* address, size_t length);

}  // namespace art

#endif  // ART_RUNTIME_MEM_MAP_H_