blob: 9c0b025d02b1f1dc947f07bc2e9a7dcb50c96b0b [file] [log] [blame]
Ian Rogers1d54e732013-05-02 21:10:01 -07001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include "image_space.h"

#include <sys/statvfs.h>
#include <sys/types.h>
#include <unistd.h>

#include <memory>
#include <random>

#include "android-base/stringprintf.h"
#include "android-base/strings.h"

#include "arch/instruction_set.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/array_ref.h"
#include "base/bit_memory_region.h"
#include "base/callee_save_type.h"
#include "base/enums.h"
#include "base/file_utils.h"
#include "base/macros.h"
#include "base/os.h"
#include "base/scoped_flock.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "class_root.h"
#include "dex/art_dex_file_loader.h"
#include "dex/dex_file_loader.h"
#include "exec_utils.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/task_processor.h"
#include "image-inl.h"
#include "image_space_fs.h"
#include "intern_table-inl.h"
#include "mirror/class-inl.h"
#include "mirror/executable-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
#include "oat_file.h"
#include "runtime.h"
#include "space-inl.h"
Ian Rogers1d54e732013-05-02 21:10:01 -070059
60namespace art {
61namespace gc {
62namespace space {
63
Vladimir Marko0ace5632018-12-14 11:11:47 +000064using android::base::StringAppendF;
Andreas Gampe46ee31b2016-12-14 10:11:49 -080065using android::base::StringPrintf;
66
Ian Rogersef7d42f2014-01-06 12:55:46 -080067Atomic<uint32_t> ImageSpace::bitmap_index_(0);
Ian Rogers1d54e732013-05-02 21:10:01 -070068
Jeff Haodcdc85b2015-12-04 14:06:18 -080069ImageSpace::ImageSpace(const std::string& image_filename,
70 const char* image_location,
Vladimir Markoc34bebf2018-08-16 16:12:49 +010071 MemMap&& mem_map,
Mathieu Chartier6f382012019-07-30 09:47:35 -070072 accounting::ContinuousSpaceBitmap&& live_bitmap,
Mathieu Chartier2d124ec2016-01-05 18:03:15 -080073 uint8_t* end)
74 : MemMapSpace(image_filename,
Vladimir Markoc34bebf2018-08-16 16:12:49 +010075 std::move(mem_map),
76 mem_map.Begin(),
Mathieu Chartier2d124ec2016-01-05 18:03:15 -080077 end,
78 end,
Narayan Kamath52f84882014-05-02 10:10:39 +010079 kGcRetentionPolicyNeverCollect),
Vladimir Markoc09cd052018-08-23 16:36:36 +010080 live_bitmap_(std::move(live_bitmap)),
Jeff Haodcdc85b2015-12-04 14:06:18 -080081 oat_file_non_owned_(nullptr),
Mathieu Chartier2d124ec2016-01-05 18:03:15 -080082 image_location_(image_location) {
Mathieu Chartier6f382012019-07-30 09:47:35 -070083 DCHECK(live_bitmap_.IsValid());
Ian Rogers1d54e732013-05-02 21:10:01 -070084}
85
Alex Lightcf4bf382014-07-24 11:29:14 -070086static int32_t ChooseRelocationOffsetDelta(int32_t min_delta, int32_t max_delta) {
87 CHECK_ALIGNED(min_delta, kPageSize);
88 CHECK_ALIGNED(max_delta, kPageSize);
89 CHECK_LT(min_delta, max_delta);
90
Alex Light15324762015-11-19 11:03:10 -080091 int32_t r = GetRandomNumber<int32_t>(min_delta, max_delta);
Alex Lightcf4bf382014-07-24 11:29:14 -070092 if (r % 2 == 0) {
93 r = RoundUp(r, kPageSize);
94 } else {
95 r = RoundDown(r, kPageSize);
96 }
97 CHECK_LE(min_delta, r);
98 CHECK_GE(max_delta, r);
99 CHECK_ALIGNED(r, kPageSize);
100 return r;
101}
102
Andreas Gampea463b6a2016-08-12 21:53:32 -0700103static int32_t ChooseRelocationOffsetDelta() {
104 return ChooseRelocationOffsetDelta(ART_BASE_ADDRESS_MIN_DELTA, ART_BASE_ADDRESS_MAX_DELTA);
105}
106
107static bool GenerateImage(const std::string& image_filename,
108 InstructionSet image_isa,
Alex Light25396132014-08-27 15:37:23 -0700109 std::string* error_msg) {
Vladimir Marko91f10322018-12-07 18:04:10 +0000110 Runtime* runtime = Runtime::Current();
111 const std::vector<std::string>& boot_class_path = runtime->GetBootClassPath();
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700112 if (boot_class_path.empty()) {
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700113 *error_msg = "Failed to generate image because no boot class path specified";
114 return false;
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700115 }
Alex Light25396132014-08-27 15:37:23 -0700116 // We should clean up so we are more likely to have room for the image.
117 if (Runtime::Current()->IsZygote()) {
Andreas Gampe3c13a792014-09-18 20:56:04 -0700118 LOG(INFO) << "Pruning dalvik-cache since we are generating an image and will need to recompile";
Narayan Kamath28bc9872014-11-07 17:46:28 +0000119 PruneDalvikCache(image_isa);
Alex Light25396132014-08-27 15:37:23 -0700120 }
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700121
Mathieu Chartier8bbc8c02013-07-31 16:27:01 -0700122 std::vector<std::string> arg_vector;
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700123
Tsu Chiang Chuang12e6d742014-05-22 10:22:25 -0700124 std::string dex2oat(Runtime::Current()->GetCompilerExecutable());
Mathieu Chartier08d7d442013-07-31 18:08:51 -0700125 arg_vector.push_back(dex2oat);
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700126
wangshumind6d878e2019-05-08 12:52:39 +0800127 char* dex2oat_bcp = getenv("DEX2OATBOOTCLASSPATH");
128 std::vector<std::string> dex2oat_bcp_vector;
129 if (dex2oat_bcp != nullptr) {
130 arg_vector.push_back("--runtime-arg");
131 arg_vector.push_back(StringPrintf("-Xbootclasspath:%s", dex2oat_bcp));
132 Split(dex2oat_bcp, ':', &dex2oat_bcp_vector);
133 }
134
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700135 std::string image_option_string("--image=");
Narayan Kamath52f84882014-05-02 10:10:39 +0100136 image_option_string += image_filename;
Mathieu Chartier8bbc8c02013-07-31 16:27:01 -0700137 arg_vector.push_back(image_option_string);
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700138
wangshumind6d878e2019-05-08 12:52:39 +0800139 if (!dex2oat_bcp_vector.empty()) {
140 for (size_t i = 0u; i < dex2oat_bcp_vector.size(); i++) {
141 arg_vector.push_back(std::string("--dex-file=") + dex2oat_bcp_vector[i]);
142 arg_vector.push_back(std::string("--dex-location=") + dex2oat_bcp_vector[i]);
143 }
144 } else {
145 const std::vector<std::string>& boot_class_path_locations =
146 runtime->GetBootClassPathLocations();
147 DCHECK_EQ(boot_class_path.size(), boot_class_path_locations.size());
148 for (size_t i = 0u; i < boot_class_path.size(); i++) {
149 arg_vector.push_back(std::string("--dex-file=") + boot_class_path[i]);
150 arg_vector.push_back(std::string("--dex-location=") + boot_class_path_locations[i]);
151 }
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700152 }
153
154 std::string oat_file_option_string("--oat-file=");
Brian Carlstrom2f1e15c2014-10-27 16:27:06 -0700155 oat_file_option_string += ImageHeader::GetOatLocationFromImageLocation(image_filename);
Mathieu Chartier8bbc8c02013-07-31 16:27:01 -0700156 arg_vector.push_back(oat_file_option_string);
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700157
Sebastien Hertz0de11332015-05-13 12:14:05 +0200158 // Note: we do not generate a fully debuggable boot image so we do not pass the
159 // compiler flag --debuggable here.
160
Igor Murashkinb1d8c312015-08-04 11:18:43 -0700161 Runtime::Current()->AddCurrentRuntimeFeaturesAsDex2OatArguments(&arg_vector);
Brian Carlstrom31d8f522014-09-29 11:22:54 -0700162 CHECK_EQ(image_isa, kRuntimeISA)
163 << "We should always be generating an image for the current isa.";
Ian Rogers8afeb852014-04-02 14:55:49 -0700164
Andreas Gampea463b6a2016-08-12 21:53:32 -0700165 int32_t base_offset = ChooseRelocationOffsetDelta();
Alex Lightcf4bf382014-07-24 11:29:14 -0700166 LOG(INFO) << "Using an offset of 0x" << std::hex << base_offset << " from default "
167 << "art base address of 0x" << std::hex << ART_BASE_ADDRESS;
168 arg_vector.push_back(StringPrintf("--base=0x%x", ART_BASE_ADDRESS + base_offset));
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700169
Brian Carlstrom57309db2014-07-30 15:13:25 -0700170 if (!kIsTargetBuild) {
Mathieu Chartier8bbc8c02013-07-31 16:27:01 -0700171 arg_vector.push_back("--host");
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700172 }
173
Brian Carlstrom6449c622014-02-10 23:48:36 -0800174 const std::vector<std::string>& compiler_options = Runtime::Current()->GetImageCompilerOptions();
Brian Carlstrom2ec65202014-03-03 15:16:37 -0800175 for (size_t i = 0; i < compiler_options.size(); ++i) {
Brian Carlstrom6449c622014-02-10 23:48:36 -0800176 arg_vector.push_back(compiler_options[i].c_str());
177 }
178
Andreas Gampe9186ced2016-12-12 14:28:21 -0800179 std::string command_line(android::base::Join(arg_vector, ' '));
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700180 LOG(INFO) << "GenerateImage: " << command_line;
Brian Carlstrom6449c622014-02-10 23:48:36 -0800181 return Exec(arg_vector, error_msg);
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700182}
183
Andreas Gampea463b6a2016-08-12 21:53:32 -0700184static bool FindImageFilenameImpl(const char* image_location,
185 const InstructionSet image_isa,
186 bool* has_system,
187 std::string* system_filename,
188 bool* dalvik_cache_exists,
189 std::string* dalvik_cache,
190 bool* is_global_cache,
191 bool* has_cache,
192 std::string* cache_filename) {
193 DCHECK(dalvik_cache != nullptr);
194
Alex Lighta59dd802014-07-02 16:28:08 -0700195 *has_system = false;
196 *has_cache = false;
Brian Carlstrom0e12bdc2014-05-14 17:44:28 -0700197 // image_location = /system/framework/boot.art
198 // system_image_location = /system/framework/<image_isa>/boot.art
199 std::string system_image_filename(GetSystemImageFilename(image_location, image_isa));
200 if (OS::FileExists(system_image_filename.c_str())) {
Alex Lighta59dd802014-07-02 16:28:08 -0700201 *system_filename = system_image_filename;
202 *has_system = true;
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700203 }
Narayan Kamath52f84882014-05-02 10:10:39 +0100204
Alex Lighta59dd802014-07-02 16:28:08 -0700205 bool have_android_data = false;
206 *dalvik_cache_exists = false;
Andreas Gampea463b6a2016-08-12 21:53:32 -0700207 GetDalvikCache(GetInstructionSetString(image_isa),
Vladimir Markof4efa9e2018-10-17 14:12:45 +0100208 /*create_if_absent=*/ true,
Andreas Gampea463b6a2016-08-12 21:53:32 -0700209 dalvik_cache,
210 &have_android_data,
211 dalvik_cache_exists,
212 is_global_cache);
Narayan Kamath52f84882014-05-02 10:10:39 +0100213
Vladimir Marko82e1e272018-08-20 13:38:06 +0000214 if (*dalvik_cache_exists) {
215 DCHECK(have_android_data);
Alex Lighta59dd802014-07-02 16:28:08 -0700216 // Always set output location even if it does not exist,
217 // so that the caller knows where to create the image.
218 //
219 // image_location = /system/framework/boot.art
Vladimir Marko82e1e272018-08-20 13:38:06 +0000220 // *image_filename = /data/dalvik-cache/<image_isa>/system@framework@boot.art
Alex Lighta59dd802014-07-02 16:28:08 -0700221 std::string error_msg;
Andreas Gampea463b6a2016-08-12 21:53:32 -0700222 if (!GetDalvikCacheFilename(image_location,
223 dalvik_cache->c_str(),
224 cache_filename,
225 &error_msg)) {
Alex Lighta59dd802014-07-02 16:28:08 -0700226 LOG(WARNING) << error_msg;
227 return *has_system;
228 }
229 *has_cache = OS::FileExists(cache_filename->c_str());
230 }
231 return *has_system || *has_cache;
232}
233
Andreas Gampea463b6a2016-08-12 21:53:32 -0700234bool ImageSpace::FindImageFilename(const char* image_location,
235 const InstructionSet image_isa,
236 std::string* system_filename,
237 bool* has_system,
238 std::string* cache_filename,
239 bool* dalvik_cache_exists,
240 bool* has_cache,
241 bool* is_global_cache) {
242 std::string dalvik_cache_unused;
243 return FindImageFilenameImpl(image_location,
244 image_isa,
245 has_system,
246 system_filename,
247 dalvik_cache_exists,
248 &dalvik_cache_unused,
249 is_global_cache,
250 has_cache,
251 cache_filename);
252}
253
Alex Lighta59dd802014-07-02 16:28:08 -0700254static bool ReadSpecificImageHeader(const char* filename, ImageHeader* image_header) {
255 std::unique_ptr<File> image_file(OS::OpenFileForReading(filename));
256 if (image_file.get() == nullptr) {
257 return false;
258 }
259 const bool success = image_file->ReadFully(image_header, sizeof(ImageHeader));
260 if (!success || !image_header->IsValid()) {
261 return false;
262 }
263 return true;
264}
265
Vladimir Marko4df2d802018-09-27 16:42:44 +0000266static std::unique_ptr<ImageHeader> ReadSpecificImageHeader(const char* filename,
267 std::string* error_msg) {
Alex Lighta59dd802014-07-02 16:28:08 -0700268 std::unique_ptr<ImageHeader> hdr(new ImageHeader);
269 if (!ReadSpecificImageHeader(filename, hdr.get())) {
Brian Carlstrom31d8f522014-09-29 11:22:54 -0700270 *error_msg = StringPrintf("Unable to read image header for %s", filename);
Alex Lighta59dd802014-07-02 16:28:08 -0700271 return nullptr;
272 }
Vladimir Marko4df2d802018-09-27 16:42:44 +0000273 return hdr;
Narayan Kamath52f84882014-05-02 10:10:39 +0100274}
275
Vladimir Marko4df2d802018-09-27 16:42:44 +0000276std::unique_ptr<ImageHeader> ImageSpace::ReadImageHeader(const char* image_location,
277 const InstructionSet image_isa,
Andreas Gampe86823542019-02-25 09:38:49 -0800278 ImageSpaceLoadingOrder order,
Vladimir Marko4df2d802018-09-27 16:42:44 +0000279 std::string* error_msg) {
Alex Lighta59dd802014-07-02 16:28:08 -0700280 std::string system_filename;
281 bool has_system = false;
282 std::string cache_filename;
283 bool has_cache = false;
284 bool dalvik_cache_exists = false;
Andreas Gampe3c13a792014-09-18 20:56:04 -0700285 bool is_global_cache = false;
Vladimir Marko4df2d802018-09-27 16:42:44 +0000286 if (FindImageFilename(image_location,
287 image_isa,
288 &system_filename,
289 &has_system,
290 &cache_filename,
291 &dalvik_cache_exists,
292 &has_cache,
293 &is_global_cache)) {
Andreas Gampe86823542019-02-25 09:38:49 -0800294 if (order == ImageSpaceLoadingOrder::kSystemFirst) {
295 if (has_system) {
296 return ReadSpecificImageHeader(system_filename.c_str(), error_msg);
297 }
298 if (has_cache) {
299 return ReadSpecificImageHeader(cache_filename.c_str(), error_msg);
300 }
301 } else {
302 if (has_cache) {
303 return ReadSpecificImageHeader(cache_filename.c_str(), error_msg);
304 }
305 if (has_system) {
306 return ReadSpecificImageHeader(system_filename.c_str(), error_msg);
307 }
Narayan Kamath52f84882014-05-02 10:10:39 +0100308 }
Narayan Kamath52f84882014-05-02 10:10:39 +0100309 }
310
Brian Carlstrom31d8f522014-09-29 11:22:54 -0700311 *error_msg = StringPrintf("Unable to find image file for %s", image_location);
Narayan Kamath52f84882014-05-02 10:10:39 +0100312 return nullptr;
313}
314
Robert Sesekbfa1f8d2016-08-15 15:21:09 -0400315static bool CanWriteToDalvikCache(const InstructionSet isa) {
316 const std::string dalvik_cache = GetDalvikCache(GetInstructionSetString(isa));
317 if (access(dalvik_cache.c_str(), O_RDWR) == 0) {
318 return true;
319 } else if (errno != EACCES) {
320 PLOG(WARNING) << "CanWriteToDalvikCache returned error other than EACCES";
321 }
322 return false;
323}
324
325static bool ImageCreationAllowed(bool is_global_cache,
326 const InstructionSet isa,
Vladimir Marko3364d182019-03-13 13:55:01 +0000327 bool is_zygote,
Robert Sesekbfa1f8d2016-08-15 15:21:09 -0400328 std::string* error_msg) {
Andreas Gampe3c13a792014-09-18 20:56:04 -0700329 // Anyone can write into a "local" cache.
330 if (!is_global_cache) {
331 return true;
332 }
333
Robert Sesekbfa1f8d2016-08-15 15:21:09 -0400334 // Only the zygote running as root is allowed to create the global boot image.
335 // If the zygote is running as non-root (and cannot write to the dalvik-cache),
336 // then image creation is not allowed..
Vladimir Marko3364d182019-03-13 13:55:01 +0000337 if (is_zygote) {
Robert Sesekbfa1f8d2016-08-15 15:21:09 -0400338 return CanWriteToDalvikCache(isa);
Andreas Gampe3c13a792014-09-18 20:56:04 -0700339 }
340
341 *error_msg = "Only the zygote can create the global boot image.";
342 return false;
343}
344
Mathieu Chartier31e89252013-08-28 11:29:12 -0700345void ImageSpace::VerifyImageAllocations() {
Ian Rogers13735952014-10-08 12:43:28 -0700346 uint8_t* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
Mathieu Chartier31e89252013-08-28 11:29:12 -0700347 while (current < End()) {
Mathieu Chartierc7853442015-03-27 14:35:38 -0700348 CHECK_ALIGNED(current, kObjectAlignment);
349 auto* obj = reinterpret_cast<mirror::Object*>(current);
Mathieu Chartier31e89252013-08-28 11:29:12 -0700350 CHECK(obj->GetClass() != nullptr) << "Image object at address " << obj << " has null class";
Mathieu Chartier6f382012019-07-30 09:47:35 -0700351 CHECK(live_bitmap_.Test(obj)) << obj->PrettyTypeOf();
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -0700352 if (kUseBakerReadBarrier) {
353 obj->AssertReadBarrierState();
Hiroshi Yamauchi9d04a202014-01-31 13:35:49 -0800354 }
Mathieu Chartier31e89252013-08-28 11:29:12 -0700355 current += RoundUp(obj->SizeOf(), kObjectAlignment);
356 }
357}
358
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800359// Helper class for relocating from one range of memory to another.
360class RelocationRange {
361 public:
362 RelocationRange() = default;
363 RelocationRange(const RelocationRange&) = default;
364 RelocationRange(uintptr_t source, uintptr_t dest, uintptr_t length)
365 : source_(source),
366 dest_(dest),
367 length_(length) {}
368
Mathieu Chartier91edc622016-02-16 17:16:01 -0800369 bool InSource(uintptr_t address) const {
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800370 return address - source_ < length_;
371 }
372
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -0800373 bool InDest(const void* dest) const {
374 return InDest(reinterpret_cast<uintptr_t>(dest));
375 }
376
Mathieu Chartier91edc622016-02-16 17:16:01 -0800377 bool InDest(uintptr_t address) const {
378 return address - dest_ < length_;
379 }
380
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800381 // Translate a source address to the destination space.
382 uintptr_t ToDest(uintptr_t address) const {
Mathieu Chartier91edc622016-02-16 17:16:01 -0800383 DCHECK(InSource(address));
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800384 return address + Delta();
385 }
386
Vladimir Markoc0b30c92019-07-23 14:58:25 +0100387 template <typename T>
388 T* ToDest(T* src) const {
389 return reinterpret_cast<T*>(ToDest(reinterpret_cast<uintptr_t>(src)));
390 }
391
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800392 // Returns the delta between the dest from the source.
Mathieu Chartier0b4cbd02016-03-08 16:49:58 -0800393 uintptr_t Delta() const {
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800394 return dest_ - source_;
395 }
396
397 uintptr_t Source() const {
398 return source_;
399 }
400
401 uintptr_t Dest() const {
402 return dest_;
403 }
404
405 uintptr_t Length() const {
406 return length_;
407 }
408
409 private:
410 const uintptr_t source_;
411 const uintptr_t dest_;
412 const uintptr_t length_;
413};
414
Mathieu Chartier0b4cbd02016-03-08 16:49:58 -0800415std::ostream& operator<<(std::ostream& os, const RelocationRange& reloc) {
416 return os << "(" << reinterpret_cast<const void*>(reloc.Source()) << "-"
417 << reinterpret_cast<const void*>(reloc.Source() + reloc.Length()) << ")->("
418 << reinterpret_cast<const void*>(reloc.Dest()) << "-"
419 << reinterpret_cast<const void*>(reloc.Dest() + reloc.Length()) << ")";
420}
421
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -0800422template <PointerSize kPointerSize, typename HeapVisitor, typename NativeVisitor>
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800423class ImageSpace::PatchObjectVisitor final {
424 public:
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -0800425 explicit PatchObjectVisitor(HeapVisitor heap_visitor, NativeVisitor native_visitor)
426 : heap_visitor_(heap_visitor), native_visitor_(native_visitor) {}
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800427
Vladimir Markoc0b30c92019-07-23 14:58:25 +0100428 void VisitClass(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Class> class_class)
429 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800430 // A mirror::Class object consists of
431 // - instance fields inherited from j.l.Object,
432 // - instance fields inherited from j.l.Class,
433 // - embedded tables (vtable, interface method table),
434 // - static fields of the class itself.
435 // The reference fields are at the start of each field section (this is how the
436 // ClassLinker orders fields; except when that would create a gap between superclass
437 // fields and the first reference of the subclass due to alignment, it can be filled
438 // with smaller fields - but that's not the case for j.l.Object and j.l.Class).
439
Vladimir Markoc0b30c92019-07-23 14:58:25 +0100440 DCHECK_ALIGNED(klass.Ptr(), kObjectAlignment);
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800441 static_assert(IsAligned<kHeapReferenceSize>(kObjectAlignment), "Object alignment check.");
442 // First, patch the `klass->klass_`, known to be a reference to the j.l.Class.class.
443 // This should be the only reference field in j.l.Object and we assert that below.
Vladimir Markoc0b30c92019-07-23 14:58:25 +0100444 DCHECK_EQ(class_class,
445 heap_visitor_(klass->GetClass<kVerifyNone, kWithoutReadBarrier>()));
446 klass->SetFieldObjectWithoutWriteBarrier<
447 /*kTransactionActive=*/ false,
448 /*kCheckTransaction=*/ true,
449 kVerifyNone>(mirror::Object::ClassOffset(), class_class);
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800450 // Then patch the reference instance fields described by j.l.Class.class.
451 // Use the sizeof(Object) to determine where these reference fields start;
452 // this is the same as `class_class->GetFirstReferenceInstanceFieldOffset()`
453 // after patching but the j.l.Class may not have been patched yet.
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800454 size_t num_reference_instance_fields = class_class->NumReferenceInstanceFields<kVerifyNone>();
455 DCHECK_NE(num_reference_instance_fields, 0u);
456 static_assert(IsAligned<kHeapReferenceSize>(sizeof(mirror::Object)), "Size alignment check.");
457 MemberOffset instance_field_offset(sizeof(mirror::Object));
458 for (size_t i = 0; i != num_reference_instance_fields; ++i) {
459 PatchReferenceField(klass, instance_field_offset);
460 static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
461 "Heap reference sizes equality check.");
462 instance_field_offset =
463 MemberOffset(instance_field_offset.Uint32Value() + kHeapReferenceSize);
464 }
465 // Now that we have patched the `super_class_`, if this is the j.l.Class.class,
466 // we can get a reference to j.l.Object.class and assert that it has only one
467 // reference instance field (the `klass_` patched above).
468 if (kIsDebugBuild && klass == class_class) {
469 ObjPtr<mirror::Class> object_class =
470 klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>();
471 CHECK_EQ(object_class->NumReferenceInstanceFields<kVerifyNone>(), 1u);
472 }
473 // Then patch static fields.
474 size_t num_reference_static_fields = klass->NumReferenceStaticFields<kVerifyNone>();
475 if (num_reference_static_fields != 0u) {
476 MemberOffset static_field_offset =
477 klass->GetFirstReferenceStaticFieldOffset<kVerifyNone>(kPointerSize);
478 for (size_t i = 0; i != num_reference_static_fields; ++i) {
479 PatchReferenceField(klass, static_field_offset);
480 static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
481 "Heap reference sizes equality check.");
482 static_field_offset =
483 MemberOffset(static_field_offset.Uint32Value() + kHeapReferenceSize);
484 }
485 }
486 // Then patch native pointers.
Vladimir Markoc0b30c92019-07-23 14:58:25 +0100487 klass->FixupNativePointers<kVerifyNone>(klass.Ptr(), kPointerSize, *this);
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800488 }
489
490 template <typename T>
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -0800491 T* operator()(T* ptr, void** dest_addr ATTRIBUTE_UNUSED) const {
492 return (ptr != nullptr) ? native_visitor_(ptr) : nullptr;
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800493 }
494
Vladimir Markoc524e9e2019-03-26 10:54:50 +0000495 void VisitPointerArray(ObjPtr<mirror::PointerArray> pointer_array)
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800496 REQUIRES_SHARED(Locks::mutator_lock_) {
497 // Fully patch the pointer array, including the `klass_` field.
498 PatchReferenceField</*kMayBeNull=*/ false>(pointer_array, mirror::Object::ClassOffset());
499
500 int32_t length = pointer_array->GetLength<kVerifyNone>();
501 for (int32_t i = 0; i != length; ++i) {
502 ArtMethod** method_entry = reinterpret_cast<ArtMethod**>(
503 pointer_array->ElementAddress<kVerifyNone>(i, kPointerSize));
504 PatchNativePointer</*kMayBeNull=*/ false>(method_entry);
505 }
506 }
507
508 void VisitObject(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
509 // Visit all reference fields.
510 object->VisitReferences</*kVisitNativeRoots=*/ false,
511 kVerifyNone,
512 kWithoutReadBarrier>(*this, *this);
513 // This function should not be called for classes.
514 DCHECK(!object->IsClass<kVerifyNone>());
515 }
516
517 // Visitor for VisitReferences().
Vladimir Marko4617d582019-03-28 13:48:31 +0000518 ALWAYS_INLINE void operator()(ObjPtr<mirror::Object> object,
519 MemberOffset field_offset,
520 bool is_static)
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800521 const REQUIRES_SHARED(Locks::mutator_lock_) {
522 DCHECK(!is_static);
523 PatchReferenceField(object, field_offset);
524 }
525 // Visitor for VisitReferences(), java.lang.ref.Reference case.
Vladimir Marko4617d582019-03-28 13:48:31 +0000526 ALWAYS_INLINE void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800527 REQUIRES_SHARED(Locks::mutator_lock_) {
528 DCHECK(klass->IsTypeOfReferenceClass());
529 this->operator()(ref, mirror::Reference::ReferentOffset(), /*is_static=*/ false);
530 }
531 // Ignore class native roots; not called from VisitReferences() for kVisitNativeRoots == false.
532 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
533 const {}
534 void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
535
Vladimir Marko423bebb2019-03-26 15:17:21 +0000536 void VisitDexCacheArrays(ObjPtr<mirror::DexCache> dex_cache)
537 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800538 FixupDexCacheArray<mirror::StringDexCacheType>(dex_cache,
539 mirror::DexCache::StringsOffset(),
540 dex_cache->NumStrings<kVerifyNone>());
541 FixupDexCacheArray<mirror::TypeDexCacheType>(dex_cache,
542 mirror::DexCache::ResolvedTypesOffset(),
543 dex_cache->NumResolvedTypes<kVerifyNone>());
544 FixupDexCacheArray<mirror::MethodDexCacheType>(dex_cache,
545 mirror::DexCache::ResolvedMethodsOffset(),
546 dex_cache->NumResolvedMethods<kVerifyNone>());
547 FixupDexCacheArray<mirror::FieldDexCacheType>(dex_cache,
548 mirror::DexCache::ResolvedFieldsOffset(),
549 dex_cache->NumResolvedFields<kVerifyNone>());
550 FixupDexCacheArray<mirror::MethodTypeDexCacheType>(
551 dex_cache,
552 mirror::DexCache::ResolvedMethodTypesOffset(),
553 dex_cache->NumResolvedMethodTypes<kVerifyNone>());
554 FixupDexCacheArray<GcRoot<mirror::CallSite>>(
555 dex_cache,
556 mirror::DexCache::ResolvedCallSitesOffset(),
557 dex_cache->NumResolvedCallSites<kVerifyNone>());
558 FixupDexCacheArray<GcRoot<mirror::String>>(
559 dex_cache,
560 mirror::DexCache::PreResolvedStringsOffset(),
561 dex_cache->NumPreResolvedStrings<kVerifyNone>());
562 }
563
564 template <bool kMayBeNull = true, typename T>
565 ALWAYS_INLINE void PatchGcRoot(/*inout*/GcRoot<T>* root) const
566 REQUIRES_SHARED(Locks::mutator_lock_) {
567 static_assert(sizeof(GcRoot<mirror::Class*>) == sizeof(uint32_t), "GcRoot size check");
568 T* old_value = root->template Read<kWithoutReadBarrier>();
569 DCHECK(kMayBeNull || old_value != nullptr);
570 if (!kMayBeNull || old_value != nullptr) {
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -0800571 *root = GcRoot<T>(heap_visitor_(old_value));
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800572 }
573 }
574
575 template <bool kMayBeNull = true, typename T>
576 ALWAYS_INLINE void PatchNativePointer(/*inout*/T** entry) const {
577 if (kPointerSize == PointerSize::k64) {
578 uint64_t* raw_entry = reinterpret_cast<uint64_t*>(entry);
579 T* old_value = reinterpret_cast64<T*>(*raw_entry);
580 DCHECK(kMayBeNull || old_value != nullptr);
581 if (!kMayBeNull || old_value != nullptr) {
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -0800582 T* new_value = native_visitor_(old_value);
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800583 *raw_entry = reinterpret_cast64<uint64_t>(new_value);
584 }
585 } else {
586 uint32_t* raw_entry = reinterpret_cast<uint32_t*>(entry);
587 T* old_value = reinterpret_cast32<T*>(*raw_entry);
588 DCHECK(kMayBeNull || old_value != nullptr);
589 if (!kMayBeNull || old_value != nullptr) {
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -0800590 T* new_value = native_visitor_(old_value);
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800591 *raw_entry = reinterpret_cast32<uint32_t>(new_value);
592 }
593 }
594 }
595
  // Patch a heap reference field of `object` at `offset`: read the old
  // reference without a read barrier, map it through `heap_visitor_`, and
  // store the result back without a card-table write barrier.
  // NOTE(review): barriers are skipped, presumably because the image space is
  // not yet added to the heap at this point — confirm against callers.
  // When `kMayBeNull` is false, a null old value is a bug (DCHECK).
  template <bool kMayBeNull = true>
  ALWAYS_INLINE void PatchReferenceField(ObjPtr<mirror::Object> object, MemberOffset offset) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    ObjPtr<mirror::Object> old_value =
        object->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset);
    DCHECK(kMayBeNull || old_value != nullptr);
    if (!kMayBeNull || old_value != nullptr) {
      ObjPtr<mirror::Object> new_value = heap_visitor_(old_value.Ptr());
      object->SetFieldObjectWithoutWriteBarrier</*kTransactionActive=*/ false,
                                                /*kCheckTransaction=*/ true,
                                                kVerifyNone>(offset, new_value);
    }
  }
609
  // Fix up one entry of a dex cache array of DexCachePair<T> (GcRoot-based).
  // The std::atomic<> wrapper is cast away for direct indexing; the
  // static_assert guarantees the layouts are identical so this is safe.
  template <typename T>
  void FixupDexCacheArrayEntry(std::atomic<mirror::DexCachePair<T>>* array, uint32_t index)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    static_assert(sizeof(std::atomic<mirror::DexCachePair<T>>) == sizeof(mirror::DexCachePair<T>),
                  "Size check for removing std::atomic<>.");
    PatchGcRoot(&(reinterpret_cast<mirror::DexCachePair<T>*>(array)[index].object));
  }
617
  // Fix up one entry of a dex cache array of NativeDexCachePair<T> (native
  // pointers). The pair is read with the image pointer size `kPointerSize`,
  // its pointer is mapped through `native_visitor_`, and the pair is written
  // back. Null entries are left untouched.
  template <typename T>
  void FixupDexCacheArrayEntry(std::atomic<mirror::NativeDexCachePair<T>>* array, uint32_t index)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    static_assert(sizeof(std::atomic<mirror::NativeDexCachePair<T>>) ==
                  sizeof(mirror::NativeDexCachePair<T>),
                  "Size check for removing std::atomic<>.");
    mirror::NativeDexCachePair<T> pair =
        mirror::DexCache::GetNativePairPtrSize(array, index, kPointerSize);
    if (pair.object != nullptr) {
      pair.object = native_visitor_(pair.object);
      mirror::DexCache::SetNativePairPtrSize(array, index, pair, kPointerSize);
    }
  }
631
  // Fix up one entry of a dex cache CallSite array (plain GcRoot references).
  void FixupDexCacheArrayEntry(GcRoot<mirror::CallSite>* array, uint32_t index)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    PatchGcRoot(&array[index]);
  }
636
  // Fix up one entry of a dex cache String array (plain GcRoot references).
  void FixupDexCacheArrayEntry(GcRoot<mirror::String>* array, uint32_t index)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    PatchGcRoot(&array[index]);
  }
641
  // Fix up a dex cache array stored in the 64-bit field at `array_offset`:
  // relocate the array pointer itself through `native_visitor_`, store the new
  // pointer back, then fix up each of the `size` entries via the appropriate
  // FixupDexCacheArrayEntry overload (selected by EntryType).
  // A null array must mean a zero size and vice versa (DCHECK).
  template <typename EntryType>
  void FixupDexCacheArray(ObjPtr<mirror::DexCache> dex_cache,
                          MemberOffset array_offset,
                          uint32_t size) REQUIRES_SHARED(Locks::mutator_lock_) {
    EntryType* old_array =
        reinterpret_cast64<EntryType*>(dex_cache->GetField64<kVerifyNone>(array_offset));
    DCHECK_EQ(old_array != nullptr, size != 0u);
    if (old_array != nullptr) {
      EntryType* new_array = native_visitor_(old_array);
      dex_cache->SetField64<kVerifyNone>(array_offset, reinterpret_cast64<uint64_t>(new_array));
      for (uint32_t i = 0; i != size; ++i) {
        FixupDexCacheArrayEntry(new_array, i);
      }
    }
  }
657
658 private:
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -0800659 // Heap objects visitor.
660 HeapVisitor heap_visitor_;
661
662 // Native objects visitor.
663 NativeVisitor native_visitor_;
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800664};
665
Mathieu Chartier25602dc2018-12-11 11:31:57 -0800666template <typename ReferenceVisitor>
667class ImageSpace::ClassTableVisitor final {
668 public:
669 explicit ClassTableVisitor(const ReferenceVisitor& reference_visitor)
670 : reference_visitor_(reference_visitor) {}
671
672 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
673 REQUIRES_SHARED(Locks::mutator_lock_) {
674 DCHECK(root->AsMirrorPtr() != nullptr);
675 root->Assign(reference_visitor_(root->AsMirrorPtr()));
676 }
677
678 private:
679 ReferenceVisitor reference_visitor_;
680};
681
Andreas Gampea463b6a2016-08-12 21:53:32 -0700682// Helper class encapsulating loading, so we can access private ImageSpace members (this is a
Vladimir Markoc09cd052018-08-23 16:36:36 +0100683// nested class), but not declare functions in the header.
Vladimir Marko82e1e272018-08-20 13:38:06 +0000684class ImageSpace::Loader {
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800685 public:
  // Load an app image from `image_filename` into `image_reservation`, validate
  // its reservation size and component count, relocate it in place to its
  // actual load address, and cross-check the image methods against the current
  // runtime. Returns null (with `error_msg` set) on failure.
  static std::unique_ptr<ImageSpace> InitAppImage(const char* image_filename,
                                                  const char* image_location,
                                                  const OatFile* oat_file,
                                                  /*inout*/MemMap* image_reservation,
                                                  /*out*/std::string* error_msg)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));

    std::unique_ptr<ImageSpace> space = Init(image_filename,
                                             image_location,
                                             oat_file,
                                             &logger,
                                             image_reservation,
                                             error_msg);
    if (space != nullptr) {
      // An app image is a single component; its reservation must be exactly
      // the page-rounded image size.
      uint32_t expected_reservation_size =
          RoundUp(space->GetImageHeader().GetImageSize(), kPageSize);
      if (!CheckImageReservationSize(*space, expected_reservation_size, error_msg) ||
          !CheckImageComponentCount(*space, /*expected_component_count=*/ 1u, error_msg)) {
        return nullptr;
      }

      TimingLogger::ScopedTiming timing("RelocateImage", &logger);
      ImageHeader* image_header = reinterpret_cast<ImageHeader*>(space->GetMemMap()->Begin());
      // Dispatch on the image's pointer size so that native pointer slots are
      // patched with the correct width.
      const PointerSize pointer_size = image_header->GetPointerSize();
      bool result;
      if (pointer_size == PointerSize::k64) {
        result = RelocateInPlace<PointerSize::k64>(*image_header,
                                                   space->GetMemMap()->Begin(),
                                                   space->GetLiveBitmap(),
                                                   oat_file,
                                                   error_msg);
      } else {
        result = RelocateInPlace<PointerSize::k32>(*image_header,
                                                   space->GetMemMap()->Begin(),
                                                   space->GetLiveBitmap(),
                                                   oat_file,
                                                   error_msg);
      }
      if (!result) {
        return nullptr;
      }
      // After relocation, the well-known image methods must be identical to
      // the runtime's (which come from the boot image) — hard failure if not.
      Runtime* runtime = Runtime::Current();
      CHECK_EQ(runtime->GetResolutionMethod(),
               image_header->GetImageMethod(ImageHeader::kResolutionMethod));
      CHECK_EQ(runtime->GetImtConflictMethod(),
               image_header->GetImageMethod(ImageHeader::kImtConflictMethod));
      CHECK_EQ(runtime->GetImtUnimplementedMethod(),
               image_header->GetImageMethod(ImageHeader::kImtUnimplementedMethod));
      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveAllCalleeSaves),
               image_header->GetImageMethod(ImageHeader::kSaveAllCalleeSavesMethod));
      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsOnly),
               image_header->GetImageMethod(ImageHeader::kSaveRefsOnlyMethod));
      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs),
               image_header->GetImageMethod(ImageHeader::kSaveRefsAndArgsMethod));
      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverything),
               image_header->GetImageMethod(ImageHeader::kSaveEverythingMethod));
      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverythingForClinit),
               image_header->GetImageMethod(ImageHeader::kSaveEverythingMethodForClinit));
      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverythingForSuspendCheck),
               image_header->GetImageMethod(ImageHeader::kSaveEverythingMethodForSuspendCheck));

      VLOG(image) << "ImageSpace::Loader::InitAppImage exiting " << *space.get();
    }
    if (VLOG_IS_ON(image)) {
      logger.Dump(LOG_STREAM(INFO));
    }
    return space;
  }
755
  // Core image-loading routine shared by boot and app images:
  //   1. open and validate the image file (header magic, size vs. data size),
  //   2. if `oat_file` is given (app image), check the oat checksum against the
  //      one recorded in the image header,
  //   3. map the image data (decompressing if needed) via LoadImageFile,
  //   4. map the live bitmap stored after the image data,
  //   5. wrap everything in a new ImageSpace.
  // Returns null (with `error_msg` set) on any failure.
  static std::unique_ptr<ImageSpace> Init(const char* image_filename,
                                          const char* image_location,
                                          const OatFile* oat_file,
                                          TimingLogger* logger,
                                          /*inout*/MemMap* image_reservation,
                                          /*out*/std::string* error_msg)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    CHECK(image_filename != nullptr);
    CHECK(image_location != nullptr);

    VLOG(image) << "ImageSpace::Init entering image_filename=" << image_filename;

    std::unique_ptr<File> file;
    {
      TimingLogger::ScopedTiming timing("OpenImageFile", logger);
      file.reset(OS::OpenFileForReading(image_filename));
      if (file == nullptr) {
        *error_msg = StringPrintf("Failed to open '%s'", image_filename);
        return nullptr;
      }
    }
    ImageHeader temp_image_header;
    ImageHeader* image_header = &temp_image_header;
    {
      TimingLogger::ScopedTiming timing("ReadImageHeader", logger);
      bool success = file->ReadFully(image_header, sizeof(*image_header));
      if (!success || !image_header->IsValid()) {
        *error_msg = StringPrintf("Invalid image header in '%s'", image_filename);
        return nullptr;
      }
    }
    // Check that the file is larger or equal to the header size + data size.
    const uint64_t image_file_size = static_cast<uint64_t>(file->GetLength());
    if (image_file_size < sizeof(ImageHeader) + image_header->GetDataSize()) {
      *error_msg = StringPrintf(
          "Image file truncated: %" PRIu64 " vs. %" PRIu64 ".",
          image_file_size,
          static_cast<uint64_t>(sizeof(ImageHeader) + image_header->GetDataSize()));
      return nullptr;
    }

    if (oat_file != nullptr) {
      // If we have an oat file (i.e. for app image), check the oat file checksum.
      // Otherwise, we open the oat file after the image and check the checksum there.
      const uint32_t oat_checksum = oat_file->GetOatHeader().GetChecksum();
      const uint32_t image_oat_checksum = image_header->GetOatChecksum();
      if (oat_checksum != image_oat_checksum) {
        *error_msg = StringPrintf("Oat checksum 0x%x does not match the image one 0x%x in image %s",
                                  oat_checksum,
                                  image_oat_checksum,
                                  image_filename);
        return nullptr;
      }
    }

    if (VLOG_IS_ON(startup)) {
      LOG(INFO) << "Dumping image sections";
      for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
        const auto section_idx = static_cast<ImageHeader::ImageSections>(i);
        auto& section = image_header->GetImageSection(section_idx);
        LOG(INFO) << section_idx << " start="
            << reinterpret_cast<void*>(image_header->GetImageBegin() + section.Offset()) << " "
            << section;
      }
    }

    const auto& bitmap_section = image_header->GetImageBitmapSection();
    // The location we want to map from is the first aligned page after the end of the stored
    // (possibly compressed) data.
    const size_t image_bitmap_offset = RoundUp(sizeof(ImageHeader) + image_header->GetDataSize(),
                                               kPageSize);
    const size_t end_of_bitmap = image_bitmap_offset + bitmap_section.Size();
    // The bitmap is the last thing in the file; anything else is corruption.
    if (end_of_bitmap != image_file_size) {
      *error_msg = StringPrintf(
          "Image file size does not equal end of bitmap: size=%" PRIu64 " vs. %zu.",
          image_file_size,
          end_of_bitmap);
      return nullptr;
    }

    // GetImageBegin is the preferred address to map the image. If we manage to map the
    // image at the image begin, the amount of fixup work required is minimized.
    // If it is pic we will retry with error_msg for the failure case. Pass a null error_msg to
    // avoid reading proc maps for a mapping failure and slowing everything down.
    // For the boot image, we have already reserved the memory and we load the image
    // into the `image_reservation`.
    MemMap map = LoadImageFile(
        image_filename,
        image_location,
        *image_header,
        file->Fd(),
        logger,
        image_reservation,
        error_msg);
    if (!map.IsValid()) {
      DCHECK(!error_msg->empty());
      return nullptr;
    }
    DCHECK_EQ(0, memcmp(image_header, map.Begin(), sizeof(ImageHeader)));

    MemMap image_bitmap_map = MemMap::MapFile(bitmap_section.Size(),
                                              PROT_READ,
                                              MAP_PRIVATE,
                                              file->Fd(),
                                              image_bitmap_offset,
                                              /*low_4gb=*/ false,
                                              image_filename,
                                              error_msg);
    if (!image_bitmap_map.IsValid()) {
      *error_msg = StringPrintf("Failed to map image bitmap: %s", error_msg->c_str());
      return nullptr;
    }
    // Loaded the map, use the image header from the file now in case we patch it with
    // RelocateInPlace.
    image_header = reinterpret_cast<ImageHeader*>(map.Begin());
    const uint32_t bitmap_index = ImageSpace::bitmap_index_.fetch_add(1);
    std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u",
                                         image_filename,
                                         bitmap_index));
    // Bitmap only needs to cover until the end of the mirror objects section.
    const ImageSection& image_objects = image_header->GetObjectsSection();
    // We only want the mirror object, not the ArtFields and ArtMethods.
    uint8_t* const image_end = map.Begin() + image_objects.End();
    accounting::ContinuousSpaceBitmap bitmap;
    {
      TimingLogger::ScopedTiming timing("CreateImageBitmap", logger);
      bitmap = accounting::ContinuousSpaceBitmap::CreateFromMemMap(
          bitmap_name,
          std::move(image_bitmap_map),
          reinterpret_cast<uint8_t*>(map.Begin()),
          // Make sure the bitmap is aligned to card size instead of just bitmap word size.
          RoundUp(image_objects.End(), gc::accounting::CardTable::kCardSize));
      if (!bitmap.IsValid()) {
        *error_msg = StringPrintf("Could not create bitmap '%s'", bitmap_name.c_str());
        return nullptr;
      }
    }
    // We only want the mirror object, not the ArtFields and ArtMethods.
    std::unique_ptr<ImageSpace> space(new ImageSpace(image_filename,
                                                     image_location,
                                                     std::move(map),
                                                     std::move(bitmap),
                                                     image_end));
    // The space borrows (does not own) the oat file; may be null for boot images here.
    space->oat_file_non_owned_ = oat_file;
    return space;
  }
902
Vladimir Marko7391c8c2018-11-21 17:58:44 +0000903 static bool CheckImageComponentCount(const ImageSpace& space,
904 uint32_t expected_component_count,
905 /*out*/std::string* error_msg) {
906 const ImageHeader& header = space.GetImageHeader();
907 if (header.GetComponentCount() != expected_component_count) {
908 *error_msg = StringPrintf("Unexpected component count in %s, received %u, expected %u",
909 space.GetImageFilename().c_str(),
910 header.GetComponentCount(),
911 expected_component_count);
912 return false;
913 }
914 return true;
915 }
916
917 static bool CheckImageReservationSize(const ImageSpace& space,
918 uint32_t expected_reservation_size,
919 /*out*/std::string* error_msg) {
920 const ImageHeader& header = space.GetImageHeader();
921 if (header.GetImageReservationSize() != expected_reservation_size) {
922 *error_msg = StringPrintf("Unexpected reservation size in %s, received %u, expected %u",
923 space.GetImageFilename().c_str(),
924 header.GetImageReservationSize(),
925 expected_reservation_size);
926 return false;
927 }
928 return true;
929 }
930
Andreas Gampea463b6a2016-08-12 21:53:32 -0700931 private:
  // Map the image data of `fd` into memory.
  // Uncompressed images are mapped directly (copy-on-write) at the reservation
  // address. Compressed images get an anonymous writable mapping into which the
  // header is copied and each LZ4 block is decompressed — in parallel via the
  // runtime thread pool when there are at least kMinBlocks blocks.
  // Returns an invalid MemMap (with `error_msg` set, if non-null) on failure.
  static MemMap LoadImageFile(const char* image_filename,
                              const char* image_location,
                              const ImageHeader& image_header,
                              int fd,
                              TimingLogger* logger,
                              /*inout*/MemMap* image_reservation,
                              /*out*/std::string* error_msg)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    TimingLogger::ScopedTiming timing("MapImageFile", logger);
    // NOTE(review): temp_error_msg appears unused in this function — confirm.
    std::string temp_error_msg;
    const bool is_compressed = image_header.HasCompressedBlock();
    if (!is_compressed) {
      // Uncompressed: map the file directly at the reservation address
      // (or anywhere in low 4GB when there is no reservation).
      uint8_t* address = (image_reservation != nullptr) ? image_reservation->Begin() : nullptr;
      return MemMap::MapFileAtAddress(address,
                                      image_header.GetImageSize(),
                                      PROT_READ | PROT_WRITE,
                                      MAP_PRIVATE,
                                      fd,
                                      /*start=*/ 0,
                                      /*low_4gb=*/ true,
                                      image_filename,
                                      /*reuse=*/ false,
                                      image_reservation,
                                      error_msg);
    }

    // Reserve output and decompress into it.
    MemMap map = MemMap::MapAnonymous(image_location,
                                      image_header.GetImageSize(),
                                      PROT_READ | PROT_WRITE,
                                      /*low_4gb=*/ true,
                                      image_reservation,
                                      error_msg);
    if (map.IsValid()) {
      const size_t stored_size = image_header.GetDataSize();
      // Read-only mapping of the compressed source data (header + blocks).
      MemMap temp_map = MemMap::MapFile(sizeof(ImageHeader) + stored_size,
                                        PROT_READ,
                                        MAP_PRIVATE,
                                        fd,
                                        /*start=*/ 0,
                                        /*low_4gb=*/ false,
                                        image_filename,
                                        error_msg);
      if (!temp_map.IsValid()) {
        DCHECK(error_msg == nullptr || !error_msg->empty());
        return MemMap::Invalid();
      }
      // The header itself is stored uncompressed; copy it first.
      memcpy(map.Begin(), &image_header, sizeof(ImageHeader));

      Runtime::ScopedThreadPoolUsage stpu;
      ThreadPool* const pool = stpu.GetThreadPool();
      const uint64_t start = NanoTime();
      Thread* const self = Thread::Current();
      // Parallel decompression only pays off with multiple blocks.
      static constexpr size_t kMinBlocks = 2u;
      const bool use_parallel = pool != nullptr && image_header.GetBlockCount() >= kMinBlocks;
      for (const ImageHeader::Block& block : image_header.GetBlocks(temp_map.Begin())) {
        // Each block records its own source/destination offsets, so blocks can
        // be decompressed independently (and concurrently).
        auto function = [&](Thread*) {
          const uint64_t start2 = NanoTime();
          ScopedTrace trace("LZ4 decompress block");
          bool result = block.Decompress(/*out_ptr=*/map.Begin(),
                                         /*in_ptr=*/temp_map.Begin(),
                                         error_msg);
          if (!result && error_msg != nullptr) {
            *error_msg = "Failed to decompress image block " + *error_msg;
          }
          VLOG(image) << "Decompress block " << block.GetDataSize() << " -> "
                      << block.GetImageSize() << " in " << PrettyDuration(NanoTime() - start2);
        };
        if (use_parallel) {
          pool->AddTask(self, new FunctionTask(std::move(function)));
        } else {
          function(self);
        }
      }
      if (use_parallel) {
        ScopedTrace trace("Waiting for workers");
        // Go to native since we don't want to suspend while holding the mutator lock.
        ScopedThreadSuspension sts(Thread::Current(), kNative);
        pool->Wait(self, true, false);
      }
      const uint64_t time = NanoTime() - start;
      // Add one 1 ns to prevent possible divide by 0.
      VLOG(image) << "Decompressing image took " << PrettyDuration(time) << " ("
                  << PrettySize(static_cast<uint64_t>(map.Size()) * MsToNs(1000) / (time + 1))
                  << "/s)";
    }

    return map;
  }
1021
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001022 class EmptyRange {
Andreas Gampea463b6a2016-08-12 21:53:32 -07001023 public:
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001024 ALWAYS_INLINE bool InSource(uintptr_t) const { return false; }
1025 ALWAYS_INLINE bool InDest(uintptr_t) const { return false; }
1026 ALWAYS_INLINE uintptr_t ToDest(uintptr_t) const { UNREACHABLE(); }
1027 };
1028
  // Functor computing the relocated (destination) address for a pointer lying
  // in one of up to three relocation ranges. Ranges are probed in the order
  // range2, range1, range0; the address must fall in one of them — otherwise
  // the CHECK fails and prints range0's bounds for diagnosis.
  template <typename Range0, typename Range1 = EmptyRange, typename Range2 = EmptyRange>
  class ForwardAddress {
   public:
    ForwardAddress(const Range0& range0 = Range0(),
                   const Range1& range1 = Range1(),
                   const Range2& range2 = Range2())
        : range0_(range0), range1_(range1), range2_(range2) {}

    // Return the relocated address of a heap object.
    // Null checks must be performed in the caller (for performance reasons).
    template <typename T>
    ALWAYS_INLINE T* operator()(T* src) const {
      DCHECK(src != nullptr);
      const uintptr_t uint_src = reinterpret_cast<uintptr_t>(src);
      if (range2_.InSource(uint_src)) {
        return reinterpret_cast<T*>(range2_.ToDest(uint_src));
      }
      if (range1_.InSource(uint_src)) {
        return reinterpret_cast<T*>(range1_.ToDest(uint_src));
      }
      CHECK(range0_.InSource(uint_src))
          << reinterpret_cast<const void*>(src) << " not in "
          << reinterpret_cast<const void*>(range0_.Source()) << "-"
          << reinterpret_cast<const void*>(range0_.Source() + range0_.Length());
      return reinterpret_cast<T*>(range0_.ToDest(uint_src));
    }

   private:
    const Range0 range0_;
    const Range1 range1_;
    const Range2 range2_;
  };
1061
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001062 template <typename Forward>
1063 class FixupRootVisitor {
Andreas Gampea463b6a2016-08-12 21:53:32 -07001064 public:
1065 template<typename... Args>
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001066 explicit FixupRootVisitor(Args... args) : forward_(args...) {}
Andreas Gampea463b6a2016-08-12 21:53:32 -07001067
1068 ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001069 REQUIRES_SHARED(Locks::mutator_lock_) {
Andreas Gampea463b6a2016-08-12 21:53:32 -07001070 if (!root->IsNull()) {
1071 VisitRoot(root);
1072 }
1073 }
1074
1075 ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001076 REQUIRES_SHARED(Locks::mutator_lock_) {
Andreas Gampea463b6a2016-08-12 21:53:32 -07001077 mirror::Object* ref = root->AsMirrorPtr();
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001078 mirror::Object* new_ref = forward_(ref);
Andreas Gampea463b6a2016-08-12 21:53:32 -07001079 if (ref != new_ref) {
1080 root->Assign(new_ref);
1081 }
1082 }
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001083
1084 private:
1085 Forward forward_;
Andreas Gampea463b6a2016-08-12 21:53:32 -07001086 };
1087
  // Object visitor that fixes up all heap references reachable from an object.
  // Each object is processed at most once, tracked via the `visited_` bitmap.
  // Field accesses use neither read nor write barriers — the space is not yet
  // added to the heap (see the inline comments below).
  template <typename Forward>
  class FixupObjectVisitor {
   public:
    explicit FixupObjectVisitor(gc::accounting::ContinuousSpaceBitmap* visited,
                                const Forward& forward)
        : visited_(visited), forward_(forward) {}

    // Fix up separately since we also need to fix up method entrypoints.
    ALWAYS_INLINE void VisitRootIfNonNull(
        mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}

    ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
        const {}

    // Reference-field visitor: forwards the reference at `offset` and writes
    // the result back. Null fields are left untouched.
    ALWAYS_INLINE void operator()(ObjPtr<mirror::Object> obj,
                                  MemberOffset offset,
                                  bool is_static ATTRIBUTE_UNUSED) const
        NO_THREAD_SAFETY_ANALYSIS {
      // Space is not yet added to the heap, don't do a read barrier.
      mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
          offset);
      if (ref != nullptr) {
        // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
        // image.
        obj->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(offset, forward_(ref));
      }
    }

    // java.lang.ref.Reference visitor.
    ALWAYS_INLINE void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
        REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
      DCHECK(klass->IsTypeOfReferenceClass());
      // The referent is an ordinary reference field for fixup purposes.
      this->operator()(ref, mirror::Reference::ReferentOffset(), /*is_static=*/ false);
    }

    // Per-object entry point: visit the object's references unless it was
    // already marked in the bitmap.
    void operator()(mirror::Object* obj) const
        NO_THREAD_SAFETY_ANALYSIS {
      if (!visited_->Set(obj)) {
        // Not already visited.
        obj->VisitReferences</*visit native roots*/false, kVerifyNone, kWithoutReadBarrier>(
            *this,
            *this);
        // NOTE(review): class objects are not expected to reach this visitor —
        // presumably they are fixed up elsewhere; confirm against callers.
        CHECK(!obj->IsClass());
      }
    }

   private:
    gc::accounting::ContinuousSpaceBitmap* const visited_;
    Forward forward_;
  };
Mathieu Chartierfbc31082016-01-24 11:59:56 -08001138
Andreas Gampea463b6a2016-08-12 21:53:32 -07001139 // Relocate an image space mapped at target_base which possibly used to be at a different base
Vladimir Marko4df2d802018-09-27 16:42:44 +00001140 // address. In place means modifying a single ImageSpace in place rather than relocating from
1141 // one ImageSpace to another.
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001142 template <PointerSize kPointerSize>
Andreas Gampea463b6a2016-08-12 21:53:32 -07001143 static bool RelocateInPlace(ImageHeader& image_header,
1144 uint8_t* target_base,
1145 accounting::ContinuousSpaceBitmap* bitmap,
1146 const OatFile* app_oat_file,
1147 std::string* error_msg) {
1148 DCHECK(error_msg != nullptr);
Andreas Gampea463b6a2016-08-12 21:53:32 -07001149 // Set up sections.
Andreas Gampea463b6a2016-08-12 21:53:32 -07001150 gc::Heap* const heap = Runtime::Current()->GetHeap();
Vladimir Marko1aff1ef2019-07-02 15:12:50 +01001151 uint32_t boot_image_begin = heap->GetBootImagesStartAddress();
1152 uint32_t boot_image_size = heap->GetBootImagesSize();
1153 if (boot_image_size == 0u) {
Andreas Gampea463b6a2016-08-12 21:53:32 -07001154 *error_msg = "Can not relocate app image without boot image space";
1155 return false;
1156 }
Andreas Gampea463b6a2016-08-12 21:53:32 -07001157 const uint32_t image_header_boot_image_size = image_header.GetBootImageSize();
Andreas Gampea463b6a2016-08-12 21:53:32 -07001158 if (boot_image_size != image_header_boot_image_size) {
1159 *error_msg = StringPrintf("Boot image size %" PRIu64 " does not match expected size %"
1160 PRIu64,
1161 static_cast<uint64_t>(boot_image_size),
1162 static_cast<uint64_t>(image_header_boot_image_size));
1163 return false;
1164 }
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001165 const ImageSection& objects_section = image_header.GetObjectsSection();
1166 // Where the app image objects are mapped to.
1167 uint8_t* objects_location = target_base + objects_section.Offset();
Andreas Gampea463b6a2016-08-12 21:53:32 -07001168 TimingLogger logger(__FUNCTION__, true, false);
1169 RelocationRange boot_image(image_header.GetBootImageBegin(),
1170 boot_image_begin,
1171 boot_image_size);
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001172 // Metadata is everything after the objects section, use exclusion to be safe.
1173 RelocationRange app_image_metadata(
1174 reinterpret_cast<uintptr_t>(image_header.GetImageBegin()) + objects_section.End(),
1175 reinterpret_cast<uintptr_t>(target_base) + objects_section.End(),
1176 image_header.GetImageSize() - objects_section.End());
1177 // App image heap objects, may be mapped in the heap.
1178 RelocationRange app_image_objects(
1179 reinterpret_cast<uintptr_t>(image_header.GetImageBegin()) + objects_section.Offset(),
1180 reinterpret_cast<uintptr_t>(objects_location),
1181 objects_section.Size());
Andreas Gampea463b6a2016-08-12 21:53:32 -07001182 // Use the oat data section since this is where the OatFile::Begin is.
1183 RelocationRange app_oat(reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin()),
1184 // Not necessarily in low 4GB.
1185 reinterpret_cast<uintptr_t>(app_oat_file->Begin()),
1186 image_header.GetOatDataEnd() - image_header.GetOatDataBegin());
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001187 VLOG(image) << "App image metadata " << app_image_metadata;
1188 VLOG(image) << "App image objects " << app_image_objects;
Andreas Gampea463b6a2016-08-12 21:53:32 -07001189 VLOG(image) << "App oat " << app_oat;
1190 VLOG(image) << "Boot image " << boot_image;
Vladimir Marko0c78ef72018-11-21 14:09:35 +00001191 // True if we need to fixup any heap pointers.
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001192 const bool fixup_image = boot_image.Delta() != 0 || app_image_metadata.Delta() != 0 ||
1193 app_image_objects.Delta() != 0;
Vladimir Marko0c78ef72018-11-21 14:09:35 +00001194 if (!fixup_image) {
Andreas Gampea463b6a2016-08-12 21:53:32 -07001195 // Nothing to fix up.
Mathieu Chartierfbc31082016-01-24 11:59:56 -08001196 return true;
1197 }
Andreas Gampea463b6a2016-08-12 21:53:32 -07001198 ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001199
1200 using ForwardObject = ForwardAddress<RelocationRange, RelocationRange>;
1201 ForwardObject forward_object(boot_image, app_image_objects);
1202 ForwardObject forward_metadata(boot_image, app_image_metadata);
1203 using ForwardCode = ForwardAddress<RelocationRange, RelocationRange>;
1204 ForwardCode forward_code(boot_image, app_oat);
1205 PatchObjectVisitor<kPointerSize, ForwardObject, ForwardCode> patch_object_visitor(
1206 forward_object,
1207 forward_metadata);
Andreas Gampea463b6a2016-08-12 21:53:32 -07001208 if (fixup_image) {
1209 // Two pass approach, fix up all classes first, then fix up non class-objects.
1210 // The visited bitmap is used to ensure that pointer arrays are not forwarded twice.
Mathieu Chartier6f382012019-07-30 09:47:35 -07001211 gc::accounting::ContinuousSpaceBitmap visited_bitmap(
Andreas Gampea463b6a2016-08-12 21:53:32 -07001212 gc::accounting::ContinuousSpaceBitmap::Create("Relocate bitmap",
1213 target_base,
1214 image_header.GetImageSize()));
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001215 {
1216 TimingLogger::ScopedTiming timing("Fixup classes", &logger);
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001217 ObjPtr<mirror::Class> class_class = [&]() NO_THREAD_SAFETY_ANALYSIS {
1218 ObjPtr<mirror::ObjectArray<mirror::Object>> image_roots = app_image_objects.ToDest(
1219 image_header.GetImageRoots<kWithoutReadBarrier>().Ptr());
1220 int32_t class_roots_index = enum_cast<int32_t>(ImageHeader::kClassRoots);
1221 DCHECK_LT(class_roots_index, image_roots->GetLength<kVerifyNone>());
1222 ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots =
1223 ObjPtr<mirror::ObjectArray<mirror::Class>>::DownCast(boot_image.ToDest(
1224 image_roots->GetWithoutChecks<kVerifyNone>(class_roots_index).Ptr()));
1225 return GetClassRoot<mirror::Class, kWithoutReadBarrier>(class_roots);
1226 }();
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001227 const auto& class_table_section = image_header.GetClassTableSection();
1228 if (class_table_section.Size() > 0u) {
1229 ScopedObjectAccess soa(Thread::Current());
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001230 ClassTableVisitor class_table_visitor(forward_object);
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001231 size_t read_count = 0u;
1232 const uint8_t* data = target_base + class_table_section.Offset();
1233 // We avoid making a copy of the data since we want modifications to be propagated to the
1234 // memory map.
1235 ClassTable::ClassSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
1236 for (ClassTable::TableSlot& slot : temp_set) {
1237 slot.VisitRoot(class_table_visitor);
Vladimir Marko1fe58392019-04-10 16:14:56 +01001238 ObjPtr<mirror::Class> klass = slot.Read<kWithoutReadBarrier>();
1239 if (!app_image_objects.InDest(klass.Ptr())) {
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001240 continue;
1241 }
Mathieu Chartier6f382012019-07-30 09:47:35 -07001242 const bool already_marked = visited_bitmap.Set(klass.Ptr());
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001243 CHECK(!already_marked) << "App image class already visited";
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001244 patch_object_visitor.VisitClass(klass, class_class);
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001245 // Then patch the non-embedded vtable and iftable.
Vladimir Markoc524e9e2019-03-26 10:54:50 +00001246 ObjPtr<mirror::PointerArray> vtable =
1247 klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001248 if (vtable != nullptr &&
Vladimir Markoc524e9e2019-03-26 10:54:50 +00001249 app_image_objects.InDest(vtable.Ptr()) &&
Mathieu Chartier6f382012019-07-30 09:47:35 -07001250 !visited_bitmap.Set(vtable.Ptr())) {
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001251 patch_object_visitor.VisitPointerArray(vtable);
1252 }
Vladimir Markoc524e9e2019-03-26 10:54:50 +00001253 ObjPtr<mirror::IfTable> iftable = klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
1254 if (iftable != nullptr && app_image_objects.InDest(iftable.Ptr())) {
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001255 // Avoid processing the fields of iftable since we will process them later anyways
1256 // below.
1257 int32_t ifcount = klass->GetIfTableCount<kVerifyNone>();
1258 for (int32_t i = 0; i != ifcount; ++i) {
Vladimir Marko557fece2019-03-26 14:29:41 +00001259 ObjPtr<mirror::PointerArray> unpatched_ifarray =
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001260 iftable->GetMethodArrayOrNull<kVerifyNone, kWithoutReadBarrier>(i);
1261 if (unpatched_ifarray != nullptr) {
1262 // The iftable has not been patched, so we need to explicitly adjust the pointer.
Vladimir Marko557fece2019-03-26 14:29:41 +00001263 ObjPtr<mirror::PointerArray> ifarray = forward_object(unpatched_ifarray.Ptr());
1264 if (app_image_objects.InDest(ifarray.Ptr()) &&
Mathieu Chartier6f382012019-07-30 09:47:35 -07001265 !visited_bitmap.Set(ifarray.Ptr())) {
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001266 patch_object_visitor.VisitPointerArray(ifarray);
1267 }
1268 }
1269 }
1270 }
1271 }
1272 }
1273 }
1274
1275 // Fixup objects may read fields in the boot image, use the mutator lock here for sanity.
1276 // Though its probably not required.
Mathieu Chartier2ffc74b2019-01-03 19:25:41 -08001277 TimingLogger::ScopedTiming timing("Fixup objects", &logger);
Andreas Gampea463b6a2016-08-12 21:53:32 -07001278 ScopedObjectAccess soa(Thread::Current());
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001279 // Need to update the image to be at the target base.
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001280 uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
1281 uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
Mathieu Chartier6f382012019-07-30 09:47:35 -07001282 FixupObjectVisitor<ForwardObject> fixup_object_visitor(&visited_bitmap, forward_object);
Andreas Gampea463b6a2016-08-12 21:53:32 -07001283 bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_object_visitor);
1284 // Fixup image roots.
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001285 CHECK(app_image_objects.InSource(reinterpret_cast<uintptr_t>(
Vladimir Markoc13fbd82018-06-04 16:16:28 +01001286 image_header.GetImageRoots<kWithoutReadBarrier>().Ptr())));
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001287 image_header.RelocateImageReferences(app_image_objects.Delta());
1288 image_header.RelocateBootImageReferences(boot_image.Delta());
Andreas Gampea463b6a2016-08-12 21:53:32 -07001289 CHECK_EQ(image_header.GetImageBegin(), target_base);
1290 // Fix up dex cache DexFile pointers.
Vladimir Marko4617d582019-03-28 13:48:31 +00001291 ObjPtr<mirror::ObjectArray<mirror::DexCache>> dex_caches =
1292 image_header.GetImageRoot<kWithoutReadBarrier>(ImageHeader::kDexCaches)
1293 ->AsObjectArray<mirror::DexCache, kVerifyNone>();
Andreas Gampea463b6a2016-08-12 21:53:32 -07001294 for (int32_t i = 0, count = dex_caches->GetLength(); i < count; ++i) {
Vladimir Marko423bebb2019-03-26 15:17:21 +00001295 ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get<kVerifyNone, kWithoutReadBarrier>(i);
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001296 CHECK(dex_cache != nullptr);
1297 patch_object_visitor.VisitDexCacheArrays(dex_cache);
Mathieu Chartierfbc31082016-01-24 11:59:56 -08001298 }
1299 }
Mathieu Chartierfbc31082016-01-24 11:59:56 -08001300 {
1301 // Only touches objects in the app image, no need for mutator lock.
Andreas Gampea463b6a2016-08-12 21:53:32 -07001302 TimingLogger::ScopedTiming timing("Fixup methods", &logger);
Mathieu Chartier9d5956a2019-03-22 11:29:08 -07001303 image_header.VisitPackedArtMethods([&](ArtMethod& method) NO_THREAD_SAFETY_ANALYSIS {
1304 // TODO: Consider a separate visitor for runtime vs normal methods.
1305 if (UNLIKELY(method.IsRuntimeMethod())) {
1306 ImtConflictTable* table = method.GetImtConflictTable(kPointerSize);
1307 if (table != nullptr) {
1308 ImtConflictTable* new_table = forward_metadata(table);
1309 if (table != new_table) {
1310 method.SetImtConflictTable(new_table, kPointerSize);
1311 }
1312 }
1313 const void* old_code = method.GetEntryPointFromQuickCompiledCodePtrSize(kPointerSize);
1314 const void* new_code = forward_code(old_code);
1315 if (old_code != new_code) {
1316 method.SetEntryPointFromQuickCompiledCodePtrSize(new_code, kPointerSize);
1317 }
1318 } else {
Vladimir Marko2180d8e2019-04-11 14:22:53 +01001319 patch_object_visitor.PatchGcRoot(&method.DeclaringClassRoot());
Mathieu Chartier9d5956a2019-03-22 11:29:08 -07001320 method.UpdateEntrypoints(forward_code, kPointerSize);
1321 }
1322 }, target_base, kPointerSize);
Mathieu Chartiere42888f2016-04-14 10:49:19 -07001323 }
Andreas Gampea463b6a2016-08-12 21:53:32 -07001324 if (fixup_image) {
1325 {
1326 // Only touches objects in the app image, no need for mutator lock.
1327 TimingLogger::ScopedTiming timing("Fixup fields", &logger);
Mathieu Chartier9d5956a2019-03-22 11:29:08 -07001328 image_header.VisitPackedArtFields([&](ArtField& field) NO_THREAD_SAFETY_ANALYSIS {
Vladimir Marko2180d8e2019-04-11 14:22:53 +01001329 patch_object_visitor.template PatchGcRoot</*kMayBeNull=*/ false>(
1330 &field.DeclaringClassRoot());
Mathieu Chartier9d5956a2019-03-22 11:29:08 -07001331 }, target_base);
Andreas Gampea463b6a2016-08-12 21:53:32 -07001332 }
1333 {
1334 TimingLogger::ScopedTiming timing("Fixup imt", &logger);
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001335 image_header.VisitPackedImTables(forward_metadata, target_base, kPointerSize);
Andreas Gampea463b6a2016-08-12 21:53:32 -07001336 }
1337 {
1338 TimingLogger::ScopedTiming timing("Fixup conflict tables", &logger);
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001339 image_header.VisitPackedImtConflictTables(forward_metadata, target_base, kPointerSize);
Andreas Gampea463b6a2016-08-12 21:53:32 -07001340 }
Mathieu Chartier74ccee62018-10-10 10:30:29 -07001341 // Fix up the intern table.
1342 const auto& intern_table_section = image_header.GetInternedStringsSection();
1343 if (intern_table_section.Size() > 0u) {
1344 TimingLogger::ScopedTiming timing("Fixup intern table", &logger);
1345 ScopedObjectAccess soa(Thread::Current());
1346 // Fixup the pointers in the newly written intern table to contain image addresses.
1347 InternTable temp_intern_table;
1348 // Note that we require that ReadFromMemory does not make an internal copy of the elements
1349 // so that the VisitRoots() will update the memory directly rather than the copies.
Mathieu Chartier74ccee62018-10-10 10:30:29 -07001350 temp_intern_table.AddTableFromMemory(target_base + intern_table_section.Offset(),
1351 [&](InternTable::UnorderedSet& strings)
1352 REQUIRES_SHARED(Locks::mutator_lock_) {
1353 for (GcRoot<mirror::String>& root : strings) {
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001354 root = GcRoot<mirror::String>(forward_object(root.Read<kWithoutReadBarrier>()));
Mathieu Chartier74ccee62018-10-10 10:30:29 -07001355 }
Mathieu Chartier8cc418e2018-10-31 10:54:30 -07001356 }, /*is_boot_image=*/ false);
Mathieu Chartier74ccee62018-10-10 10:30:29 -07001357 }
Artem Udovichenkoa62cb9b2016-06-30 09:18:25 +00001358 }
Andreas Gampea463b6a2016-08-12 21:53:32 -07001359 if (VLOG_IS_ON(image)) {
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07001360 logger.Dump(LOG_STREAM(INFO));
Mathieu Chartierfbc31082016-01-24 11:59:56 -08001361 }
Andreas Gampea463b6a2016-08-12 21:53:32 -07001362 return true;
Andreas Gampe7fa55782016-06-15 17:45:01 -07001363 }
Andreas Gampea463b6a2016-08-12 21:53:32 -07001364};
Hiroshi Yamauchibd0fb612014-05-20 13:46:00 -07001365
Vladimir Marko82e1e272018-08-20 13:38:06 +00001366class ImageSpace::BootImageLoader {
1367 public:
  // Creates a loader for the boot image(s) identified by `image_location`,
  // compiled for `image_isa`. `boot_class_path` and `boot_class_path_locations`
  // hold the boot class path dex file names and their corresponding locations.
  // `relocate`, `executable` and `is_zygote` are stored for use while loading.
  // No file system access happens here; call FindImageFiles() afterwards to
  // locate the actual image files.
  BootImageLoader(const std::vector<std::string>& boot_class_path,
                  const std::vector<std::string>& boot_class_path_locations,
                  const std::string& image_location,
                  InstructionSet image_isa,
                  bool relocate,
                  bool executable,
                  bool is_zygote)
      : boot_class_path_(boot_class_path),
        boot_class_path_locations_(boot_class_path_locations),
        image_location_(image_location),
        image_isa_(image_isa),
        relocate_(relocate),
        executable_(executable),
        is_zygote_(is_zygote),
        // Search results below start out pessimistic until FindImageFiles() runs.
        has_system_(false),
        has_cache_(false),
        is_global_cache_(true),
        dalvik_cache_exists_(false),
        dalvik_cache_(),
        cache_filename_() {
  }
1389
1390 bool IsZygote() const { return is_zygote_; }
1391
1392 void FindImageFiles() {
1393 std::string system_filename;
Vladimir Marko82e1e272018-08-20 13:38:06 +00001394 bool found_image = FindImageFilenameImpl(image_location_.c_str(),
1395 image_isa_,
1396 &has_system_,
1397 &system_filename,
Vladimir Markoe3070022018-08-22 09:36:19 +00001398 &dalvik_cache_exists_,
Vladimir Marko82e1e272018-08-20 13:38:06 +00001399 &dalvik_cache_,
1400 &is_global_cache_,
1401 &has_cache_,
1402 &cache_filename_);
Vladimir Markoe3070022018-08-22 09:36:19 +00001403 DCHECK(!dalvik_cache_exists_ || !dalvik_cache_.empty());
Vladimir Marko82e1e272018-08-20 13:38:06 +00001404 DCHECK_EQ(found_image, has_system_ || has_cache_);
1405 }
1406
1407 bool HasSystem() const { return has_system_; }
1408 bool HasCache() const { return has_cache_; }
1409
Vladimir Markoe3070022018-08-22 09:36:19 +00001410 bool DalvikCacheExists() const { return dalvik_cache_exists_; }
Vladimir Marko82e1e272018-08-20 13:38:06 +00001411 bool IsGlobalCache() const { return is_global_cache_; }
1412
1413 const std::string& GetDalvikCache() const {
Vladimir Marko82e1e272018-08-20 13:38:06 +00001414 return dalvik_cache_;
1415 }
1416
1417 const std::string& GetCacheFilename() const {
Vladimir Marko82e1e272018-08-20 13:38:06 +00001418 return cache_filename_;
1419 }
1420
Andreas Gampe86823542019-02-25 09:38:49 -08001421 bool LoadFromSystem(bool validate_oat_file,
1422 size_t extra_reservation_size,
Vladimir Markod44d7032018-08-30 13:02:31 +01001423 /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
1424 /*out*/MemMap* extra_reservation,
1425 /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
Vladimir Markof4efa9e2018-10-17 14:12:45 +01001426 TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
Vladimir Marko82e1e272018-08-20 13:38:06 +00001427 std::string filename = GetSystemImageFilename(image_location_.c_str(), image_isa_);
Vladimir Markoc09cd052018-08-23 16:36:36 +01001428
Vladimir Marko7391c8c2018-11-21 17:58:44 +00001429 if (!LoadFromFile(filename,
Andreas Gampe86823542019-02-25 09:38:49 -08001430 validate_oat_file,
Vladimir Marko7391c8c2018-11-21 17:58:44 +00001431 extra_reservation_size,
1432 &logger,
1433 boot_image_spaces,
1434 extra_reservation,
1435 error_msg)) {
Vladimir Markoc09cd052018-08-23 16:36:36 +01001436 return false;
1437 }
1438
Vladimir Marko4df2d802018-09-27 16:42:44 +00001439 if (VLOG_IS_ON(image)) {
Vladimir Marko312f10e2018-11-21 12:35:24 +00001440 LOG(INFO) << "ImageSpace::BootImageLoader::LoadFromSystem exiting "
1441 << boot_image_spaces->front();
Vladimir Marko4df2d802018-09-27 16:42:44 +00001442 logger.Dump(LOG_STREAM(INFO));
1443 }
Vladimir Marko82e1e272018-08-20 13:38:06 +00001444 return true;
1445 }
1446
1447 bool LoadFromDalvikCache(
Vladimir Marko82e1e272018-08-20 13:38:06 +00001448 bool validate_oat_file,
Vladimir Markod44d7032018-08-30 13:02:31 +01001449 size_t extra_reservation_size,
1450 /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
1451 /*out*/MemMap* extra_reservation,
1452 /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
Vladimir Markof4efa9e2018-10-17 14:12:45 +01001453 TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
Vladimir Marko82e1e272018-08-20 13:38:06 +00001454 DCHECK(DalvikCacheExists());
Vladimir Markoc09cd052018-08-23 16:36:36 +01001455
Vladimir Marko7391c8c2018-11-21 17:58:44 +00001456 if (!LoadFromFile(cache_filename_,
1457 validate_oat_file,
1458 extra_reservation_size,
1459 &logger,
1460 boot_image_spaces,
1461 extra_reservation,
1462 error_msg)) {
Vladimir Markoc09cd052018-08-23 16:36:36 +01001463 return false;
1464 }
1465
Vladimir Marko4df2d802018-09-27 16:42:44 +00001466 if (VLOG_IS_ON(image)) {
Vladimir Marko312f10e2018-11-21 12:35:24 +00001467 LOG(INFO) << "ImageSpace::BootImageLoader::LoadFromDalvikCache exiting "
1468 << boot_image_spaces->front();
Vladimir Marko4df2d802018-09-27 16:42:44 +00001469 logger.Dump(LOG_STREAM(INFO));
1470 }
Vladimir Marko82e1e272018-08-20 13:38:06 +00001471 return true;
1472 }
1473
1474 private:
  // Loads a (possibly multi-component) boot image whose primary file is
  // `filename`. Reads the primary header, reserves the full image address
  // range, loads one ImageSpace per component, opens and optionally validates
  // the corresponding oat files, and relocates the spaces if needed.
  // On success, moves the loaded spaces into `boot_image_spaces` and the
  // leftover reservation into `extra_reservation`; on failure, returns false
  // and sets `error_msg`.
  bool LoadFromFile(
      const std::string& filename,
      bool validate_oat_file,
      size_t extra_reservation_size,
      TimingLogger* logger,
      /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
      /*out*/MemMap* extra_reservation,
      /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
    // The primary image header determines the component count and the size of
    // the memory reservation for all components.
    ImageHeader system_hdr;
    if (!ReadSpecificImageHeader(filename.c_str(), &system_hdr)) {
      *error_msg = StringPrintf("Cannot read header of %s", filename.c_str());
      return false;
    }
    // There must be at least one component and no more than boot class path entries.
    if (system_hdr.GetComponentCount() == 0u ||
        system_hdr.GetComponentCount() > boot_class_path_.size()) {
      *error_msg = StringPrintf("Unexpected component count in %s, received %u, "
                                "expected non-zero and <= %zu",
                                filename.c_str(),
                                system_hdr.GetComponentCount(),
                                boot_class_path_.size());
      return false;
    }
    // Reserve the address range for all images plus the requested extra space.
    MemMap image_reservation;
    MemMap local_extra_reservation;
    if (!ReserveBootImageMemory(system_hdr.GetImageReservationSize(),
                                reinterpret_cast32<uint32_t>(system_hdr.GetImageBegin()),
                                extra_reservation_size,
                                &image_reservation,
                                &local_extra_reservation,
                                error_msg)) {
      return false;
    }

    // Expand the single image location/filename into one entry per component.
    ArrayRef<const std::string> provided_locations(boot_class_path_locations_.data(),
                                                   system_hdr.GetComponentCount());
    std::vector<std::string> locations =
        ExpandMultiImageLocations(provided_locations, image_location_);
    std::vector<std::string> filenames =
        ExpandMultiImageLocations(provided_locations, filename);
    DCHECK_EQ(locations.size(), filenames.size());
    // Load each component's image space out of the shared reservation.
    std::vector<std::unique_ptr<ImageSpace>> spaces;
    spaces.reserve(locations.size());
    for (std::size_t i = 0u, size = locations.size(); i != size; ++i) {
      spaces.push_back(Load(locations[i], filenames[i], logger, &image_reservation, error_msg));
      const ImageSpace* space = spaces.back().get();
      if (space == nullptr) {
        return false;
      }
      // Only the primary (first) image declares the total component count and
      // reservation size; subsequent components must declare zero.
      uint32_t expected_component_count = (i == 0u) ? system_hdr.GetComponentCount() : 0u;
      uint32_t expected_reservation_size = (i == 0u) ? system_hdr.GetImageReservationSize() : 0u;
      if (!Loader::CheckImageReservationSize(*space, expected_reservation_size, error_msg) ||
          !Loader::CheckImageComponentCount(*space, expected_component_count, error_msg)) {
        return false;
      }
    }
    // Open the oat file for each space; the full boot class path expectation is
    // passed only for the first one, the others get an empty string.
    for (size_t i = 0u, size = spaces.size(); i != size; ++i) {
      std::string expected_boot_class_path =
          (i == 0u) ? android::base::Join(provided_locations, ':') : std::string();
      if (!OpenOatFile(spaces[i].get(),
                       boot_class_path_[i],
                       expected_boot_class_path,
                       validate_oat_file,
                       logger,
                       &image_reservation,
                       error_msg)) {
        return false;
      }
    }
    // All loaded pieces together must consume the entire reservation.
    if (!CheckReservationExhausted(image_reservation, error_msg)) {
      return false;
    }

    MaybeRelocateSpaces(spaces, logger);
    // Hand the results to the caller only after everything succeeded.
    boot_image_spaces->swap(spaces);
    *extra_reservation = std::move(local_extra_reservation);
    return true;
  }
1552
Mathieu Chartierd3f037b2018-12-06 23:50:56 -08001553 private:
  // Forwards pointers from a single contiguous source range [begin, begin + size)
  // by a fixed displacement `diff`.
  class SimpleRelocateVisitor {
   public:
    SimpleRelocateVisitor(uint32_t diff, uint32_t begin, uint32_t size)
        : diff_(diff), begin_(begin), size_(size) {}

    // Adapter taking the same arguments as SplitRangeRelocateVisitor
    // to simplify constructing the various visitors in DoRelocateSpaces().
    SimpleRelocateVisitor(uint32_t base_diff,
                          uint32_t current_diff,
                          uint32_t bound,
                          uint32_t begin,
                          uint32_t size)
        : SimpleRelocateVisitor(base_diff, begin, size) {
      // Check arguments unused by this class.
      DCHECK_EQ(base_diff, current_diff);
      DCHECK_EQ(bound, begin);
    }

    // Relocates `src` by `diff_`; `src` must lie within the source range.
    template <typename T>
    ALWAYS_INLINE T* operator()(T* src) const {
      DCHECK(InSource(src));
      uint32_t raw_src = reinterpret_cast32<uint32_t>(src);
      return reinterpret_cast32<T*>(raw_src + diff_);
    }

    // Returns true if `ptr` lies within [begin_, begin_ + size_). The unsigned
    // subtraction wraps around for pointers below `begin_`, so the single
    // comparison covers both the lower and the upper bound.
    template <typename T>
    ALWAYS_INLINE bool InSource(T* ptr) const {
      uint32_t raw_ptr = reinterpret_cast32<uint32_t>(ptr);
      return raw_ptr - begin_ < size_;
    }

   private:
    const uint32_t diff_;   // Displacement added to relocated pointers.
    const uint32_t begin_;  // Start of the source range.
    const uint32_t size_;   // Size of the source range.
  };
1590
  // Forwards pointers from a source range [begin, begin + size) that is split
  // at `bound` into two sub-ranges with different displacements: pointers
  // below `bound` (the boot image range) move by `base_diff`, pointers at or
  // above it (the extension range) by `current_diff`.
  class SplitRangeRelocateVisitor {
   public:
    SplitRangeRelocateVisitor(uint32_t base_diff,
                              uint32_t current_diff,
                              uint32_t bound,
                              uint32_t begin,
                              uint32_t size)
        : base_diff_(base_diff),
          current_diff_(current_diff),
          bound_(bound),
          begin_(begin),
          size_(size) {
      DCHECK_NE(begin_, bound_);
      // The bound separates the boot image range and the extension range.
      DCHECK_LT(bound_ - begin_, size_);
    }

    // Relocates `src`, picking the displacement based on which side of
    // `bound_` it falls on; `src` must lie within the source range.
    template <typename T>
    ALWAYS_INLINE T* operator()(T* src) const {
      DCHECK(InSource(src));
      uint32_t raw_src = reinterpret_cast32<uint32_t>(src);
      uint32_t diff = (raw_src < bound_) ? base_diff_ : current_diff_;
      return reinterpret_cast32<T*>(raw_src + diff);
    }

    // Returns true if `ptr` lies within [begin_, begin_ + size_); the unsigned
    // subtraction lets one comparison check both bounds.
    template <typename T>
    ALWAYS_INLINE bool InSource(T* ptr) const {
      uint32_t raw_ptr = reinterpret_cast32<uint32_t>(ptr);
      return raw_ptr - begin_ < size_;
    }

   private:
    const uint32_t base_diff_;     // Displacement for pointers below `bound_`.
    const uint32_t current_diff_;  // Displacement for pointers at or above `bound_`.
    const uint32_t bound_;         // Address separating the two sub-ranges.
    const uint32_t begin_;         // Start of the source range.
    const uint32_t size_;          // Size of the source range.
  };
Vladimir Marko4df2d802018-09-27 16:42:44 +00001629
Mathieu Chartier9d5956a2019-03-22 11:29:08 -07001630 static void** PointerAddress(ArtMethod* method, MemberOffset offset) {
1631 return reinterpret_cast<void**>(reinterpret_cast<uint8_t*>(method) + offset.Uint32Value());
1632 }
1633
Vladimir Marko4df2d802018-09-27 16:42:44 +00001634 template <PointerSize kPointerSize>
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001635 static void DoRelocateSpaces(ArrayRef<const std::unique_ptr<ImageSpace>>& spaces,
1636 int64_t base_diff64) REQUIRES_SHARED(Locks::mutator_lock_) {
1637 DCHECK(!spaces.empty());
Mathieu Chartier6f382012019-07-30 09:47:35 -07001638 gc::accounting::ContinuousSpaceBitmap patched_objects(
Mathieu Chartier2ffc74b2019-01-03 19:25:41 -08001639 gc::accounting::ContinuousSpaceBitmap::Create(
1640 "Marked objects",
1641 spaces.front()->Begin(),
1642 spaces.back()->End() - spaces.front()->Begin()));
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001643 const ImageHeader& base_header = spaces[0]->GetImageHeader();
1644 size_t base_component_count = base_header.GetComponentCount();
1645 DCHECK_LE(base_component_count, spaces.size());
1646 DoRelocateSpaces<kPointerSize, /*kExtension=*/ false>(
1647 spaces.SubArray(/*pos=*/ 0u, base_component_count),
1648 base_diff64,
Mathieu Chartier6f382012019-07-30 09:47:35 -07001649 &patched_objects);
Vladimir Marko4df2d802018-09-27 16:42:44 +00001650
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001651 for (size_t i = base_component_count, size = spaces.size(); i != size; ) {
1652 const ImageHeader& ext_header = spaces[i]->GetImageHeader();
1653 size_t ext_component_count = ext_header.GetComponentCount();
1654 DCHECK_LE(ext_component_count, size - i);
1655 DoRelocateSpaces<kPointerSize, /*kExtension=*/ true>(
1656 spaces.SubArray(/*pos=*/ i, ext_component_count),
1657 base_diff64,
Mathieu Chartier6f382012019-07-30 09:47:35 -07001658 &patched_objects);
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001659 i += ext_component_count;
1660 }
1661 }
1662
1663 template <PointerSize kPointerSize, bool kExtension>
1664 static void DoRelocateSpaces(ArrayRef<const std::unique_ptr<ImageSpace>> spaces,
1665 int64_t base_diff64,
1666 gc::accounting::ContinuousSpaceBitmap* patched_objects)
1667 REQUIRES_SHARED(Locks::mutator_lock_) {
1668 DCHECK(!spaces.empty());
1669 const ImageHeader& first_header = spaces.front()->GetImageHeader();
1670 uint32_t image_begin = reinterpret_cast32<uint32_t>(first_header.GetImageBegin());
1671 uint32_t image_size = first_header.GetImageReservationSize();
1672 DCHECK_NE(image_size, 0u);
1673 uint32_t source_begin = kExtension ? first_header.GetBootImageBegin() : image_begin;
1674 uint32_t source_size = kExtension ? first_header.GetBootImageSize() + image_size : image_size;
1675 if (kExtension) {
1676 DCHECK_EQ(first_header.GetBootImageBegin() + first_header.GetBootImageSize(), image_begin);
1677 }
1678 int64_t current_diff64 = kExtension
1679 ? static_cast<int64_t>(reinterpret_cast32<uint32_t>(spaces.front()->Begin())) -
1680 static_cast<int64_t>(image_begin)
1681 : base_diff64;
1682 uint32_t base_diff = static_cast<uint32_t>(base_diff64);
1683 uint32_t current_diff = static_cast<uint32_t>(current_diff64);
1684
1685 // For boot image the main visitor is a SimpleRelocateVisitor. For the boot image extension we
1686 // mostly use a SplitRelocationVisitor but some work can still use the SimpleRelocationVisitor.
1687 using MainRelocateVisitor = typename std::conditional<
1688 kExtension, SplitRangeRelocateVisitor, SimpleRelocateVisitor>::type;
1689 SimpleRelocateVisitor simple_relocate_visitor(current_diff, image_begin, image_size);
1690 MainRelocateVisitor main_relocate_visitor(
1691 base_diff, current_diff, /*bound=*/ image_begin, source_begin, source_size);
1692
1693 using MainPatchRelocateVisitor =
1694 PatchObjectVisitor<kPointerSize, MainRelocateVisitor, MainRelocateVisitor>;
1695 using SimplePatchRelocateVisitor =
1696 PatchObjectVisitor<kPointerSize, SimpleRelocateVisitor, SimpleRelocateVisitor>;
1697 MainPatchRelocateVisitor main_patch_object_visitor(main_relocate_visitor,
1698 main_relocate_visitor);
1699 SimplePatchRelocateVisitor simple_patch_object_visitor(simple_relocate_visitor,
1700 simple_relocate_visitor);
1701
1702 // Retrieve the Class.class, Method.class and Constructor.class needed in the loops below.
1703 ObjPtr<mirror::Class> class_class;
1704 ObjPtr<mirror::Class> method_class;
1705 ObjPtr<mirror::Class> constructor_class;
1706 {
1707 ObjPtr<mirror::ObjectArray<mirror::Object>> image_roots =
1708 simple_relocate_visitor(first_header.GetImageRoots<kWithoutReadBarrier>().Ptr());
1709 DCHECK(!patched_objects->Test(image_roots.Ptr()));
1710
1711 SimpleRelocateVisitor base_relocate_visitor(
1712 base_diff,
1713 source_begin,
1714 kExtension ? source_size - image_size : image_size);
1715 int32_t class_roots_index = enum_cast<int32_t>(ImageHeader::kClassRoots);
1716 DCHECK_LT(class_roots_index, image_roots->GetLength<kVerifyNone>());
1717 ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots =
1718 ObjPtr<mirror::ObjectArray<mirror::Class>>::DownCast(base_relocate_visitor(
1719 image_roots->GetWithoutChecks<kVerifyNone>(class_roots_index).Ptr()));
1720 if (kExtension) {
1721 DCHECK(patched_objects->Test(class_roots.Ptr()));
1722 class_class = GetClassRoot<mirror::Class, kWithoutReadBarrier>(class_roots);
1723 method_class = GetClassRoot<mirror::Method, kWithoutReadBarrier>(class_roots);
1724 constructor_class = GetClassRoot<mirror::Constructor, kWithoutReadBarrier>(class_roots);
1725 } else {
1726 DCHECK(!patched_objects->Test(class_roots.Ptr()));
1727 class_class = simple_relocate_visitor(
1728 GetClassRoot<mirror::Class, kWithoutReadBarrier>(class_roots).Ptr());
1729 method_class = simple_relocate_visitor(
1730 GetClassRoot<mirror::Method, kWithoutReadBarrier>(class_roots).Ptr());
1731 constructor_class = simple_relocate_visitor(
1732 GetClassRoot<mirror::Constructor, kWithoutReadBarrier>(class_roots).Ptr());
1733 }
1734 }
1735
Mathieu Chartier9d5956a2019-03-22 11:29:08 -07001736 for (const std::unique_ptr<ImageSpace>& space : spaces) {
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001737 // First patch the image header.
1738 reinterpret_cast<ImageHeader*>(space->Begin())->RelocateImageReferences(current_diff64);
1739 reinterpret_cast<ImageHeader*>(space->Begin())->RelocateBootImageReferences(base_diff64);
Vladimir Marko4df2d802018-09-27 16:42:44 +00001740
1741 // Patch fields and methods.
1742 const ImageHeader& image_header = space->GetImageHeader();
Mathieu Chartier9d5956a2019-03-22 11:29:08 -07001743 image_header.VisitPackedArtFields([&](ArtField& field) REQUIRES_SHARED(Locks::mutator_lock_) {
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001744 // Fields always reference class in the current image.
1745 simple_patch_object_visitor.template PatchGcRoot</*kMayBeNull=*/ false>(
Mathieu Chartier9d5956a2019-03-22 11:29:08 -07001746 &field.DeclaringClassRoot());
1747 }, space->Begin());
1748 image_header.VisitPackedArtMethods([&](ArtMethod& method)
1749 REQUIRES_SHARED(Locks::mutator_lock_) {
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001750 main_patch_object_visitor.PatchGcRoot(&method.DeclaringClassRoot());
Mathieu Chartier9d5956a2019-03-22 11:29:08 -07001751 void** data_address = PointerAddress(&method, ArtMethod::DataOffset(kPointerSize));
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001752 main_patch_object_visitor.PatchNativePointer(data_address);
Mathieu Chartier9d5956a2019-03-22 11:29:08 -07001753 void** entrypoint_address =
1754 PointerAddress(&method, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kPointerSize));
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001755 main_patch_object_visitor.PatchNativePointer(entrypoint_address);
Mathieu Chartier9d5956a2019-03-22 11:29:08 -07001756 }, space->Begin(), kPointerSize);
Mathieu Chartierd3f037b2018-12-06 23:50:56 -08001757 auto method_table_visitor = [&](ArtMethod* method) {
Vladimir Marko4df2d802018-09-27 16:42:44 +00001758 DCHECK(method != nullptr);
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001759 return main_relocate_visitor(method);
Vladimir Marko4df2d802018-09-27 16:42:44 +00001760 };
1761 image_header.VisitPackedImTables(method_table_visitor, space->Begin(), kPointerSize);
1762 image_header.VisitPackedImtConflictTables(method_table_visitor, space->Begin(), kPointerSize);
1763
1764 // Patch the intern table.
1765 if (image_header.GetInternedStringsSection().Size() != 0u) {
1766 const uint8_t* data = space->Begin() + image_header.GetInternedStringsSection().Offset();
1767 size_t read_count;
Vladimir Markof4efa9e2018-10-17 14:12:45 +01001768 InternTable::UnorderedSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
Vladimir Marko4df2d802018-09-27 16:42:44 +00001769 for (GcRoot<mirror::String>& slot : temp_set) {
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001770 // The intern table contains only strings in the current image.
1771 simple_patch_object_visitor.template PatchGcRoot</*kMayBeNull=*/ false>(&slot);
Vladimir Marko4df2d802018-09-27 16:42:44 +00001772 }
1773 }
1774
1775 // Patch the class table and classes, so that we can traverse class hierarchy to
1776 // determine the types of other objects when we visit them later.
1777 if (image_header.GetClassTableSection().Size() != 0u) {
1778 uint8_t* data = space->Begin() + image_header.GetClassTableSection().Offset();
1779 size_t read_count;
Vladimir Markof4efa9e2018-10-17 14:12:45 +01001780 ClassTable::ClassSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
Vladimir Marko4df2d802018-09-27 16:42:44 +00001781 DCHECK(!temp_set.empty());
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001782 // The class table contains only classes in the current image.
1783 ClassTableVisitor class_table_visitor(simple_relocate_visitor);
Vladimir Marko4df2d802018-09-27 16:42:44 +00001784 for (ClassTable::TableSlot& slot : temp_set) {
1785 slot.VisitRoot(class_table_visitor);
Vladimir Marko1fe58392019-04-10 16:14:56 +01001786 ObjPtr<mirror::Class> klass = slot.Read<kWithoutReadBarrier>();
Vladimir Marko4df2d802018-09-27 16:42:44 +00001787 DCHECK(klass != nullptr);
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001788 DCHECK(!patched_objects->Test(klass.Ptr()));
Vladimir Marko1fe58392019-04-10 16:14:56 +01001789 patched_objects->Set(klass.Ptr());
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001790 main_patch_object_visitor.VisitClass(klass, class_class);
Vladimir Marko4df2d802018-09-27 16:42:44 +00001791 // Then patch the non-embedded vtable and iftable.
Vladimir Markoc524e9e2019-03-26 10:54:50 +00001792 ObjPtr<mirror::PointerArray> vtable =
1793 klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001794 if ((kExtension ? simple_relocate_visitor.InSource(vtable.Ptr()) : vtable != nullptr) &&
1795 !patched_objects->Set(vtable.Ptr())) {
1796 main_patch_object_visitor.VisitPointerArray(vtable);
Vladimir Marko4df2d802018-09-27 16:42:44 +00001797 }
Vladimir Markoc524e9e2019-03-26 10:54:50 +00001798 ObjPtr<mirror::IfTable> iftable = klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
Vladimir Marko4df2d802018-09-27 16:42:44 +00001799 if (iftable != nullptr) {
Vladimir Markodbcb48f2018-11-12 11:47:04 +00001800 int32_t ifcount = klass->GetIfTableCount<kVerifyNone>();
Vladimir Marko4df2d802018-09-27 16:42:44 +00001801 for (int32_t i = 0; i != ifcount; ++i) {
Vladimir Markoc524e9e2019-03-26 10:54:50 +00001802 ObjPtr<mirror::PointerArray> unpatched_ifarray =
Vladimir Marko4df2d802018-09-27 16:42:44 +00001803 iftable->GetMethodArrayOrNull<kVerifyNone, kWithoutReadBarrier>(i);
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001804 if (kExtension ? simple_relocate_visitor.InSource(unpatched_ifarray.Ptr())
1805 : unpatched_ifarray != nullptr) {
Vladimir Marko4df2d802018-09-27 16:42:44 +00001806 // The iftable has not been patched, so we need to explicitly adjust the pointer.
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001807 ObjPtr<mirror::PointerArray> ifarray =
1808 simple_relocate_visitor(unpatched_ifarray.Ptr());
Vladimir Markoc524e9e2019-03-26 10:54:50 +00001809 if (!patched_objects->Set(ifarray.Ptr())) {
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001810 main_patch_object_visitor.VisitPointerArray(ifarray);
Vladimir Marko4df2d802018-09-27 16:42:44 +00001811 }
1812 }
1813 }
1814 }
1815 }
1816 }
1817 }
1818
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001819 for (const std::unique_ptr<ImageSpace>& space : spaces) {
Vladimir Marko4df2d802018-09-27 16:42:44 +00001820 const ImageHeader& image_header = space->GetImageHeader();
1821
1822 static_assert(IsAligned<kObjectAlignment>(sizeof(ImageHeader)), "Header alignment check");
1823 uint32_t objects_end = image_header.GetObjectsSection().Size();
1824 DCHECK_ALIGNED(objects_end, kObjectAlignment);
1825 for (uint32_t pos = sizeof(ImageHeader); pos != objects_end; ) {
1826 mirror::Object* object = reinterpret_cast<mirror::Object*>(space->Begin() + pos);
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001827 // Note: use Test() rather than Set() as this is the last time we're checking this object.
Mathieu Chartier2ffc74b2019-01-03 19:25:41 -08001828 if (!patched_objects->Test(object)) {
1829 // This is the last pass over objects, so we do not need to Set().
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001830 main_patch_object_visitor.VisitObject(object);
Vladimir Marko4617d582019-03-28 13:48:31 +00001831 ObjPtr<mirror::Class> klass = object->GetClass<kVerifyNone, kWithoutReadBarrier>();
Vladimir Marko4df2d802018-09-27 16:42:44 +00001832 if (klass->IsDexCacheClass<kVerifyNone>()) {
1833 // Patch dex cache array pointers and elements.
Vladimir Marko4617d582019-03-28 13:48:31 +00001834 ObjPtr<mirror::DexCache> dex_cache =
1835 object->AsDexCache<kVerifyNone, kWithoutReadBarrier>();
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001836 main_patch_object_visitor.VisitDexCacheArrays(dex_cache);
Vladimir Marko4df2d802018-09-27 16:42:44 +00001837 } else if (klass == method_class || klass == constructor_class) {
1838 // Patch the ArtMethod* in the mirror::Executable subobject.
1839 ObjPtr<mirror::Executable> as_executable =
Vladimir Markod7e9bbf2019-03-28 13:18:57 +00001840 ObjPtr<mirror::Executable>::DownCast(object);
Vladimir Marko4df2d802018-09-27 16:42:44 +00001841 ArtMethod* unpatched_method = as_executable->GetArtMethod<kVerifyNone>();
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001842 ArtMethod* patched_method = main_relocate_visitor(unpatched_method);
Vladimir Markof4efa9e2018-10-17 14:12:45 +01001843 as_executable->SetArtMethod</*kTransactionActive=*/ false,
1844 /*kCheckTransaction=*/ true,
Vladimir Marko4df2d802018-09-27 16:42:44 +00001845 kVerifyNone>(patched_method);
1846 }
1847 }
1848 pos += RoundUp(object->SizeOf<kVerifyNone>(), kObjectAlignment);
1849 }
1850 }
1851 }
1852
Vladimir Marko3364d182019-03-13 13:55:01 +00001853 void MaybeRelocateSpaces(const std::vector<std::unique_ptr<ImageSpace>>& spaces,
1854 TimingLogger* logger)
Vladimir Marko4df2d802018-09-27 16:42:44 +00001855 REQUIRES_SHARED(Locks::mutator_lock_) {
1856 TimingLogger::ScopedTiming timing("MaybeRelocateSpaces", logger);
1857 ImageSpace* first_space = spaces.front().get();
1858 const ImageHeader& first_space_header = first_space->GetImageHeader();
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001859 int64_t base_diff64 =
1860 static_cast<int64_t>(reinterpret_cast32<uint32_t>(first_space->Begin())) -
1861 static_cast<int64_t>(reinterpret_cast32<uint32_t>(first_space_header.GetImageBegin()));
Vladimir Marko3364d182019-03-13 13:55:01 +00001862 if (!relocate_) {
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001863 DCHECK_EQ(base_diff64, 0);
Vladimir Marko4df2d802018-09-27 16:42:44 +00001864 return;
1865 }
1866
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001867 ArrayRef<const std::unique_ptr<ImageSpace>> spaces_ref(spaces);
Vladimir Marko4df2d802018-09-27 16:42:44 +00001868 PointerSize pointer_size = first_space_header.GetPointerSize();
1869 if (pointer_size == PointerSize::k64) {
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001870 DoRelocateSpaces<PointerSize::k64>(spaces_ref, base_diff64);
Vladimir Marko4df2d802018-09-27 16:42:44 +00001871 } else {
Vladimir Markoc0b30c92019-07-23 14:58:25 +01001872 DoRelocateSpaces<PointerSize::k32>(spaces_ref, base_diff64);
Vladimir Marko4df2d802018-09-27 16:42:44 +00001873 }
1874 }
1875
Vladimir Markoc09cd052018-08-23 16:36:36 +01001876 std::unique_ptr<ImageSpace> Load(const std::string& image_location,
1877 const std::string& image_filename,
Vladimir Marko4df2d802018-09-27 16:42:44 +00001878 TimingLogger* logger,
Vladimir Markoc09cd052018-08-23 16:36:36 +01001879 /*inout*/MemMap* image_reservation,
Vladimir Markoc09cd052018-08-23 16:36:36 +01001880 /*out*/std::string* error_msg)
1881 REQUIRES_SHARED(Locks::mutator_lock_) {
1882 // Should this be a RDWR lock? This is only a defensive measure, as at
1883 // this point the image should exist.
1884 // However, only the zygote can write into the global dalvik-cache, so
1885 // restrict to zygote processes, or any process that isn't using
1886 // /data/dalvik-cache (which we assume to be allowed to write there).
1887 const bool rw_lock = is_zygote_ || !is_global_cache_;
1888
1889 // Note that we must not use the file descriptor associated with
1890 // ScopedFlock::GetFile to Init the image file. We want the file
1891 // descriptor (and the associated exclusive lock) to be released when
1892 // we leave Create.
1893 ScopedFlock image = LockedFile::Open(image_filename.c_str(),
Vladimir Markof4efa9e2018-10-17 14:12:45 +01001894 /*flags=*/ rw_lock ? (O_CREAT | O_RDWR) : O_RDONLY,
1895 /*block=*/ true,
Vladimir Markoc09cd052018-08-23 16:36:36 +01001896 error_msg);
1897
1898 VLOG(startup) << "Using image file " << image_filename.c_str() << " for image location "
1899 << image_location;
1900 // If we are in /system we can assume the image is good. We can also
1901 // assume this if we are using a relocated image (i.e. image checksum
1902 // matches) since this is only different by the offset. We need this to
1903 // make sure that host tests continue to work.
1904 // Since we are the boot image, pass null since we load the oat file from the boot image oat
1905 // file name.
1906 return Loader::Init(image_filename.c_str(),
1907 image_location.c_str(),
Vladimir Markof4efa9e2018-10-17 14:12:45 +01001908 /*oat_file=*/ nullptr,
Vladimir Marko4df2d802018-09-27 16:42:44 +00001909 logger,
Vladimir Markoc09cd052018-08-23 16:36:36 +01001910 image_reservation,
Vladimir Markoc09cd052018-08-23 16:36:36 +01001911 error_msg);
1912 }
1913
Vladimir Marko312f10e2018-11-21 12:35:24 +00001914 bool OpenOatFile(ImageSpace* space,
Vladimir Marko91f10322018-12-07 18:04:10 +00001915 const std::string& dex_filename,
1916 const std::string& expected_boot_class_path,
Vladimir Marko312f10e2018-11-21 12:35:24 +00001917 bool validate_oat_file,
1918 TimingLogger* logger,
1919 /*inout*/MemMap* image_reservation,
1920 /*out*/std::string* error_msg) {
1921 // VerifyImageAllocations() will be called later in Runtime::Init()
1922 // as some class roots like ArtMethod::java_lang_reflect_ArtMethod_
1923 // and ArtField::java_lang_reflect_ArtField_, which are used from
1924 // Object::SizeOf() which VerifyImageAllocations() calls, are not
1925 // set yet at this point.
1926 DCHECK(image_reservation != nullptr);
1927 std::unique_ptr<OatFile> oat_file;
1928 {
1929 TimingLogger::ScopedTiming timing("OpenOatFile", logger);
1930 std::string oat_filename =
1931 ImageHeader::GetOatLocationFromImageLocation(space->GetImageFilename());
Vladimir Marko91f10322018-12-07 18:04:10 +00001932 std::string oat_location =
1933 ImageHeader::GetOatLocationFromImageLocation(space->GetImageLocation());
Vladimir Marko312f10e2018-11-21 12:35:24 +00001934
1935 oat_file.reset(OatFile::Open(/*zip_fd=*/ -1,
1936 oat_filename,
Vladimir Marko91f10322018-12-07 18:04:10 +00001937 oat_location,
Vladimir Marko3364d182019-03-13 13:55:01 +00001938 executable_,
Vladimir Marko312f10e2018-11-21 12:35:24 +00001939 /*low_4gb=*/ false,
Vladimir Marko91f10322018-12-07 18:04:10 +00001940 /*abs_dex_location=*/ dex_filename.c_str(),
Vladimir Marko312f10e2018-11-21 12:35:24 +00001941 image_reservation,
1942 error_msg));
1943 if (oat_file == nullptr) {
1944 *error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
1945 oat_filename.c_str(),
1946 space->GetName(),
1947 error_msg->c_str());
1948 return false;
1949 }
1950 const ImageHeader& image_header = space->GetImageHeader();
1951 uint32_t oat_checksum = oat_file->GetOatHeader().GetChecksum();
1952 uint32_t image_oat_checksum = image_header.GetOatChecksum();
1953 if (oat_checksum != image_oat_checksum) {
1954 *error_msg = StringPrintf("Failed to match oat file checksum 0x%x to expected oat checksum"
1955 " 0x%x in image %s",
1956 oat_checksum,
1957 image_oat_checksum,
1958 space->GetName());
1959 return false;
1960 }
Vladimir Marko91f10322018-12-07 18:04:10 +00001961 const char* oat_boot_class_path =
1962 oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kBootClassPathKey);
1963 oat_boot_class_path = (oat_boot_class_path != nullptr) ? oat_boot_class_path : "";
1964 if (expected_boot_class_path != oat_boot_class_path) {
1965 *error_msg = StringPrintf("Failed to match oat boot class path %s to expected "
1966 "boot class path %s in image %s",
1967 oat_boot_class_path,
1968 expected_boot_class_path.c_str(),
1969 space->GetName());
1970 return false;
1971 }
Vladimir Marko312f10e2018-11-21 12:35:24 +00001972 ptrdiff_t relocation_diff = space->Begin() - image_header.GetImageBegin();
1973 CHECK(image_header.GetOatDataBegin() != nullptr);
1974 uint8_t* oat_data_begin = image_header.GetOatDataBegin() + relocation_diff;
1975 if (oat_file->Begin() != oat_data_begin) {
1976 *error_msg = StringPrintf("Oat file '%s' referenced from image %s has unexpected begin"
1977 " %p v. %p",
1978 oat_filename.c_str(),
1979 space->GetName(),
1980 oat_file->Begin(),
1981 oat_data_begin);
1982 return false;
1983 }
1984 }
1985 if (validate_oat_file) {
1986 TimingLogger::ScopedTiming timing("ValidateOatFile", logger);
1987 if (!ImageSpace::ValidateOatFile(*oat_file, error_msg)) {
1988 DCHECK(!error_msg->empty());
1989 return false;
1990 }
1991 }
1992 space->oat_file_ = std::move(oat_file);
1993 space->oat_file_non_owned_ = space->oat_file_.get();
1994 return true;
1995 }
1996
Vladimir Marko312f10e2018-11-21 12:35:24 +00001997 bool ReserveBootImageMemory(uint32_t reservation_size,
1998 uint32_t image_start,
Vladimir Markod44d7032018-08-30 13:02:31 +01001999 size_t extra_reservation_size,
Vladimir Markoc09cd052018-08-23 16:36:36 +01002000 /*out*/MemMap* image_reservation,
Vladimir Markod44d7032018-08-30 13:02:31 +01002001 /*out*/MemMap* extra_reservation,
Vladimir Markoc09cd052018-08-23 16:36:36 +01002002 /*out*/std::string* error_msg) {
Vladimir Marko7391c8c2018-11-21 17:58:44 +00002003 DCHECK_ALIGNED(reservation_size, kPageSize);
2004 DCHECK_ALIGNED(image_start, kPageSize);
Vladimir Markoc09cd052018-08-23 16:36:36 +01002005 DCHECK(!image_reservation->IsValid());
Vladimir Marko312f10e2018-11-21 12:35:24 +00002006 DCHECK_LT(extra_reservation_size, std::numeric_limits<uint32_t>::max() - reservation_size);
2007 size_t total_size = reservation_size + extra_reservation_size;
Vladimir Markoae581ed2018-10-08 09:29:05 +01002008 // If relocating, choose a random address for ALSR.
Vladimir Marko3364d182019-03-13 13:55:01 +00002009 uint32_t addr = relocate_ ? ART_BASE_ADDRESS + ChooseRelocationOffsetDelta() : image_start;
Vladimir Markoc09cd052018-08-23 16:36:36 +01002010 *image_reservation =
2011 MemMap::MapAnonymous("Boot image reservation",
Vladimir Markoae581ed2018-10-08 09:29:05 +01002012 reinterpret_cast32<uint8_t*>(addr),
2013 total_size,
Vladimir Markoc09cd052018-08-23 16:36:36 +01002014 PROT_NONE,
Vladimir Markof4efa9e2018-10-17 14:12:45 +01002015 /*low_4gb=*/ true,
2016 /*reuse=*/ false,
2017 /*reservation=*/ nullptr,
Vladimir Markoc09cd052018-08-23 16:36:36 +01002018 error_msg);
2019 if (!image_reservation->IsValid()) {
2020 return false;
2021 }
Vladimir Markod44d7032018-08-30 13:02:31 +01002022 DCHECK(!extra_reservation->IsValid());
2023 if (extra_reservation_size != 0u) {
2024 DCHECK_ALIGNED(extra_reservation_size, kPageSize);
2025 DCHECK_LT(extra_reservation_size, image_reservation->Size());
2026 uint8_t* split = image_reservation->End() - extra_reservation_size;
2027 *extra_reservation = image_reservation->RemapAtEnd(split,
2028 "Boot image extra reservation",
2029 PROT_NONE,
2030 error_msg);
2031 if (!extra_reservation->IsValid()) {
2032 return false;
2033 }
2034 }
Vladimir Markoc09cd052018-08-23 16:36:36 +01002035
2036 return true;
2037 }
2038
Vladimir Marko312f10e2018-11-21 12:35:24 +00002039 bool CheckReservationExhausted(const MemMap& image_reservation, /*out*/std::string* error_msg) {
Vladimir Markoc09cd052018-08-23 16:36:36 +01002040 if (image_reservation.IsValid()) {
2041 *error_msg = StringPrintf("Excessive image reservation after loading boot image: %p-%p",
2042 image_reservation.Begin(),
2043 image_reservation.End());
2044 return false;
2045 }
Vladimir Markoc09cd052018-08-23 16:36:36 +01002046 return true;
2047 }
2048
Vladimir Marko91f10322018-12-07 18:04:10 +00002049 const std::vector<std::string>& boot_class_path_;
2050 const std::vector<std::string>& boot_class_path_locations_;
Vladimir Marko82e1e272018-08-20 13:38:06 +00002051 const std::string& image_location_;
2052 InstructionSet image_isa_;
Vladimir Marko3364d182019-03-13 13:55:01 +00002053 bool relocate_;
2054 bool executable_;
Vladimir Marko82e1e272018-08-20 13:38:06 +00002055 bool is_zygote_;
2056 bool has_system_;
2057 bool has_cache_;
2058 bool is_global_cache_;
Vladimir Markoe3070022018-08-22 09:36:19 +00002059 bool dalvik_cache_exists_;
Vladimir Marko82e1e272018-08-20 13:38:06 +00002060 std::string dalvik_cache_;
2061 std::string cache_filename_;
2062};
2063
Andreas Gampea463b6a2016-08-12 21:53:32 -07002064static constexpr uint64_t kLowSpaceValue = 50 * MB;
2065static constexpr uint64_t kTmpFsSentinelValue = 384 * MB;
2066
2067// Read the free space of the cache partition and make a decision whether to keep the generated
2068// image. This is to try to mitigate situations where the system might run out of space later.
2069static bool CheckSpace(const std::string& cache_filename, std::string* error_msg) {
2070 // Using statvfs vs statvfs64 because of b/18207376, and it is enough for all practical purposes.
2071 struct statvfs buf;
2072
2073 int res = TEMP_FAILURE_RETRY(statvfs(cache_filename.c_str(), &buf));
2074 if (res != 0) {
2075 // Could not stat. Conservatively tell the system to delete the image.
2076 *error_msg = "Could not stat the filesystem, assuming low-memory situation.";
2077 return false;
Nicolas Geoffray1bc977c2016-01-23 14:15:49 +00002078 }
Nicolas Geoffray1bc977c2016-01-23 14:15:49 +00002079
Andreas Gampea463b6a2016-08-12 21:53:32 -07002080 uint64_t fs_overall_size = buf.f_bsize * static_cast<uint64_t>(buf.f_blocks);
2081 // Zygote is privileged, but other things are not. Use bavail.
2082 uint64_t fs_free_size = buf.f_bsize * static_cast<uint64_t>(buf.f_bavail);
Brian Carlstrom56d947f2013-07-15 13:14:23 -07002083
Andreas Gampea463b6a2016-08-12 21:53:32 -07002084 // Take the overall size as an indicator for a tmpfs, which is being used for the decryption
2085 // environment. We do not want to fail quickening the boot image there, as it is beneficial
2086 // for time-to-UI.
2087 if (fs_overall_size > kTmpFsSentinelValue) {
2088 if (fs_free_size < kLowSpaceValue) {
2089 *error_msg = StringPrintf("Low-memory situation: only %4.2f megabytes available, need at "
2090 "least %" PRIu64 ".",
2091 static_cast<double>(fs_free_size) / MB,
2092 kLowSpaceValue / MB);
Brian Carlstrom56d947f2013-07-15 13:14:23 -07002093 return false;
2094 }
2095 }
2096 return true;
2097}
2098
Vladimir Marko82e1e272018-08-20 13:38:06 +00002099bool ImageSpace::LoadBootImage(
Vladimir Marko91f10322018-12-07 18:04:10 +00002100 const std::vector<std::string>& boot_class_path,
2101 const std::vector<std::string>& boot_class_path_locations,
Vladimir Marko82e1e272018-08-20 13:38:06 +00002102 const std::string& image_location,
2103 const InstructionSet image_isa,
Andreas Gampe86823542019-02-25 09:38:49 -08002104 ImageSpaceLoadingOrder order,
Vladimir Marko3364d182019-03-13 13:55:01 +00002105 bool relocate,
2106 bool executable,
2107 bool is_zygote,
Vladimir Markod44d7032018-08-30 13:02:31 +01002108 size_t extra_reservation_size,
2109 /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
2110 /*out*/MemMap* extra_reservation) {
Andreas Gampea463b6a2016-08-12 21:53:32 -07002111 ScopedTrace trace(__FUNCTION__);
2112
Vladimir Marko82e1e272018-08-20 13:38:06 +00002113 DCHECK(boot_image_spaces != nullptr);
2114 DCHECK(boot_image_spaces->empty());
Vladimir Markod44d7032018-08-30 13:02:31 +01002115 DCHECK_ALIGNED(extra_reservation_size, kPageSize);
2116 DCHECK(extra_reservation != nullptr);
Vladimir Marko82e1e272018-08-20 13:38:06 +00002117 DCHECK_NE(image_isa, InstructionSet::kNone);
2118
2119 if (image_location.empty()) {
2120 return false;
2121 }
2122
Vladimir Marko3364d182019-03-13 13:55:01 +00002123 BootImageLoader loader(boot_class_path,
2124 boot_class_path_locations,
2125 image_location,
2126 image_isa,
2127 relocate,
2128 executable,
2129 is_zygote);
Vladimir Marko82e1e272018-08-20 13:38:06 +00002130
Andreas Gampea463b6a2016-08-12 21:53:32 -07002131 // Step 0: Extra zygote work.
2132
2133 // Step 0.a: If we're the zygote, mark boot.
Vladimir Marko82e1e272018-08-20 13:38:06 +00002134 if (loader.IsZygote() && CanWriteToDalvikCache(image_isa)) {
Andreas Gampea463b6a2016-08-12 21:53:32 -07002135 MarkZygoteStart(image_isa, Runtime::Current()->GetZygoteMaxFailedBoots());
2136 }
2137
Vladimir Marko82e1e272018-08-20 13:38:06 +00002138 loader.FindImageFiles();
2139
Andreas Gampea463b6a2016-08-12 21:53:32 -07002140 // Step 0.b: If we're the zygote, check for free space, and prune the cache preemptively,
2141 // if necessary. While the runtime may be fine (it is pretty tolerant to
2142 // out-of-disk-space situations), other parts of the platform are not.
2143 //
2144 // The advantage of doing this proactively is that the later steps are simplified,
2145 // i.e., we do not need to code retries.
Vladimir Marko3364d182019-03-13 13:55:01 +00002146 bool low_space = false;
Vladimir Marko82e1e272018-08-20 13:38:06 +00002147 if (loader.IsZygote() && loader.DalvikCacheExists()) {
Andreas Gampe6e74abb2018-03-01 17:33:19 -08002148 // Extra checks for the zygote. These only apply when loading the first image, explained below.
Vladimir Marko82e1e272018-08-20 13:38:06 +00002149 const std::string& dalvik_cache = loader.GetDalvikCache();
Andreas Gampea463b6a2016-08-12 21:53:32 -07002150 DCHECK(!dalvik_cache.empty());
2151 std::string local_error_msg;
Andreas Gampe6e74abb2018-03-01 17:33:19 -08002152 bool check_space = CheckSpace(dalvik_cache, &local_error_msg);
Vladimir Marko4df2d802018-09-27 16:42:44 +00002153 if (!check_space) {
Andreas Gampea463b6a2016-08-12 21:53:32 -07002154 LOG(WARNING) << local_error_msg << " Preemptively pruning the dalvik cache.";
2155 PruneDalvikCache(image_isa);
2156
2157 // Re-evaluate the image.
Vladimir Marko82e1e272018-08-20 13:38:06 +00002158 loader.FindImageFiles();
Vladimir Marko3364d182019-03-13 13:55:01 +00002159
Andreas Gampe6e74abb2018-03-01 17:33:19 -08002160 // Disable compilation/patching - we do not want to fill up the space again.
Vladimir Marko3364d182019-03-13 13:55:01 +00002161 low_space = true;
Andreas Gampe6e74abb2018-03-01 17:33:19 -08002162 }
Andreas Gampea463b6a2016-08-12 21:53:32 -07002163 }
2164
2165 // Collect all the errors.
2166 std::vector<std::string> error_msgs;
2167
Andreas Gampe86823542019-02-25 09:38:49 -08002168 auto try_load_from = [&](auto has_fn, auto load_fn, bool validate_oat_file) {
2169 if ((loader.*has_fn)()) {
2170 std::string local_error_msg;
2171 if ((loader.*load_fn)(validate_oat_file,
2172 extra_reservation_size,
2173 boot_image_spaces,
2174 extra_reservation,
2175 &local_error_msg)) {
2176 return true;
2177 }
2178 error_msgs.push_back(local_error_msg);
2179 }
2180 return false;
2181 };
Andreas Gampea463b6a2016-08-12 21:53:32 -07002182
Andreas Gampe86823542019-02-25 09:38:49 -08002183 auto try_load_from_system = [&]() {
Andreas Gampe96b3baa2019-03-12 12:45:58 -07002184 // Validate the oat files if the loading order checks data first. Otherwise assume system
2185 // integrity.
2186 return try_load_from(&BootImageLoader::HasSystem,
2187 &BootImageLoader::LoadFromSystem,
2188 /*validate_oat_file=*/ order != ImageSpaceLoadingOrder::kSystemFirst);
Andreas Gampe86823542019-02-25 09:38:49 -08002189 };
2190 auto try_load_from_cache = [&]() {
Andreas Gampe96b3baa2019-03-12 12:45:58 -07002191 // Always validate oat files from the dalvik cache.
2192 return try_load_from(&BootImageLoader::HasCache,
2193 &BootImageLoader::LoadFromDalvikCache,
2194 /*validate_oat_file=*/ true);
Andreas Gampe86823542019-02-25 09:38:49 -08002195 };
2196
2197 auto invoke_sequentially = [](auto first, auto second) {
2198 return first() || second();
2199 };
2200
2201 // Step 1+2: Check system and cache images in the asked-for order.
2202 if (order == ImageSpaceLoadingOrder::kSystemFirst) {
2203 if (invoke_sequentially(try_load_from_system, try_load_from_cache)) {
Vladimir Marko82e1e272018-08-20 13:38:06 +00002204 return true;
Andreas Gampea463b6a2016-08-12 21:53:32 -07002205 }
Andreas Gampe86823542019-02-25 09:38:49 -08002206 } else {
2207 if (invoke_sequentially(try_load_from_cache, try_load_from_system)) {
Vladimir Marko4df2d802018-09-27 16:42:44 +00002208 return true;
Andreas Gampea463b6a2016-08-12 21:53:32 -07002209 }
Andreas Gampea463b6a2016-08-12 21:53:32 -07002210 }
2211
Vladimir Marko82e1e272018-08-20 13:38:06 +00002212 // Step 3: We do not have an existing image in /system,
2213 // so generate an image into the dalvik cache.
Vladimir Markoe3070022018-08-22 09:36:19 +00002214 if (!loader.HasSystem() && loader.DalvikCacheExists()) {
Andreas Gampea463b6a2016-08-12 21:53:32 -07002215 std::string local_error_msg;
Vladimir Marko3364d182019-03-13 13:55:01 +00002216 if (low_space || !Runtime::Current()->IsImageDex2OatEnabled()) {
Andreas Gampea463b6a2016-08-12 21:53:32 -07002217 local_error_msg = "Image compilation disabled.";
Vladimir Marko3364d182019-03-13 13:55:01 +00002218 } else if (ImageCreationAllowed(loader.IsGlobalCache(),
2219 image_isa,
2220 is_zygote,
2221 &local_error_msg)) {
Vladimir Marko82e1e272018-08-20 13:38:06 +00002222 bool compilation_success =
2223 GenerateImage(loader.GetCacheFilename(), image_isa, &local_error_msg);
Andreas Gampea463b6a2016-08-12 21:53:32 -07002224 if (compilation_success) {
Vladimir Markof4efa9e2018-10-17 14:12:45 +01002225 if (loader.LoadFromDalvikCache(/*validate_oat_file=*/ false,
Vladimir Markod44d7032018-08-30 13:02:31 +01002226 extra_reservation_size,
Vladimir Marko82e1e272018-08-20 13:38:06 +00002227 boot_image_spaces,
Vladimir Markod44d7032018-08-30 13:02:31 +01002228 extra_reservation,
Vladimir Marko82e1e272018-08-20 13:38:06 +00002229 &local_error_msg)) {
2230 return true;
Andreas Gampea463b6a2016-08-12 21:53:32 -07002231 }
2232 }
2233 }
2234 error_msgs.push_back(StringPrintf("Cannot compile image to %s: %s",
Vladimir Marko82e1e272018-08-20 13:38:06 +00002235 loader.GetCacheFilename().c_str(),
Andreas Gampea463b6a2016-08-12 21:53:32 -07002236 local_error_msg.c_str()));
2237 }
2238
Vladimir Marko82e1e272018-08-20 13:38:06 +00002239 // We failed. Prune the cache the free up space, create a compound error message
2240 // and return false.
Vladimir Marko3364d182019-03-13 13:55:01 +00002241 if (loader.DalvikCacheExists()) {
2242 PruneDalvikCache(image_isa);
2243 }
Andreas Gampea463b6a2016-08-12 21:53:32 -07002244
2245 std::ostringstream oss;
2246 bool first = true;
Andreas Gampe4c481a42016-11-03 08:21:59 -07002247 for (const auto& msg : error_msgs) {
Andreas Gampea463b6a2016-08-12 21:53:32 -07002248 if (!first) {
2249 oss << "\n ";
2250 }
2251 oss << msg;
2252 }
Andreas Gampea463b6a2016-08-12 21:53:32 -07002253
Vladimir Marko82e1e272018-08-20 13:38:06 +00002254 LOG(ERROR) << "Could not create image space with image file '" << image_location << "'. "
2255 << "Attempting to fall back to imageless running. Error was: " << oss.str();
Andreas Gampea463b6a2016-08-12 21:53:32 -07002256
Vladimir Marko82e1e272018-08-20 13:38:06 +00002257 return false;
Andreas Gampe2bd84282016-12-05 12:37:36 -08002258}
2259
Igor Murashkin8275fba2017-05-02 15:58:02 -07002260ImageSpace::~ImageSpace() {
Vladimir Marko3364d182019-03-13 13:55:01 +00002261 // Everything done by member destructors. Classes forward-declared in header are now defined.
Igor Murashkin8275fba2017-05-02 15:58:02 -07002262}
2263
Andreas Gampea463b6a2016-08-12 21:53:32 -07002264std::unique_ptr<ImageSpace> ImageSpace::CreateFromAppImage(const char* image,
2265 const OatFile* oat_file,
2266 std::string* error_msg) {
Vladimir Marko312f10e2018-11-21 12:35:24 +00002267 // Note: The oat file has already been validated.
Vladimir Marko4df2d802018-09-27 16:42:44 +00002268 return Loader::InitAppImage(image,
2269 image,
Vladimir Marko4df2d802018-09-27 16:42:44 +00002270 oat_file,
Vladimir Markof4efa9e2018-10-17 14:12:45 +01002271 /*image_reservation=*/ nullptr,
Vladimir Marko4df2d802018-09-27 16:42:44 +00002272 error_msg);
Andreas Gampea463b6a2016-08-12 21:53:32 -07002273}
2274
Andreas Gampe22f8e5c2014-07-09 11:38:21 -07002275const OatFile* ImageSpace::GetOatFile() const {
Andreas Gampe88da3b02015-06-12 20:38:49 -07002276 return oat_file_non_owned_;
Andreas Gampe22f8e5c2014-07-09 11:38:21 -07002277}
2278
Mathieu Chartierf9c6fc62015-10-07 11:44:05 -07002279std::unique_ptr<const OatFile> ImageSpace::ReleaseOatFile() {
2280 CHECK(oat_file_ != nullptr);
2281 return std::move(oat_file_);
Ian Rogers1d54e732013-05-02 21:10:01 -07002282}
2283
Ian Rogers1d54e732013-05-02 21:10:01 -07002284void ImageSpace::Dump(std::ostream& os) const {
2285 os << GetType()
Mathieu Chartier590fee92013-09-13 13:46:47 -07002286 << " begin=" << reinterpret_cast<void*>(Begin())
Ian Rogers1d54e732013-05-02 21:10:01 -07002287 << ",end=" << reinterpret_cast<void*>(End())
2288 << ",size=" << PrettySize(Size())
2289 << ",name=\"" << GetName() << "\"]";
2290}
2291
Richard Uhler84f50ae2017-02-06 15:12:45 +00002292bool ImageSpace::ValidateOatFile(const OatFile& oat_file, std::string* error_msg) {
David Sehr013fd802018-01-11 22:55:24 -08002293 const ArtDexFileLoader dex_file_loader;
Andreas Gampeb40d3612018-06-26 15:49:42 -07002294 for (const OatDexFile* oat_dex_file : oat_file.GetOatDexFiles()) {
Richard Uhler84f50ae2017-02-06 15:12:45 +00002295 const std::string& dex_file_location = oat_dex_file->GetDexFileLocation();
2296
2297 // Skip multidex locations - These will be checked when we visit their
2298 // corresponding primary non-multidex location.
Mathieu Chartier79c87da2017-10-10 11:54:29 -07002299 if (DexFileLoader::IsMultiDexLocation(dex_file_location.c_str())) {
Richard Uhler84f50ae2017-02-06 15:12:45 +00002300 continue;
2301 }
2302
2303 std::vector<uint32_t> checksums;
David Sehr013fd802018-01-11 22:55:24 -08002304 if (!dex_file_loader.GetMultiDexChecksums(dex_file_location.c_str(), &checksums, error_msg)) {
Richard Uhler84f50ae2017-02-06 15:12:45 +00002305 *error_msg = StringPrintf("ValidateOatFile failed to get checksums of dex file '%s' "
2306 "referenced by oat file %s: %s",
2307 dex_file_location.c_str(),
2308 oat_file.GetLocation().c_str(),
2309 error_msg->c_str());
2310 return false;
2311 }
2312 CHECK(!checksums.empty());
2313 if (checksums[0] != oat_dex_file->GetDexFileLocationChecksum()) {
2314 *error_msg = StringPrintf("ValidateOatFile found checksum mismatch between oat file "
2315 "'%s' and dex file '%s' (0x%x != 0x%x)",
2316 oat_file.GetLocation().c_str(),
2317 dex_file_location.c_str(),
2318 oat_dex_file->GetDexFileLocationChecksum(),
2319 checksums[0]);
2320 return false;
2321 }
2322
2323 // Verify checksums for any related multidex entries.
2324 for (size_t i = 1; i < checksums.size(); i++) {
Mathieu Chartier79c87da2017-10-10 11:54:29 -07002325 std::string multi_dex_location = DexFileLoader::GetMultiDexLocation(
2326 i,
2327 dex_file_location.c_str());
Andreas Gampeb40d3612018-06-26 15:49:42 -07002328 const OatDexFile* multi_dex = oat_file.GetOatDexFile(multi_dex_location.c_str(),
2329 nullptr,
2330 error_msg);
Richard Uhler84f50ae2017-02-06 15:12:45 +00002331 if (multi_dex == nullptr) {
2332 *error_msg = StringPrintf("ValidateOatFile oat file '%s' is missing entry '%s'",
2333 oat_file.GetLocation().c_str(),
2334 multi_dex_location.c_str());
2335 return false;
2336 }
2337
2338 if (checksums[i] != multi_dex->GetDexFileLocationChecksum()) {
2339 *error_msg = StringPrintf("ValidateOatFile found checksum mismatch between oat file "
2340 "'%s' and dex file '%s' (0x%x != 0x%x)",
2341 oat_file.GetLocation().c_str(),
2342 multi_dex_location.c_str(),
2343 multi_dex->GetDexFileLocationChecksum(),
2344 checksums[i]);
2345 return false;
2346 }
2347 }
2348 }
2349 return true;
2350}
2351
// Builds a checksum string for the boot class path based on an on-disk boot
// image: an "i;<component-count>/<image-checksum>" prefix for the components
// covered by the image, followed by ":d/<checksum>..." entries for each
// remaining boot class path dex file. Returns an empty string and fills
// `*error_msg` on failure.
std::string ImageSpace::GetBootClassPathChecksums(ArrayRef<const std::string> boot_class_path,
                                                  const std::string& image_location,
                                                  InstructionSet image_isa,
                                                  ImageSpaceLoadingOrder order,
                                                  /*out*/std::string* error_msg) {
  // Resolve `image_location` to a concrete file in /system and/or the
  // dalvik-cache.
  std::string system_filename;
  bool has_system = false;
  std::string cache_filename;
  bool has_cache = false;
  bool dalvik_cache_exists = false;
  bool is_global_cache = false;
  if (!FindImageFilename(image_location.c_str(),
                         image_isa,
                         &system_filename,
                         &has_system,
                         &cache_filename,
                         &dalvik_cache_exists,
                         &has_cache,
                         &is_global_cache)) {
    *error_msg = StringPrintf("Unable to find image file for %s and %s",
                              image_location.c_str(),
                              GetInstructionSetString(image_isa));
    return std::string();
  }

  DCHECK(has_system || has_cache);
  // Pick the file honoring the requested loading order, falling back to
  // whichever one exists.
  const std::string& filename = (order == ImageSpaceLoadingOrder::kSystemFirst)
      ? (has_system ? system_filename : cache_filename)
      : (has_cache ? cache_filename : system_filename);
  std::unique_ptr<ImageHeader> header = ReadSpecificImageHeader(filename.c_str(), error_msg);
  if (header == nullptr) {
    return std::string();
  }
  // The image may cover only a prefix of the boot class path; it must cover
  // at least one component and no more than the full path.
  if (header->GetComponentCount() == 0u || header->GetComponentCount() > boot_class_path.size()) {
    *error_msg = StringPrintf("Unexpected component count in %s, received %u, "
                              "expected non-zero and <= %zu",
                              filename.c_str(),
                              header->GetComponentCount(),
                              boot_class_path.size());
    return std::string();
  }

  // Image prefix: "i;<component-count>/<image-checksum>".
  std::string boot_image_checksum =
      StringPrintf("i;%d/%08x", header->GetComponentCount(), header->GetImageChecksum());
  // Append dex checksums for boot class path entries not covered by the image.
  ArrayRef<const std::string> boot_class_path_tail =
      ArrayRef<const std::string>(boot_class_path).SubArray(header->GetComponentCount());
  for (const std::string& bcp_filename : boot_class_path_tail) {
    std::vector<std::unique_ptr<const DexFile>> dex_files;
    const ArtDexFileLoader dex_file_loader;
    // Open without verification: only the location checksums are needed here.
    if (!dex_file_loader.Open(bcp_filename.c_str(),
                              bcp_filename,  // The location does not matter here.
                              /*verify=*/ false,
                              /*verify_checksum=*/ false,
                              error_msg,
                              &dex_files)) {
      return std::string();
    }
    DCHECK(!dex_files.empty());
    // One ":d" marker per boot class path entry, then one "/<checksum>" per
    // contained dex file (multidex entries included).
    StringAppendF(&boot_image_checksum, ":d");
    for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
      StringAppendF(&boot_image_checksum, "/%08x", dex_file->GetLocationChecksum());
    }
  }
  return boot_image_checksum;
}
2417
// Builds the boot class path checksum string from already-loaded image spaces
// and opened dex files. The format matches the file-based overload above:
// an "i;<count>/<checksum>" image prefix (if any image spaces are loaded)
// followed by "d/<checksum>..." groups for dex files not covered by the image.
std::string ImageSpace::GetBootClassPathChecksums(
    const std::vector<ImageSpace*>& image_spaces,
    const std::vector<const DexFile*>& boot_class_path) {
  // `pos` tracks how many leading boot_class_path entries are covered by the
  // loaded image spaces.
  size_t pos = 0u;
  std::string boot_image_checksum;

  if (!image_spaces.empty()) {
    const ImageHeader& primary_header = image_spaces.front()->GetImageHeader();
    uint32_t component_count = primary_header.GetComponentCount();
    DCHECK_EQ(component_count, image_spaces.size());
    boot_image_checksum =
        StringPrintf("i;%d/%08x", component_count, primary_header.GetImageChecksum());
    for (const ImageSpace* space : image_spaces) {
      size_t num_dex_files = space->oat_file_non_owned_->GetOatDexFiles().size();
      if (kIsDebugBuild) {
        // Verify that each space's oat dex files line up, in order, with the
        // corresponding boot_class_path entries.
        CHECK_NE(num_dex_files, 0u);
        CHECK_LE(space->oat_file_non_owned_->GetOatDexFiles().size(), boot_class_path.size() - pos);
        for (size_t i = 0; i != num_dex_files; ++i) {
          CHECK_EQ(space->oat_file_non_owned_->GetOatDexFiles()[i]->GetDexFileLocation(),
                   boot_class_path[pos + i]->GetLocation());
        }
      }
      pos += num_dex_files;
    }
  }

  // Append checksums for the remaining (non-image-covered) dex files.
  ArrayRef<const DexFile* const> boot_class_path_tail =
      ArrayRef<const DexFile* const>(boot_class_path).SubArray(pos);
  // The tail must start at a primary (non-multidex) location so that the "d"
  // group markers below are emitted correctly.
  DCHECK(boot_class_path_tail.empty() ||
         !DexFileLoader::IsMultiDexLocation(boot_class_path_tail.front()->GetLocation().c_str()));
  for (const DexFile* dex_file : boot_class_path_tail) {
    if (!DexFileLoader::IsMultiDexLocation(dex_file->GetLocation().c_str())) {
      // Start a new "d" group per primary dex location; omit the leading ':'
      // when the string is still empty (no image prefix).
      StringAppendF(&boot_image_checksum, boot_image_checksum.empty() ? "d" : ":d");
    }
    StringAppendF(&boot_image_checksum, "/%08x", dex_file->GetLocationChecksum());
  }
  return boot_image_checksum;
}
2456
Vladimir Marko91f10322018-12-07 18:04:10 +00002457std::vector<std::string> ImageSpace::ExpandMultiImageLocations(
2458 const std::vector<std::string>& dex_locations,
2459 const std::string& image_location) {
Vladimir Marko0ace5632018-12-14 11:11:47 +00002460 return ExpandMultiImageLocations(ArrayRef<const std::string>(dex_locations), image_location);
2461}
2462
2463std::vector<std::string> ImageSpace::ExpandMultiImageLocations(
2464 ArrayRef<const std::string> dex_locations,
2465 const std::string& image_location) {
Vladimir Marko91f10322018-12-07 18:04:10 +00002466 DCHECK(!dex_locations.empty());
Andreas Gampe8994a042015-12-30 19:03:17 +00002467
Vladimir Marko91f10322018-12-07 18:04:10 +00002468 // Find the path.
2469 size_t last_slash = image_location.rfind('/');
2470 CHECK_NE(last_slash, std::string::npos);
Andreas Gampe8994a042015-12-30 19:03:17 +00002471
Vladimir Marko91f10322018-12-07 18:04:10 +00002472 // We also need to honor path components that were encoded through '@'. Otherwise the loading
2473 // code won't be able to find the images.
2474 if (image_location.find('@', last_slash) != std::string::npos) {
2475 last_slash = image_location.rfind('@');
Andreas Gampe8994a042015-12-30 19:03:17 +00002476 }
Andreas Gampe8994a042015-12-30 19:03:17 +00002477
Vladimir Marko91f10322018-12-07 18:04:10 +00002478 // Find the dot separating the primary image name from the extension.
2479 size_t last_dot = image_location.rfind('.');
2480 // Extract the extension and base (the path and primary image name).
2481 std::string extension;
2482 std::string base = image_location;
2483 if (last_dot != std::string::npos && last_dot > last_slash) {
2484 extension = image_location.substr(last_dot); // Including the dot.
2485 base.resize(last_dot);
Andreas Gampe8994a042015-12-30 19:03:17 +00002486 }
Vladimir Marko91f10322018-12-07 18:04:10 +00002487 // For non-empty primary image name, add '-' to the `base`.
2488 if (last_slash + 1u != base.size()) {
2489 base += '-';
2490 }
2491
2492 std::vector<std::string> locations;
2493 locations.reserve(dex_locations.size());
2494 locations.push_back(image_location);
2495
2496 // Now create the other names. Use a counted loop to skip the first one.
2497 for (size_t i = 1u; i < dex_locations.size(); ++i) {
2498 // Replace path with `base` (i.e. image path and prefix) and replace the original
2499 // extension (if any) with `extension`.
2500 std::string name = dex_locations[i];
2501 size_t last_dex_slash = name.rfind('/');
2502 if (last_dex_slash != std::string::npos) {
2503 name = name.substr(last_dex_slash + 1);
2504 }
2505 size_t last_dex_dot = name.rfind('.');
2506 if (last_dex_dot != std::string::npos) {
2507 name.resize(last_dex_dot);
2508 }
2509 locations.push_back(base + name + extension);
2510 }
2511 return locations;
Andreas Gampe8994a042015-12-30 19:03:17 +00002512}
2513
Mathieu Chartierd5f3f322016-03-21 14:05:56 -07002514void ImageSpace::DumpSections(std::ostream& os) const {
2515 const uint8_t* base = Begin();
2516 const ImageHeader& header = GetImageHeader();
2517 for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
2518 auto section_type = static_cast<ImageHeader::ImageSections>(i);
2519 const ImageSection& section = header.GetImageSection(section_type);
2520 os << section_type << " " << reinterpret_cast<const void*>(base + section.Offset())
2521 << "-" << reinterpret_cast<const void*>(base + section.End()) << "\n";
2522 }
2523}
2524
Mathieu Chartier6e7a72c2019-03-07 21:40:10 -08002525void ImageSpace::DisablePreResolvedStrings() {
2526 // Clear dex cache pointers.
2527 ObjPtr<mirror::ObjectArray<mirror::DexCache>> dex_caches =
2528 GetImageHeader().GetImageRoot(ImageHeader::kDexCaches)->AsObjectArray<mirror::DexCache>();
2529 for (size_t len = dex_caches->GetLength(), i = 0; i < len; ++i) {
2530 ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get(i);
2531 dex_cache->ClearPreResolvedStrings();
2532 }
2533}
2534
// Returns the pages backing the image's metadata section to the OS via
// madvise(MADV_DONTNEED), but only when no dex cache in this image still has
// a pre-resolved strings cache.
void ImageSpace::ReleaseMetadata() {
  const ImageSection& metadata = GetImageHeader().GetMetadataSection();
  VLOG(image) << "Releasing " << metadata.Size() << " image metadata bytes";
  // In the case where new app images may have been added around the checkpoint, ensure that we
  // don't madvise the cache for these.
  ObjPtr<mirror::ObjectArray<mirror::DexCache>> dex_caches =
      GetImageHeader().GetImageRoot(ImageHeader::kDexCaches)->AsObjectArray<mirror::DexCache>();
  bool have_startup_cache = false;
  for (size_t len = dex_caches->GetLength(), i = 0; i < len; ++i) {
    ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get(i);
    if (dex_cache->NumPreResolvedStrings() != 0u) {
      // At least one cache still holds pre-resolved strings; skip the release.
      have_startup_cache = true;
    }
  }
  // Only safe to do for images that have their preresolved strings caches disabled. This is because
  // uncompressed images madvise to the original unrelocated image contents.
  if (!have_startup_cache) {
    // Avoid using ZeroAndReleasePages since the zero fill might not be word atomic.
    // Round inward to whole pages: only pages fully contained in the metadata
    // section may be discarded.
    uint8_t* const page_begin = AlignUp(Begin() + metadata.Offset(), kPageSize);
    uint8_t* const page_end = AlignDown(Begin() + metadata.End(), kPageSize);
    if (page_begin < page_end) {
      CHECK_NE(madvise(page_begin, page_end - page_begin, MADV_DONTNEED), -1) << "madvise failed";
    }
  }
}
2560
Ian Rogers1d54e732013-05-02 21:10:01 -07002561} // namespace space
2562} // namespace gc
2563} // namespace art