blob: 346b81134ecbfe33845fd9f169279af550b4f6e4 [file] [log] [blame]
Ian Rogers1d54e732013-05-02 21:10:01 -07001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "image_space.h"
18
Andreas Gampe70be1fb2014-10-31 16:45:19 -070019#include <sys/statvfs.h>
Alex Light25396132014-08-27 15:37:23 -070020#include <sys/types.h>
Narayan Kamath5a2be3f2015-02-16 13:51:51 +000021#include <unistd.h>
Alex Light25396132014-08-27 15:37:23 -070022
Andreas Gampe8cf9cb32017-07-19 09:28:38 -070023#include <random>
24
Andreas Gampe46ee31b2016-12-14 10:11:49 -080025#include "android-base/stringprintf.h"
Andreas Gampe9186ced2016-12-12 14:28:21 -080026#include "android-base/strings.h"
27
Andreas Gampe639b2b12019-01-08 10:32:50 -080028#include "arch/instruction_set.h"
Andreas Gampea1d2f952017-04-20 22:53:58 -070029#include "art_field-inl.h"
Andreas Gampec6ea7d02017-02-01 16:46:28 -080030#include "art_method-inl.h"
Vladimir Marko0ace5632018-12-14 11:11:47 +000031#include "base/array_ref.h"
Vladimir Marko4df2d802018-09-27 16:42:44 +000032#include "base/bit_memory_region.h"
Andreas Gampe8228cdf2017-05-30 15:03:54 -070033#include "base/callee_save_type.h"
Andreas Gampe542451c2016-07-26 09:02:02 -070034#include "base/enums.h"
David Sehr891a50e2017-10-27 17:01:07 -070035#include "base/file_utils.h"
Ian Rogersc7dd2952014-10-21 23:31:19 -070036#include "base/macros.h"
David Sehrc431b9d2018-03-02 12:01:51 -080037#include "base/os.h"
Narayan Kamathd1c606f2014-06-09 16:50:19 +010038#include "base/scoped_flock.h"
Andreas Gampe8cf9cb32017-07-19 09:28:38 -070039#include "base/stl_util.h"
Mathieu Chartier32ce2ad2016-03-04 14:58:03 -080040#include "base/systrace.h"
Vladimir Marko80afd022015-05-19 18:08:00 +010041#include "base/time_utils.h"
David Sehrc431b9d2018-03-02 12:01:51 -080042#include "base/utils.h"
Vladimir Marko4df2d802018-09-27 16:42:44 +000043#include "class_root.h"
David Sehr013fd802018-01-11 22:55:24 -080044#include "dex/art_dex_file_loader.h"
David Sehr9e734c72018-01-04 17:56:19 -080045#include "dex/dex_file_loader.h"
David Sehr97c381e2017-02-01 15:09:58 -080046#include "exec_utils.h"
Ian Rogers1d54e732013-05-02 21:10:01 -070047#include "gc/accounting/space_bitmap-inl.h"
Mathieu Chartier93c21ba2018-12-10 13:08:30 -080048#include "gc/task_processor.h"
Mathieu Chartier4a26f172016-01-26 14:26:18 -080049#include "image-inl.h"
Andreas Gampebec63582015-11-20 19:26:51 -080050#include "image_space_fs.h"
Mathieu Chartier74ccee62018-10-10 10:30:29 -070051#include "intern_table-inl.h"
Ian Rogers1d54e732013-05-02 21:10:01 -070052#include "mirror/class-inl.h"
Vladimir Marko0eefb9b2019-03-27 15:04:31 +000053#include "mirror/executable-inl.h"
Ian Rogers1d54e732013-05-02 21:10:01 -070054#include "mirror/object-inl.h"
Andreas Gampec6ea7d02017-02-01 16:46:28 -080055#include "mirror/object-refvisitor-inl.h"
Brian Carlstrom56d947f2013-07-15 13:14:23 -070056#include "oat_file.h"
Andreas Gamped482e732017-04-24 17:59:09 -070057#include "runtime.h"
Ian Rogers1d54e732013-05-02 21:10:01 -070058#include "space-inl.h"
Ian Rogers1d54e732013-05-02 21:10:01 -070059
60namespace art {
61namespace gc {
62namespace space {
63
Vladimir Marko0ace5632018-12-14 11:11:47 +000064using android::base::StringAppendF;
Andreas Gampe46ee31b2016-12-14 10:11:49 -080065using android::base::StringPrintf;
66
// Monotonically increasing counter shared by all image spaces.
// NOTE(review): presumably used to give each image space's live bitmap a
// unique index/name - confirm against the bitmap-creation code elsewhere
// in this file.
Atomic<uint32_t> ImageSpace::bitmap_index_(0);
Ian Rogers1d54e732013-05-02 21:10:01 -070068
// Constructs an ImageSpace over an already-mapped image file.
// The space never grows (`end` is passed as both the current end and the
// limit) and is never collected (kGcRetentionPolicyNeverCollect).
// `live_bitmap` must be non-null; `oat_file_non_owned_` is filled in later.
ImageSpace::ImageSpace(const std::string& image_filename,
                       const char* image_location,
                       MemMap&& mem_map,
                       std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap,
                       uint8_t* end)
    : MemMapSpace(image_filename,
                  std::move(mem_map),
                  // Reading mem_map.Begin() alongside std::move(mem_map) is safe
                  // only because std::move() is a cast and no move occurs while
                  // the arguments are evaluated.
                  // NOTE(review): this assumes MemMapSpace takes MemMap&& (so the
                  // move happens inside its ctor) - confirm in the header.
                  mem_map.Begin(),
                  end,
                  end,
                  kGcRetentionPolicyNeverCollect),
      live_bitmap_(std::move(live_bitmap)),
      oat_file_non_owned_(nullptr),
      image_location_(image_location) {
  DCHECK(live_bitmap_ != nullptr);
}
85
Alex Lightcf4bf382014-07-24 11:29:14 -070086static int32_t ChooseRelocationOffsetDelta(int32_t min_delta, int32_t max_delta) {
87 CHECK_ALIGNED(min_delta, kPageSize);
88 CHECK_ALIGNED(max_delta, kPageSize);
89 CHECK_LT(min_delta, max_delta);
90
Alex Light15324762015-11-19 11:03:10 -080091 int32_t r = GetRandomNumber<int32_t>(min_delta, max_delta);
Alex Lightcf4bf382014-07-24 11:29:14 -070092 if (r % 2 == 0) {
93 r = RoundUp(r, kPageSize);
94 } else {
95 r = RoundDown(r, kPageSize);
96 }
97 CHECK_LE(min_delta, r);
98 CHECK_GE(max_delta, r);
99 CHECK_ALIGNED(r, kPageSize);
100 return r;
101}
102
// Convenience overload: chooses a delta within the build-time configured
// ASLR range for the boot image base address.
static int32_t ChooseRelocationOffsetDelta() {
  return ChooseRelocationOffsetDelta(ART_BASE_ADDRESS_MIN_DELTA, ART_BASE_ADDRESS_MAX_DELTA);
}
106
107static bool GenerateImage(const std::string& image_filename,
108 InstructionSet image_isa,
Alex Light25396132014-08-27 15:37:23 -0700109 std::string* error_msg) {
Vladimir Marko91f10322018-12-07 18:04:10 +0000110 Runtime* runtime = Runtime::Current();
111 const std::vector<std::string>& boot_class_path = runtime->GetBootClassPath();
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700112 if (boot_class_path.empty()) {
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700113 *error_msg = "Failed to generate image because no boot class path specified";
114 return false;
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700115 }
Alex Light25396132014-08-27 15:37:23 -0700116 // We should clean up so we are more likely to have room for the image.
117 if (Runtime::Current()->IsZygote()) {
Andreas Gampe3c13a792014-09-18 20:56:04 -0700118 LOG(INFO) << "Pruning dalvik-cache since we are generating an image and will need to recompile";
Narayan Kamath28bc9872014-11-07 17:46:28 +0000119 PruneDalvikCache(image_isa);
Alex Light25396132014-08-27 15:37:23 -0700120 }
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700121
Mathieu Chartier8bbc8c02013-07-31 16:27:01 -0700122 std::vector<std::string> arg_vector;
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700123
Tsu Chiang Chuang12e6d742014-05-22 10:22:25 -0700124 std::string dex2oat(Runtime::Current()->GetCompilerExecutable());
Mathieu Chartier08d7d442013-07-31 18:08:51 -0700125 arg_vector.push_back(dex2oat);
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700126
127 std::string image_option_string("--image=");
Narayan Kamath52f84882014-05-02 10:10:39 +0100128 image_option_string += image_filename;
Mathieu Chartier8bbc8c02013-07-31 16:27:01 -0700129 arg_vector.push_back(image_option_string);
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700130
Vladimir Marko91f10322018-12-07 18:04:10 +0000131 const std::vector<std::string>& boot_class_path_locations = runtime->GetBootClassPathLocations();
132 DCHECK_EQ(boot_class_path.size(), boot_class_path_locations.size());
133 for (size_t i = 0u; i < boot_class_path.size(); i++) {
Mathieu Chartier8bbc8c02013-07-31 16:27:01 -0700134 arg_vector.push_back(std::string("--dex-file=") + boot_class_path[i]);
Vladimir Marko91f10322018-12-07 18:04:10 +0000135 arg_vector.push_back(std::string("--dex-location=") + boot_class_path_locations[i]);
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700136 }
137
138 std::string oat_file_option_string("--oat-file=");
Brian Carlstrom2f1e15c2014-10-27 16:27:06 -0700139 oat_file_option_string += ImageHeader::GetOatLocationFromImageLocation(image_filename);
Mathieu Chartier8bbc8c02013-07-31 16:27:01 -0700140 arg_vector.push_back(oat_file_option_string);
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700141
Sebastien Hertz0de11332015-05-13 12:14:05 +0200142 // Note: we do not generate a fully debuggable boot image so we do not pass the
143 // compiler flag --debuggable here.
144
Igor Murashkinb1d8c312015-08-04 11:18:43 -0700145 Runtime::Current()->AddCurrentRuntimeFeaturesAsDex2OatArguments(&arg_vector);
Brian Carlstrom31d8f522014-09-29 11:22:54 -0700146 CHECK_EQ(image_isa, kRuntimeISA)
147 << "We should always be generating an image for the current isa.";
Ian Rogers8afeb852014-04-02 14:55:49 -0700148
Andreas Gampea463b6a2016-08-12 21:53:32 -0700149 int32_t base_offset = ChooseRelocationOffsetDelta();
Alex Lightcf4bf382014-07-24 11:29:14 -0700150 LOG(INFO) << "Using an offset of 0x" << std::hex << base_offset << " from default "
151 << "art base address of 0x" << std::hex << ART_BASE_ADDRESS;
152 arg_vector.push_back(StringPrintf("--base=0x%x", ART_BASE_ADDRESS + base_offset));
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700153
Brian Carlstrom57309db2014-07-30 15:13:25 -0700154 if (!kIsTargetBuild) {
Mathieu Chartier8bbc8c02013-07-31 16:27:01 -0700155 arg_vector.push_back("--host");
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700156 }
157
Brian Carlstrom6449c622014-02-10 23:48:36 -0800158 const std::vector<std::string>& compiler_options = Runtime::Current()->GetImageCompilerOptions();
Brian Carlstrom2ec65202014-03-03 15:16:37 -0800159 for (size_t i = 0; i < compiler_options.size(); ++i) {
Brian Carlstrom6449c622014-02-10 23:48:36 -0800160 arg_vector.push_back(compiler_options[i].c_str());
161 }
162
Andreas Gampe9186ced2016-12-12 14:28:21 -0800163 std::string command_line(android::base::Join(arg_vector, ' '));
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700164 LOG(INFO) << "GenerateImage: " << command_line;
Brian Carlstrom6449c622014-02-10 23:48:36 -0800165 return Exec(arg_vector, error_msg);
Brian Carlstrom56d947f2013-07-15 13:14:23 -0700166}
167
// Locates the candidate boot image files for `image_location` on `image_isa`:
// one under /system and one in the dalvik-cache. Sets `*system_filename` /
// `*has_system` for the /system copy, and `*cache_filename` / `*has_cache`
// for the dalvik-cache copy. `*cache_filename` is filled in even when the
// file does not exist yet so the caller knows where to create it.
// Returns true if at least one of the two files exists.
static bool FindImageFilenameImpl(const char* image_location,
                                  const InstructionSet image_isa,
                                  bool* has_system,
                                  std::string* system_filename,
                                  bool* dalvik_cache_exists,
                                  std::string* dalvik_cache,
                                  bool* is_global_cache,
                                  bool* has_cache,
                                  std::string* cache_filename) {
  DCHECK(dalvik_cache != nullptr);

  *has_system = false;
  *has_cache = false;
  // image_location = /system/framework/boot.art
  // system_image_location = /system/framework/<image_isa>/boot.art
  std::string system_image_filename(GetSystemImageFilename(image_location, image_isa));
  if (OS::FileExists(system_image_filename.c_str())) {
    *system_filename = system_image_filename;
    *has_system = true;
  }

  // Resolve (and possibly create) the per-ISA dalvik-cache directory.
  bool have_android_data = false;
  *dalvik_cache_exists = false;
  GetDalvikCache(GetInstructionSetString(image_isa),
                 /*create_if_absent=*/ true,
                 dalvik_cache,
                 &have_android_data,
                 dalvik_cache_exists,
                 is_global_cache);

  if (*dalvik_cache_exists) {
    DCHECK(have_android_data);
    // Always set output location even if it does not exist,
    // so that the caller knows where to create the image.
    //
    // image_location = /system/framework/boot.art
    // *image_filename = /data/dalvik-cache/<image_isa>/system@framework@boot.art
    std::string error_msg;
    if (!GetDalvikCacheFilename(image_location,
                                dalvik_cache->c_str(),
                                cache_filename,
                                &error_msg)) {
      // Could not even compute the cache filename; only the /system copy
      // (if any) remains usable.
      LOG(WARNING) << error_msg;
      return *has_system;
    }
    *has_cache = OS::FileExists(cache_filename->c_str());
  }
  return *has_system || *has_cache;
}
217
Andreas Gampea463b6a2016-08-12 21:53:32 -0700218bool ImageSpace::FindImageFilename(const char* image_location,
219 const InstructionSet image_isa,
220 std::string* system_filename,
221 bool* has_system,
222 std::string* cache_filename,
223 bool* dalvik_cache_exists,
224 bool* has_cache,
225 bool* is_global_cache) {
226 std::string dalvik_cache_unused;
227 return FindImageFilenameImpl(image_location,
228 image_isa,
229 has_system,
230 system_filename,
231 dalvik_cache_exists,
232 &dalvik_cache_unused,
233 is_global_cache,
234 has_cache,
235 cache_filename);
236}
237
Alex Lighta59dd802014-07-02 16:28:08 -0700238static bool ReadSpecificImageHeader(const char* filename, ImageHeader* image_header) {
239 std::unique_ptr<File> image_file(OS::OpenFileForReading(filename));
240 if (image_file.get() == nullptr) {
241 return false;
242 }
243 const bool success = image_file->ReadFully(image_header, sizeof(ImageHeader));
244 if (!success || !image_header->IsValid()) {
245 return false;
246 }
247 return true;
248}
249
Vladimir Marko4df2d802018-09-27 16:42:44 +0000250static std::unique_ptr<ImageHeader> ReadSpecificImageHeader(const char* filename,
251 std::string* error_msg) {
Alex Lighta59dd802014-07-02 16:28:08 -0700252 std::unique_ptr<ImageHeader> hdr(new ImageHeader);
253 if (!ReadSpecificImageHeader(filename, hdr.get())) {
Brian Carlstrom31d8f522014-09-29 11:22:54 -0700254 *error_msg = StringPrintf("Unable to read image header for %s", filename);
Alex Lighta59dd802014-07-02 16:28:08 -0700255 return nullptr;
256 }
Vladimir Marko4df2d802018-09-27 16:42:44 +0000257 return hdr;
Narayan Kamath52f84882014-05-02 10:10:39 +0100258}
259
Vladimir Marko4df2d802018-09-27 16:42:44 +0000260std::unique_ptr<ImageHeader> ImageSpace::ReadImageHeader(const char* image_location,
261 const InstructionSet image_isa,
Andreas Gampe86823542019-02-25 09:38:49 -0800262 ImageSpaceLoadingOrder order,
Vladimir Marko4df2d802018-09-27 16:42:44 +0000263 std::string* error_msg) {
Alex Lighta59dd802014-07-02 16:28:08 -0700264 std::string system_filename;
265 bool has_system = false;
266 std::string cache_filename;
267 bool has_cache = false;
268 bool dalvik_cache_exists = false;
Andreas Gampe3c13a792014-09-18 20:56:04 -0700269 bool is_global_cache = false;
Vladimir Marko4df2d802018-09-27 16:42:44 +0000270 if (FindImageFilename(image_location,
271 image_isa,
272 &system_filename,
273 &has_system,
274 &cache_filename,
275 &dalvik_cache_exists,
276 &has_cache,
277 &is_global_cache)) {
Andreas Gampe86823542019-02-25 09:38:49 -0800278 if (order == ImageSpaceLoadingOrder::kSystemFirst) {
279 if (has_system) {
280 return ReadSpecificImageHeader(system_filename.c_str(), error_msg);
281 }
282 if (has_cache) {
283 return ReadSpecificImageHeader(cache_filename.c_str(), error_msg);
284 }
285 } else {
286 if (has_cache) {
287 return ReadSpecificImageHeader(cache_filename.c_str(), error_msg);
288 }
289 if (has_system) {
290 return ReadSpecificImageHeader(system_filename.c_str(), error_msg);
291 }
Narayan Kamath52f84882014-05-02 10:10:39 +0100292 }
Narayan Kamath52f84882014-05-02 10:10:39 +0100293 }
294
Brian Carlstrom31d8f522014-09-29 11:22:54 -0700295 *error_msg = StringPrintf("Unable to find image file for %s", image_location);
Narayan Kamath52f84882014-05-02 10:10:39 +0100296 return nullptr;
297}
298
Robert Sesekbfa1f8d2016-08-15 15:21:09 -0400299static bool CanWriteToDalvikCache(const InstructionSet isa) {
300 const std::string dalvik_cache = GetDalvikCache(GetInstructionSetString(isa));
301 if (access(dalvik_cache.c_str(), O_RDWR) == 0) {
302 return true;
303 } else if (errno != EACCES) {
304 PLOG(WARNING) << "CanWriteToDalvikCache returned error other than EACCES";
305 }
306 return false;
307}
308
309static bool ImageCreationAllowed(bool is_global_cache,
310 const InstructionSet isa,
Vladimir Marko3364d182019-03-13 13:55:01 +0000311 bool is_zygote,
Robert Sesekbfa1f8d2016-08-15 15:21:09 -0400312 std::string* error_msg) {
Andreas Gampe3c13a792014-09-18 20:56:04 -0700313 // Anyone can write into a "local" cache.
314 if (!is_global_cache) {
315 return true;
316 }
317
Robert Sesekbfa1f8d2016-08-15 15:21:09 -0400318 // Only the zygote running as root is allowed to create the global boot image.
319 // If the zygote is running as non-root (and cannot write to the dalvik-cache),
320 // then image creation is not allowed..
Vladimir Marko3364d182019-03-13 13:55:01 +0000321 if (is_zygote) {
Robert Sesekbfa1f8d2016-08-15 15:21:09 -0400322 return CanWriteToDalvikCache(isa);
Andreas Gampe3c13a792014-09-18 20:56:04 -0700323 }
324
325 *error_msg = "Only the zygote can create the global boot image.";
326 return false;
327}
328
// Debug walk over every object in the image space: checks that each object is
// object-aligned, has a non-null class pointer, is set in the live bitmap,
// and (under Baker read barriers) carries a valid read-barrier state.
void ImageSpace::VerifyImageAllocations() {
  // Objects start immediately after the object-aligned image header.
  uint8_t* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
  while (current < End()) {
    CHECK_ALIGNED(current, kObjectAlignment);
    auto* obj = reinterpret_cast<mirror::Object*>(current);
    CHECK(obj->GetClass() != nullptr) << "Image object at address " << obj << " has null class";
    CHECK(live_bitmap_->Test(obj)) << obj->PrettyTypeOf();
    if (kUseBakerReadBarrier) {
      obj->AssertReadBarrierState();
    }
    // Image objects are laid out contiguously; advance by the aligned size.
    current += RoundUp(obj->SizeOf(), kObjectAlignment);
  }
}
342
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800343// Helper class for relocating from one range of memory to another.
344class RelocationRange {
345 public:
346 RelocationRange() = default;
347 RelocationRange(const RelocationRange&) = default;
348 RelocationRange(uintptr_t source, uintptr_t dest, uintptr_t length)
349 : source_(source),
350 dest_(dest),
351 length_(length) {}
352
Mathieu Chartier91edc622016-02-16 17:16:01 -0800353 bool InSource(uintptr_t address) const {
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800354 return address - source_ < length_;
355 }
356
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -0800357 bool InDest(const void* dest) const {
358 return InDest(reinterpret_cast<uintptr_t>(dest));
359 }
360
Mathieu Chartier91edc622016-02-16 17:16:01 -0800361 bool InDest(uintptr_t address) const {
362 return address - dest_ < length_;
363 }
364
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800365 // Translate a source address to the destination space.
366 uintptr_t ToDest(uintptr_t address) const {
Mathieu Chartier91edc622016-02-16 17:16:01 -0800367 DCHECK(InSource(address));
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800368 return address + Delta();
369 }
370
371 // Returns the delta between the dest from the source.
Mathieu Chartier0b4cbd02016-03-08 16:49:58 -0800372 uintptr_t Delta() const {
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800373 return dest_ - source_;
374 }
375
376 uintptr_t Source() const {
377 return source_;
378 }
379
380 uintptr_t Dest() const {
381 return dest_;
382 }
383
384 uintptr_t Length() const {
385 return length_;
386 }
387
388 private:
389 const uintptr_t source_;
390 const uintptr_t dest_;
391 const uintptr_t length_;
392};
393
Mathieu Chartier0b4cbd02016-03-08 16:49:58 -0800394std::ostream& operator<<(std::ostream& os, const RelocationRange& reloc) {
395 return os << "(" << reinterpret_cast<const void*>(reloc.Source()) << "-"
396 << reinterpret_cast<const void*>(reloc.Source() + reloc.Length()) << ")->("
397 << reinterpret_cast<const void*>(reloc.Dest()) << "-"
398 << reinterpret_cast<const void*>(reloc.Dest() + reloc.Length()) << ")";
399}
400
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -0800401template <PointerSize kPointerSize, typename HeapVisitor, typename NativeVisitor>
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800402class ImageSpace::PatchObjectVisitor final {
403 public:
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -0800404 explicit PatchObjectVisitor(HeapVisitor heap_visitor, NativeVisitor native_visitor)
405 : heap_visitor_(heap_visitor), native_visitor_(native_visitor) {}
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800406
407 void VisitClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
408 // A mirror::Class object consists of
409 // - instance fields inherited from j.l.Object,
410 // - instance fields inherited from j.l.Class,
411 // - embedded tables (vtable, interface method table),
412 // - static fields of the class itself.
413 // The reference fields are at the start of each field section (this is how the
414 // ClassLinker orders fields; except when that would create a gap between superclass
415 // fields and the first reference of the subclass due to alignment, it can be filled
416 // with smaller fields - but that's not the case for j.l.Object and j.l.Class).
417
418 DCHECK_ALIGNED(klass, kObjectAlignment);
419 static_assert(IsAligned<kHeapReferenceSize>(kObjectAlignment), "Object alignment check.");
420 // First, patch the `klass->klass_`, known to be a reference to the j.l.Class.class.
421 // This should be the only reference field in j.l.Object and we assert that below.
422 PatchReferenceField</*kMayBeNull=*/ false>(klass, mirror::Object::ClassOffset());
423 // Then patch the reference instance fields described by j.l.Class.class.
424 // Use the sizeof(Object) to determine where these reference fields start;
425 // this is the same as `class_class->GetFirstReferenceInstanceFieldOffset()`
426 // after patching but the j.l.Class may not have been patched yet.
427 mirror::Class* class_class = klass->GetClass<kVerifyNone, kWithoutReadBarrier>();
428 size_t num_reference_instance_fields = class_class->NumReferenceInstanceFields<kVerifyNone>();
429 DCHECK_NE(num_reference_instance_fields, 0u);
430 static_assert(IsAligned<kHeapReferenceSize>(sizeof(mirror::Object)), "Size alignment check.");
431 MemberOffset instance_field_offset(sizeof(mirror::Object));
432 for (size_t i = 0; i != num_reference_instance_fields; ++i) {
433 PatchReferenceField(klass, instance_field_offset);
434 static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
435 "Heap reference sizes equality check.");
436 instance_field_offset =
437 MemberOffset(instance_field_offset.Uint32Value() + kHeapReferenceSize);
438 }
439 // Now that we have patched the `super_class_`, if this is the j.l.Class.class,
440 // we can get a reference to j.l.Object.class and assert that it has only one
441 // reference instance field (the `klass_` patched above).
442 if (kIsDebugBuild && klass == class_class) {
443 ObjPtr<mirror::Class> object_class =
444 klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>();
445 CHECK_EQ(object_class->NumReferenceInstanceFields<kVerifyNone>(), 1u);
446 }
447 // Then patch static fields.
448 size_t num_reference_static_fields = klass->NumReferenceStaticFields<kVerifyNone>();
449 if (num_reference_static_fields != 0u) {
450 MemberOffset static_field_offset =
451 klass->GetFirstReferenceStaticFieldOffset<kVerifyNone>(kPointerSize);
452 for (size_t i = 0; i != num_reference_static_fields; ++i) {
453 PatchReferenceField(klass, static_field_offset);
454 static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
455 "Heap reference sizes equality check.");
456 static_field_offset =
457 MemberOffset(static_field_offset.Uint32Value() + kHeapReferenceSize);
458 }
459 }
460 // Then patch native pointers.
461 klass->FixupNativePointers<kVerifyNone>(klass, kPointerSize, *this);
462 }
463
464 template <typename T>
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -0800465 T* operator()(T* ptr, void** dest_addr ATTRIBUTE_UNUSED) const {
466 return (ptr != nullptr) ? native_visitor_(ptr) : nullptr;
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800467 }
468
Vladimir Markoc524e9e2019-03-26 10:54:50 +0000469 void VisitPointerArray(ObjPtr<mirror::PointerArray> pointer_array)
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800470 REQUIRES_SHARED(Locks::mutator_lock_) {
471 // Fully patch the pointer array, including the `klass_` field.
472 PatchReferenceField</*kMayBeNull=*/ false>(pointer_array, mirror::Object::ClassOffset());
473
474 int32_t length = pointer_array->GetLength<kVerifyNone>();
475 for (int32_t i = 0; i != length; ++i) {
476 ArtMethod** method_entry = reinterpret_cast<ArtMethod**>(
477 pointer_array->ElementAddress<kVerifyNone>(i, kPointerSize));
478 PatchNativePointer</*kMayBeNull=*/ false>(method_entry);
479 }
480 }
481
482 void VisitObject(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
483 // Visit all reference fields.
484 object->VisitReferences</*kVisitNativeRoots=*/ false,
485 kVerifyNone,
486 kWithoutReadBarrier>(*this, *this);
487 // This function should not be called for classes.
488 DCHECK(!object->IsClass<kVerifyNone>());
489 }
490
491 // Visitor for VisitReferences().
Vladimir Marko4617d582019-03-28 13:48:31 +0000492 ALWAYS_INLINE void operator()(ObjPtr<mirror::Object> object,
493 MemberOffset field_offset,
494 bool is_static)
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800495 const REQUIRES_SHARED(Locks::mutator_lock_) {
496 DCHECK(!is_static);
497 PatchReferenceField(object, field_offset);
498 }
499 // Visitor for VisitReferences(), java.lang.ref.Reference case.
Vladimir Marko4617d582019-03-28 13:48:31 +0000500 ALWAYS_INLINE void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800501 REQUIRES_SHARED(Locks::mutator_lock_) {
502 DCHECK(klass->IsTypeOfReferenceClass());
503 this->operator()(ref, mirror::Reference::ReferentOffset(), /*is_static=*/ false);
504 }
505 // Ignore class native roots; not called from VisitReferences() for kVisitNativeRoots == false.
506 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
507 const {}
508 void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
509
Vladimir Marko423bebb2019-03-26 15:17:21 +0000510 void VisitDexCacheArrays(ObjPtr<mirror::DexCache> dex_cache)
511 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800512 FixupDexCacheArray<mirror::StringDexCacheType>(dex_cache,
513 mirror::DexCache::StringsOffset(),
514 dex_cache->NumStrings<kVerifyNone>());
515 FixupDexCacheArray<mirror::TypeDexCacheType>(dex_cache,
516 mirror::DexCache::ResolvedTypesOffset(),
517 dex_cache->NumResolvedTypes<kVerifyNone>());
518 FixupDexCacheArray<mirror::MethodDexCacheType>(dex_cache,
519 mirror::DexCache::ResolvedMethodsOffset(),
520 dex_cache->NumResolvedMethods<kVerifyNone>());
521 FixupDexCacheArray<mirror::FieldDexCacheType>(dex_cache,
522 mirror::DexCache::ResolvedFieldsOffset(),
523 dex_cache->NumResolvedFields<kVerifyNone>());
524 FixupDexCacheArray<mirror::MethodTypeDexCacheType>(
525 dex_cache,
526 mirror::DexCache::ResolvedMethodTypesOffset(),
527 dex_cache->NumResolvedMethodTypes<kVerifyNone>());
528 FixupDexCacheArray<GcRoot<mirror::CallSite>>(
529 dex_cache,
530 mirror::DexCache::ResolvedCallSitesOffset(),
531 dex_cache->NumResolvedCallSites<kVerifyNone>());
532 FixupDexCacheArray<GcRoot<mirror::String>>(
533 dex_cache,
534 mirror::DexCache::PreResolvedStringsOffset(),
535 dex_cache->NumPreResolvedStrings<kVerifyNone>());
536 }
537
538 template <bool kMayBeNull = true, typename T>
539 ALWAYS_INLINE void PatchGcRoot(/*inout*/GcRoot<T>* root) const
540 REQUIRES_SHARED(Locks::mutator_lock_) {
541 static_assert(sizeof(GcRoot<mirror::Class*>) == sizeof(uint32_t), "GcRoot size check");
542 T* old_value = root->template Read<kWithoutReadBarrier>();
543 DCHECK(kMayBeNull || old_value != nullptr);
544 if (!kMayBeNull || old_value != nullptr) {
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -0800545 *root = GcRoot<T>(heap_visitor_(old_value));
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800546 }
547 }
548
549 template <bool kMayBeNull = true, typename T>
550 ALWAYS_INLINE void PatchNativePointer(/*inout*/T** entry) const {
551 if (kPointerSize == PointerSize::k64) {
552 uint64_t* raw_entry = reinterpret_cast<uint64_t*>(entry);
553 T* old_value = reinterpret_cast64<T*>(*raw_entry);
554 DCHECK(kMayBeNull || old_value != nullptr);
555 if (!kMayBeNull || old_value != nullptr) {
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -0800556 T* new_value = native_visitor_(old_value);
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800557 *raw_entry = reinterpret_cast64<uint64_t>(new_value);
558 }
559 } else {
560 uint32_t* raw_entry = reinterpret_cast<uint32_t*>(entry);
561 T* old_value = reinterpret_cast32<T*>(*raw_entry);
562 DCHECK(kMayBeNull || old_value != nullptr);
563 if (!kMayBeNull || old_value != nullptr) {
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -0800564 T* new_value = native_visitor_(old_value);
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800565 *raw_entry = reinterpret_cast32<uint32_t>(new_value);
566 }
567 }
568 }
569
570 template <bool kMayBeNull = true>
Vladimir Markoc524e9e2019-03-26 10:54:50 +0000571 ALWAYS_INLINE void PatchReferenceField(ObjPtr<mirror::Object> object, MemberOffset offset) const
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800572 REQUIRES_SHARED(Locks::mutator_lock_) {
Vladimir Markoc524e9e2019-03-26 10:54:50 +0000573 ObjPtr<mirror::Object> old_value =
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800574 object->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset);
575 DCHECK(kMayBeNull || old_value != nullptr);
576 if (!kMayBeNull || old_value != nullptr) {
Vladimir Markoc524e9e2019-03-26 10:54:50 +0000577 ObjPtr<mirror::Object> new_value = heap_visitor_(old_value.Ptr());
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800578 object->SetFieldObjectWithoutWriteBarrier</*kTransactionActive=*/ false,
579 /*kCheckTransaction=*/ true,
580 kVerifyNone>(offset, new_value);
581 }
582 }
583
584 template <typename T>
585 void FixupDexCacheArrayEntry(std::atomic<mirror::DexCachePair<T>>* array, uint32_t index)
586 REQUIRES_SHARED(Locks::mutator_lock_) {
587 static_assert(sizeof(std::atomic<mirror::DexCachePair<T>>) == sizeof(mirror::DexCachePair<T>),
588 "Size check for removing std::atomic<>.");
589 PatchGcRoot(&(reinterpret_cast<mirror::DexCachePair<T>*>(array)[index].object));
590 }
591
592 template <typename T>
593 void FixupDexCacheArrayEntry(std::atomic<mirror::NativeDexCachePair<T>>* array, uint32_t index)
594 REQUIRES_SHARED(Locks::mutator_lock_) {
595 static_assert(sizeof(std::atomic<mirror::NativeDexCachePair<T>>) ==
596 sizeof(mirror::NativeDexCachePair<T>),
597 "Size check for removing std::atomic<>.");
598 mirror::NativeDexCachePair<T> pair =
599 mirror::DexCache::GetNativePairPtrSize(array, index, kPointerSize);
600 if (pair.object != nullptr) {
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -0800601 pair.object = native_visitor_(pair.object);
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800602 mirror::DexCache::SetNativePairPtrSize(array, index, pair, kPointerSize);
603 }
604 }
605
606 void FixupDexCacheArrayEntry(GcRoot<mirror::CallSite>* array, uint32_t index)
607 REQUIRES_SHARED(Locks::mutator_lock_) {
608 PatchGcRoot(&array[index]);
609 }
610
611 void FixupDexCacheArrayEntry(GcRoot<mirror::String>* array, uint32_t index)
612 REQUIRES_SHARED(Locks::mutator_lock_) {
613 PatchGcRoot(&array[index]);
614 }
615
616 template <typename EntryType>
Vladimir Marko423bebb2019-03-26 15:17:21 +0000617 void FixupDexCacheArray(ObjPtr<mirror::DexCache> dex_cache,
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800618 MemberOffset array_offset,
619 uint32_t size) REQUIRES_SHARED(Locks::mutator_lock_) {
620 EntryType* old_array =
621 reinterpret_cast64<EntryType*>(dex_cache->GetField64<kVerifyNone>(array_offset));
622 DCHECK_EQ(old_array != nullptr, size != 0u);
623 if (old_array != nullptr) {
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -0800624 EntryType* new_array = native_visitor_(old_array);
Mathieu Chartierd3f037b2018-12-06 23:50:56 -0800625 dex_cache->SetField64<kVerifyNone>(array_offset, reinterpret_cast64<uint64_t>(new_array));
626 for (uint32_t i = 0; i != size; ++i) {
627 FixupDexCacheArrayEntry(new_array, i);
628 }
629 }
630 }
631
 private:
  // Visitor applied to references to managed heap objects.
  HeapVisitor heap_visitor_;

  // Visitor applied to native pointers (e.g. dex cache native arrays).
  NativeVisitor native_visitor_;
};
639
Mathieu Chartier25602dc2018-12-11 11:31:57 -0800640template <typename ReferenceVisitor>
641class ImageSpace::ClassTableVisitor final {
642 public:
643 explicit ClassTableVisitor(const ReferenceVisitor& reference_visitor)
644 : reference_visitor_(reference_visitor) {}
645
646 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
647 REQUIRES_SHARED(Locks::mutator_lock_) {
648 DCHECK(root->AsMirrorPtr() != nullptr);
649 root->Assign(reference_visitor_(root->AsMirrorPtr()));
650 }
651
652 private:
653 ReferenceVisitor reference_visitor_;
654};
655
// Helper class encapsulating image loading. As a nested class it can access private
// ImageSpace members while keeping these functions out of the header.
Vladimir Marko82e1e272018-08-20 13:38:06 +0000658class ImageSpace::Loader {
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800659 public:
  // Load and relocate an app image: maps the image file via Init(), validates
  // the reservation size and component count, relocates all pointers in place
  // to the actual mapped addresses, and cross-checks the image methods against
  // the current runtime. Returns null (with `*error_msg` set) on failure.
  static std::unique_ptr<ImageSpace> InitAppImage(const char* image_filename,
                                                  const char* image_location,
                                                  const OatFile* oat_file,
                                                  /*inout*/MemMap* image_reservation,
                                                  /*out*/std::string* error_msg)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));

    std::unique_ptr<ImageSpace> space = Init(image_filename,
                                             image_location,
                                             oat_file,
                                             &logger,
                                             image_reservation,
                                             error_msg);
    if (space != nullptr) {
      // An app image must reserve exactly its page-aligned image size and
      // consist of a single component.
      uint32_t expected_reservation_size =
          RoundUp(space->GetImageHeader().GetImageSize(), kPageSize);
      if (!CheckImageReservationSize(*space, expected_reservation_size, error_msg) ||
          !CheckImageComponentCount(*space, /*expected_component_count=*/ 1u, error_msg)) {
        return nullptr;
      }

      TimingLogger::ScopedTiming timing("RelocateImage", &logger);
      ImageHeader* image_header = reinterpret_cast<ImageHeader*>(space->GetMemMap()->Begin());
      // Dispatch on the image's pointer size so the templated RelocateInPlace
      // is instantiated for the matching pointer width.
      const PointerSize pointer_size = image_header->GetPointerSize();
      bool result;
      if (pointer_size == PointerSize::k64) {
        result = RelocateInPlace<PointerSize::k64>(*image_header,
                                                   space->GetMemMap()->Begin(),
                                                   space->GetLiveBitmap(),
                                                   oat_file,
                                                   error_msg);
      } else {
        result = RelocateInPlace<PointerSize::k32>(*image_header,
                                                   space->GetMemMap()->Begin(),
                                                   space->GetLiveBitmap(),
                                                   oat_file,
                                                   error_msg);
      }
      if (!result) {
        return nullptr;
      }
      // Verify that the image methods recorded in the (now relocated) header
      // are the very methods the current runtime uses.
      Runtime* runtime = Runtime::Current();
      CHECK_EQ(runtime->GetResolutionMethod(),
               image_header->GetImageMethod(ImageHeader::kResolutionMethod));
      CHECK_EQ(runtime->GetImtConflictMethod(),
               image_header->GetImageMethod(ImageHeader::kImtConflictMethod));
      CHECK_EQ(runtime->GetImtUnimplementedMethod(),
               image_header->GetImageMethod(ImageHeader::kImtUnimplementedMethod));
      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveAllCalleeSaves),
               image_header->GetImageMethod(ImageHeader::kSaveAllCalleeSavesMethod));
      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsOnly),
               image_header->GetImageMethod(ImageHeader::kSaveRefsOnlyMethod));
      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs),
               image_header->GetImageMethod(ImageHeader::kSaveRefsAndArgsMethod));
      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverything),
               image_header->GetImageMethod(ImageHeader::kSaveEverythingMethod));
      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverythingForClinit),
               image_header->GetImageMethod(ImageHeader::kSaveEverythingMethodForClinit));
      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverythingForSuspendCheck),
               image_header->GetImageMethod(ImageHeader::kSaveEverythingMethodForSuspendCheck));

      VLOG(image) << "ImageSpace::Loader::InitAppImage exiting " << *space.get();
    }
    if (VLOG_IS_ON(image)) {
      logger.Dump(LOG_STREAM(INFO));
    }
    return space;
  }
729
  // Open `image_filename`, validate its header and sizes, map the image data
  // and its live bitmap, and construct the ImageSpace. If `oat_file` is
  // non-null (app images), its checksum is checked against the image header
  // here; otherwise the oat file is opened and checked after the image.
  // No relocation is performed here. Returns null with `*error_msg` set on
  // failure.
  static std::unique_ptr<ImageSpace> Init(const char* image_filename,
                                          const char* image_location,
                                          const OatFile* oat_file,
                                          TimingLogger* logger,
                                          /*inout*/MemMap* image_reservation,
                                          /*out*/std::string* error_msg)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    CHECK(image_filename != nullptr);
    CHECK(image_location != nullptr);

    VLOG(image) << "ImageSpace::Init entering image_filename=" << image_filename;

    std::unique_ptr<File> file;
    {
      TimingLogger::ScopedTiming timing("OpenImageFile", logger);
      file.reset(OS::OpenFileForReading(image_filename));
      if (file == nullptr) {
        *error_msg = StringPrintf("Failed to open '%s'", image_filename);
        return nullptr;
      }
    }
    ImageHeader temp_image_header;
    ImageHeader* image_header = &temp_image_header;
    {
      TimingLogger::ScopedTiming timing("ReadImageHeader", logger);
      bool success = file->ReadFully(image_header, sizeof(*image_header));
      if (!success || !image_header->IsValid()) {
        *error_msg = StringPrintf("Invalid image header in '%s'", image_filename);
        return nullptr;
      }
    }
    // Check that the file is larger or equal to the header size + data size.
    const uint64_t image_file_size = static_cast<uint64_t>(file->GetLength());
    if (image_file_size < sizeof(ImageHeader) + image_header->GetDataSize()) {
      *error_msg = StringPrintf(
          "Image file truncated: %" PRIu64 " vs. %" PRIu64 ".",
          image_file_size,
          static_cast<uint64_t>(sizeof(ImageHeader) + image_header->GetDataSize()));
      return nullptr;
    }

    if (oat_file != nullptr) {
      // If we have an oat file (i.e. for app image), check the oat file checksum.
      // Otherwise, we open the oat file after the image and check the checksum there.
      const uint32_t oat_checksum = oat_file->GetOatHeader().GetChecksum();
      const uint32_t image_oat_checksum = image_header->GetOatChecksum();
      if (oat_checksum != image_oat_checksum) {
        *error_msg = StringPrintf("Oat checksum 0x%x does not match the image one 0x%x in image %s",
                                  oat_checksum,
                                  image_oat_checksum,
                                  image_filename);
        return nullptr;
      }
    }

    if (VLOG_IS_ON(startup)) {
      LOG(INFO) << "Dumping image sections";
      for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
        const auto section_idx = static_cast<ImageHeader::ImageSections>(i);
        auto& section = image_header->GetImageSection(section_idx);
        LOG(INFO) << section_idx << " start="
            << reinterpret_cast<void*>(image_header->GetImageBegin() + section.Offset()) << " "
            << section;
      }
    }

    const auto& bitmap_section = image_header->GetImageBitmapSection();
    // The location we want to map from is the first aligned page after the end of the stored
    // (possibly compressed) data.
    const size_t image_bitmap_offset = RoundUp(sizeof(ImageHeader) + image_header->GetDataSize(),
                                               kPageSize);
    const size_t end_of_bitmap = image_bitmap_offset + bitmap_section.Size();
    if (end_of_bitmap != image_file_size) {
      *error_msg = StringPrintf(
          "Image file size does not equal end of bitmap: size=%" PRIu64 " vs. %zu.",
          image_file_size,
          end_of_bitmap);
      return nullptr;
    }

    // GetImageBegin is the preferred address to map the image. If we manage to map the
    // image at the image begin, the amount of fixup work required is minimized.
    // If it is pic we will retry with error_msg for the failure case. Pass a null error_msg to
    // avoid reading proc maps for a mapping failure and slowing everything down.
    // For the boot image, we have already reserved the memory and we load the image
    // into the `image_reservation`.
    MemMap map = LoadImageFile(
        image_filename,
        image_location,
        *image_header,
        file->Fd(),
        logger,
        image_reservation,
        error_msg);
    if (!map.IsValid()) {
      DCHECK(!error_msg->empty());
      return nullptr;
    }
    DCHECK_EQ(0, memcmp(image_header, map.Begin(), sizeof(ImageHeader)));

    // Map the live bitmap stored at the end of the image file (read-only).
    MemMap image_bitmap_map = MemMap::MapFile(bitmap_section.Size(),
                                              PROT_READ,
                                              MAP_PRIVATE,
                                              file->Fd(),
                                              image_bitmap_offset,
                                              /*low_4gb=*/ false,
                                              image_filename,
                                              error_msg);
    if (!image_bitmap_map.IsValid()) {
      *error_msg = StringPrintf("Failed to map image bitmap: %s", error_msg->c_str());
      return nullptr;
    }
    // Loaded the map, use the image header from the file now in case we patch it with
    // RelocateInPlace.
    image_header = reinterpret_cast<ImageHeader*>(map.Begin());
    const uint32_t bitmap_index = ImageSpace::bitmap_index_.fetch_add(1);
    std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u",
                                         image_filename,
                                         bitmap_index));
    // Bitmap only needs to cover until the end of the mirror objects section;
    // we only want the mirror objects, not the ArtFields and ArtMethods.
    const ImageSection& image_objects = image_header->GetObjectsSection();
    uint8_t* const image_end = map.Begin() + image_objects.End();
    std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap;
    {
      TimingLogger::ScopedTiming timing("CreateImageBitmap", logger);
      bitmap.reset(
          accounting::ContinuousSpaceBitmap::CreateFromMemMap(
              bitmap_name,
              std::move(image_bitmap_map),
              reinterpret_cast<uint8_t*>(map.Begin()),
              // Make sure the bitmap is aligned to card size instead of just bitmap word size.
              RoundUp(image_objects.End(), gc::accounting::CardTable::kCardSize)));
      if (bitmap == nullptr) {
        *error_msg = StringPrintf("Could not create bitmap '%s'", bitmap_name.c_str());
        return nullptr;
      }
    }
    // We only want the mirror object, not the ArtFields and ArtMethods.
    std::unique_ptr<ImageSpace> space(new ImageSpace(image_filename,
                                                     image_location,
                                                     std::move(map),
                                                     std::move(bitmap),
                                                     image_end));
    // The space borrows the oat file; ownership stays with the caller.
    space->oat_file_non_owned_ = oat_file;
    return space;
  }
877
Vladimir Marko7391c8c2018-11-21 17:58:44 +0000878 static bool CheckImageComponentCount(const ImageSpace& space,
879 uint32_t expected_component_count,
880 /*out*/std::string* error_msg) {
881 const ImageHeader& header = space.GetImageHeader();
882 if (header.GetComponentCount() != expected_component_count) {
883 *error_msg = StringPrintf("Unexpected component count in %s, received %u, expected %u",
884 space.GetImageFilename().c_str(),
885 header.GetComponentCount(),
886 expected_component_count);
887 return false;
888 }
889 return true;
890 }
891
892 static bool CheckImageReservationSize(const ImageSpace& space,
893 uint32_t expected_reservation_size,
894 /*out*/std::string* error_msg) {
895 const ImageHeader& header = space.GetImageHeader();
896 if (header.GetImageReservationSize() != expected_reservation_size) {
897 *error_msg = StringPrintf("Unexpected reservation size in %s, received %u, expected %u",
898 space.GetImageFilename().c_str(),
899 header.GetImageReservationSize(),
900 expected_reservation_size);
901 return false;
902 }
903 return true;
904 }
905
Andreas Gampea463b6a2016-08-12 21:53:32 -0700906 private:
  // Map the image data from `fd`. Uncompressed images are mapped directly
  // (MAP_PRIVATE copy-on-write) at the reservation address. Compressed images
  // are mapped anonymously and each block is LZ4-decompressed into that
  // mapping — in parallel when a thread pool is available and the image has
  // enough blocks. Returns an invalid MemMap (with `*error_msg` set, when
  // `error_msg` is non-null) on failure.
  static MemMap LoadImageFile(const char* image_filename,
                              const char* image_location,
                              const ImageHeader& image_header,
                              int fd,
                              TimingLogger* logger,
                              /*inout*/MemMap* image_reservation,
                              /*out*/std::string* error_msg)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    TimingLogger::ScopedTiming timing("MapImageFile", logger);
    std::string temp_error_msg;
    const bool is_compressed = image_header.HasCompressedBlock();
    if (!is_compressed) {
      // Uncompressed: map the file contents directly into the reservation.
      uint8_t* address = (image_reservation != nullptr) ? image_reservation->Begin() : nullptr;
      return MemMap::MapFileAtAddress(address,
                                      image_header.GetImageSize(),
                                      PROT_READ | PROT_WRITE,
                                      MAP_PRIVATE,
                                      fd,
                                      /*start=*/ 0,
                                      /*low_4gb=*/ true,
                                      image_filename,
                                      /*reuse=*/ false,
                                      image_reservation,
                                      error_msg);
    }

    // Reserve output and decompress into it.
    MemMap map = MemMap::MapAnonymous(image_location,
                                      image_header.GetImageSize(),
                                      PROT_READ | PROT_WRITE,
                                      /*low_4gb=*/ true,
                                      image_reservation,
                                      error_msg);
    if (map.IsValid()) {
      const size_t stored_size = image_header.GetDataSize();
      // Temporary read-only mapping of the (compressed) file contents.
      MemMap temp_map = MemMap::MapFile(sizeof(ImageHeader) + stored_size,
                                        PROT_READ,
                                        MAP_PRIVATE,
                                        fd,
                                        /*start=*/ 0,
                                        /*low_4gb=*/ false,
                                        image_filename,
                                        error_msg);
      if (!temp_map.IsValid()) {
        DCHECK(error_msg == nullptr || !error_msg->empty());
        return MemMap::Invalid();
      }
      // The header itself is stored uncompressed; copy it over first.
      memcpy(map.Begin(), &image_header, sizeof(ImageHeader));

      Runtime::ScopedThreadPoolUsage stpu;
      ThreadPool* const pool = stpu.GetThreadPool();
      const uint64_t start = NanoTime();
      Thread* const self = Thread::Current();
      // Only bother parallelizing when there are at least two blocks.
      static constexpr size_t kMinBlocks = 2u;
      const bool use_parallel = pool != nullptr && image_header.GetBlockCount() >= kMinBlocks;
      for (const ImageHeader::Block& block : image_header.GetBlocks(temp_map.Begin())) {
        // Each block decompresses independently; `block` is captured by
        // reference but outlives the tasks (we wait on the pool below).
        auto function = [&](Thread*) {
          const uint64_t start2 = NanoTime();
          ScopedTrace trace("LZ4 decompress block");
          bool result = block.Decompress(/*out_ptr=*/map.Begin(),
                                         /*in_ptr=*/temp_map.Begin(),
                                         error_msg);
          if (!result && error_msg != nullptr) {
            *error_msg = "Failed to decompress image block " + *error_msg;
          }
          VLOG(image) << "Decompress block " << block.GetDataSize() << " -> "
                      << block.GetImageSize() << " in " << PrettyDuration(NanoTime() - start2);
        };
        if (use_parallel) {
          pool->AddTask(self, new FunctionTask(std::move(function)));
        } else {
          function(self);
        }
      }
      if (use_parallel) {
        ScopedTrace trace("Waiting for workers");
        // Go to native since we don't want to suspend while holding the mutator lock.
        ScopedThreadSuspension sts(Thread::Current(), kNative);
        pool->Wait(self, true, false);
      }
      const uint64_t time = NanoTime() - start;
      // Add 1 ns to the divisor to prevent a possible divide by 0.
      VLOG(image) << "Decompressing image took " << PrettyDuration(time) << " ("
                  << PrettySize(static_cast<uint64_t>(map.Size()) * MsToNs(1000) / (time + 1))
                  << "/s)";
    }

    return map;
  }
996
  // Trivial range containing no addresses. Used as the default type for the
  // unused range slots of ForwardAddress so lookups fall through to the
  // ranges that were actually provided.
  class EmptyRange {
   public:
    ALWAYS_INLINE bool InSource(uintptr_t) const { return false; }
    ALWAYS_INLINE bool InDest(uintptr_t) const { return false; }
    // Never called: nothing is ever "in source" for an empty range.
    ALWAYS_INLINE uintptr_t ToDest(uintptr_t) const { UNREACHABLE(); }
  };
1003
1004 template <typename Range0, typename Range1 = EmptyRange, typename Range2 = EmptyRange>
1005 class ForwardAddress {
1006 public:
1007 ForwardAddress(const Range0& range0 = Range0(),
1008 const Range1& range1 = Range1(),
1009 const Range2& range2 = Range2())
1010 : range0_(range0), range1_(range1), range2_(range2) {}
Andreas Gampea463b6a2016-08-12 21:53:32 -07001011
1012 // Return the relocated address of a heap object.
Mathieu Chartier9a3da9a2018-12-21 12:56:55 -08001013 // Null checks must be performed in the caller (for performance reasons).
Andreas Gampea463b6a2016-08-12 21:53:32 -07001014 template <typename T>
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001015 ALWAYS_INLINE T* operator()(T* src) const {
Mathieu Chartier9a3da9a2018-12-21 12:56:55 -08001016 DCHECK(src != nullptr);
Andreas Gampea463b6a2016-08-12 21:53:32 -07001017 const uintptr_t uint_src = reinterpret_cast<uintptr_t>(src);
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001018 if (range2_.InSource(uint_src)) {
1019 return reinterpret_cast<T*>(range2_.ToDest(uint_src));
Andreas Gampea463b6a2016-08-12 21:53:32 -07001020 }
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001021 if (range1_.InSource(uint_src)) {
1022 return reinterpret_cast<T*>(range1_.ToDest(uint_src));
Andreas Gampea463b6a2016-08-12 21:53:32 -07001023 }
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001024 CHECK(range0_.InSource(uint_src))
1025 << reinterpret_cast<const void*>(src) << " not in "
1026 << reinterpret_cast<const void*>(range0_.Source()) << "-"
1027 << reinterpret_cast<const void*>(range0_.Source() + range0_.Length());
1028 return reinterpret_cast<T*>(range0_.ToDest(uint_src));
Andreas Gampea463b6a2016-08-12 21:53:32 -07001029 }
1030
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001031 private:
1032 const Range0 range0_;
1033 const Range1 range1_;
1034 const Range2 range2_;
Andreas Gampea463b6a2016-08-12 21:53:32 -07001035 };
1036
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001037 template <typename Forward>
1038 class FixupRootVisitor {
Andreas Gampea463b6a2016-08-12 21:53:32 -07001039 public:
1040 template<typename... Args>
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001041 explicit FixupRootVisitor(Args... args) : forward_(args...) {}
Andreas Gampea463b6a2016-08-12 21:53:32 -07001042
1043 ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001044 REQUIRES_SHARED(Locks::mutator_lock_) {
Andreas Gampea463b6a2016-08-12 21:53:32 -07001045 if (!root->IsNull()) {
1046 VisitRoot(root);
1047 }
1048 }
1049
1050 ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001051 REQUIRES_SHARED(Locks::mutator_lock_) {
Andreas Gampea463b6a2016-08-12 21:53:32 -07001052 mirror::Object* ref = root->AsMirrorPtr();
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001053 mirror::Object* new_ref = forward_(ref);
Andreas Gampea463b6a2016-08-12 21:53:32 -07001054 if (ref != new_ref) {
1055 root->Assign(new_ref);
1056 }
1057 }
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001058
1059 private:
1060 Forward forward_;
Andreas Gampea463b6a2016-08-12 21:53:32 -07001061 };
1062
  // Object visitor that forwards all heap references held by visited objects
  // through `forward`. Uses the `visited` bitmap to process each object only
  // once. Native roots are intentionally ignored here (empty VisitRoot*); they
  // are fixed up separately together with method entrypoints.
  template <typename Forward>
  class FixupObjectVisitor {
   public:
    explicit FixupObjectVisitor(gc::accounting::ContinuousSpaceBitmap* visited,
                                const Forward& forward)
        : visited_(visited), forward_(forward) {}

    // Fix up separately since we also need to fix up method entrypoints.
    ALWAYS_INLINE void VisitRootIfNonNull(
        mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}

    ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
        const {}

    // Field visitor: forward a single reference field of `obj`.
    ALWAYS_INLINE void operator()(ObjPtr<mirror::Object> obj,
                                  MemberOffset offset,
                                  bool is_static ATTRIBUTE_UNUSED) const
        NO_THREAD_SAFETY_ANALYSIS {
      // Space is not yet added to the heap, don't do a read barrier.
      mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
          offset);
      if (ref != nullptr) {
        // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
        // image.
        obj->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(offset, forward_(ref));
      }
    }

    // java.lang.ref.Reference visitor: forward the referent field.
    void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
                    ObjPtr<mirror::Reference> ref) const
        REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
      mirror::Object* obj = ref->GetReferent<kWithoutReadBarrier>();
      if (obj != nullptr) {
        ref->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
            mirror::Reference::ReferentOffset(),
            forward_(obj));
      }
    }

    // Per-object entry point: visit each object's references exactly once.
    void operator()(mirror::Object* obj) const
        NO_THREAD_SAFETY_ANALYSIS {
      if (!visited_->Set(obj)) {
        // Not already visited.
        obj->VisitReferences</*visit native roots*/false, kVerifyNone, kWithoutReadBarrier>(
            *this,
            *this);
        // Classes are expected to have been handled in an earlier pass
        // (see the two-pass approach in RelocateInPlace).
        CHECK(!obj->IsClass());
      }
    }

   private:
    // Bitmap recording which objects were already processed.
    gc::accounting::ContinuousSpaceBitmap* const visited_;
    // Forwarding functor applied to every reference.
    Forward forward_;
  };
Mathieu Chartierfbc31082016-01-24 11:59:56 -08001118
Andreas Gampea463b6a2016-08-12 21:53:32 -07001119 // Relocate an image space mapped at target_base which possibly used to be at a different base
Vladimir Marko4df2d802018-09-27 16:42:44 +00001120 // address. In place means modifying a single ImageSpace in place rather than relocating from
1121 // one ImageSpace to another.
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001122 template <PointerSize kPointerSize>
Andreas Gampea463b6a2016-08-12 21:53:32 -07001123 static bool RelocateInPlace(ImageHeader& image_header,
1124 uint8_t* target_base,
1125 accounting::ContinuousSpaceBitmap* bitmap,
1126 const OatFile* app_oat_file,
1127 std::string* error_msg) {
1128 DCHECK(error_msg != nullptr);
Andreas Gampea463b6a2016-08-12 21:53:32 -07001129 // Set up sections.
1130 uint32_t boot_image_begin = 0;
1131 uint32_t boot_image_end = 0;
1132 uint32_t boot_oat_begin = 0;
1133 uint32_t boot_oat_end = 0;
Andreas Gampea463b6a2016-08-12 21:53:32 -07001134 gc::Heap* const heap = Runtime::Current()->GetHeap();
1135 heap->GetBootImagesSize(&boot_image_begin, &boot_image_end, &boot_oat_begin, &boot_oat_end);
1136 if (boot_image_begin == boot_image_end) {
1137 *error_msg = "Can not relocate app image without boot image space";
1138 return false;
1139 }
1140 if (boot_oat_begin == boot_oat_end) {
1141 *error_msg = "Can not relocate app image without boot oat file";
1142 return false;
1143 }
Vladimir Marko0c78ef72018-11-21 14:09:35 +00001144 const uint32_t boot_image_size = boot_oat_end - boot_image_begin;
Andreas Gampea463b6a2016-08-12 21:53:32 -07001145 const uint32_t image_header_boot_image_size = image_header.GetBootImageSize();
Andreas Gampea463b6a2016-08-12 21:53:32 -07001146 if (boot_image_size != image_header_boot_image_size) {
1147 *error_msg = StringPrintf("Boot image size %" PRIu64 " does not match expected size %"
1148 PRIu64,
1149 static_cast<uint64_t>(boot_image_size),
1150 static_cast<uint64_t>(image_header_boot_image_size));
1151 return false;
1152 }
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001153 const ImageSection& objects_section = image_header.GetObjectsSection();
1154 // Where the app image objects are mapped to.
1155 uint8_t* objects_location = target_base + objects_section.Offset();
Andreas Gampea463b6a2016-08-12 21:53:32 -07001156 TimingLogger logger(__FUNCTION__, true, false);
1157 RelocationRange boot_image(image_header.GetBootImageBegin(),
1158 boot_image_begin,
1159 boot_image_size);
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001160 // Metadata is everything after the objects section, use exclusion to be safe.
1161 RelocationRange app_image_metadata(
1162 reinterpret_cast<uintptr_t>(image_header.GetImageBegin()) + objects_section.End(),
1163 reinterpret_cast<uintptr_t>(target_base) + objects_section.End(),
1164 image_header.GetImageSize() - objects_section.End());
1165 // App image heap objects, may be mapped in the heap.
1166 RelocationRange app_image_objects(
1167 reinterpret_cast<uintptr_t>(image_header.GetImageBegin()) + objects_section.Offset(),
1168 reinterpret_cast<uintptr_t>(objects_location),
1169 objects_section.Size());
Andreas Gampea463b6a2016-08-12 21:53:32 -07001170 // Use the oat data section since this is where the OatFile::Begin is.
1171 RelocationRange app_oat(reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin()),
1172 // Not necessarily in low 4GB.
1173 reinterpret_cast<uintptr_t>(app_oat_file->Begin()),
1174 image_header.GetOatDataEnd() - image_header.GetOatDataBegin());
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001175 VLOG(image) << "App image metadata " << app_image_metadata;
1176 VLOG(image) << "App image objects " << app_image_objects;
Andreas Gampea463b6a2016-08-12 21:53:32 -07001177 VLOG(image) << "App oat " << app_oat;
1178 VLOG(image) << "Boot image " << boot_image;
Vladimir Marko0c78ef72018-11-21 14:09:35 +00001179 // True if we need to fixup any heap pointers.
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001180 const bool fixup_image = boot_image.Delta() != 0 || app_image_metadata.Delta() != 0 ||
1181 app_image_objects.Delta() != 0;
Vladimir Marko0c78ef72018-11-21 14:09:35 +00001182 if (!fixup_image) {
Andreas Gampea463b6a2016-08-12 21:53:32 -07001183 // Nothing to fix up.
Mathieu Chartierfbc31082016-01-24 11:59:56 -08001184 return true;
1185 }
Andreas Gampea463b6a2016-08-12 21:53:32 -07001186 ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001187
1188 using ForwardObject = ForwardAddress<RelocationRange, RelocationRange>;
1189 ForwardObject forward_object(boot_image, app_image_objects);
1190 ForwardObject forward_metadata(boot_image, app_image_metadata);
1191 using ForwardCode = ForwardAddress<RelocationRange, RelocationRange>;
1192 ForwardCode forward_code(boot_image, app_oat);
1193 PatchObjectVisitor<kPointerSize, ForwardObject, ForwardCode> patch_object_visitor(
1194 forward_object,
1195 forward_metadata);
Andreas Gampea463b6a2016-08-12 21:53:32 -07001196 if (fixup_image) {
1197 // Two pass approach, fix up all classes first, then fix up non class-objects.
1198 // The visited bitmap is used to ensure that pointer arrays are not forwarded twice.
1199 std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> visited_bitmap(
1200 gc::accounting::ContinuousSpaceBitmap::Create("Relocate bitmap",
1201 target_base,
1202 image_header.GetImageSize()));
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001203 {
1204 TimingLogger::ScopedTiming timing("Fixup classes", &logger);
1205 const auto& class_table_section = image_header.GetClassTableSection();
1206 if (class_table_section.Size() > 0u) {
1207 ScopedObjectAccess soa(Thread::Current());
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001208 ClassTableVisitor class_table_visitor(forward_object);
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001209 size_t read_count = 0u;
1210 const uint8_t* data = target_base + class_table_section.Offset();
1211 // We avoid making a copy of the data since we want modifications to be propagated to the
1212 // memory map.
1213 ClassTable::ClassSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
1214 for (ClassTable::TableSlot& slot : temp_set) {
1215 slot.VisitRoot(class_table_visitor);
Vladimir Marko1fe58392019-04-10 16:14:56 +01001216 ObjPtr<mirror::Class> klass = slot.Read<kWithoutReadBarrier>();
1217 if (!app_image_objects.InDest(klass.Ptr())) {
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001218 continue;
1219 }
Vladimir Marko1fe58392019-04-10 16:14:56 +01001220 const bool already_marked = visited_bitmap->Set(klass.Ptr());
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001221 CHECK(!already_marked) << "App image class already visited";
Vladimir Marko1fe58392019-04-10 16:14:56 +01001222 patch_object_visitor.VisitClass(klass.Ptr());
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001223 // Then patch the non-embedded vtable and iftable.
Vladimir Markoc524e9e2019-03-26 10:54:50 +00001224 ObjPtr<mirror::PointerArray> vtable =
1225 klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001226 if (vtable != nullptr &&
Vladimir Markoc524e9e2019-03-26 10:54:50 +00001227 app_image_objects.InDest(vtable.Ptr()) &&
1228 !visited_bitmap->Set(vtable.Ptr())) {
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001229 patch_object_visitor.VisitPointerArray(vtable);
1230 }
Vladimir Markoc524e9e2019-03-26 10:54:50 +00001231 ObjPtr<mirror::IfTable> iftable = klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
1232 if (iftable != nullptr && app_image_objects.InDest(iftable.Ptr())) {
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001233 // Avoid processing the fields of iftable since we will process them later anyways
1234 // below.
1235 int32_t ifcount = klass->GetIfTableCount<kVerifyNone>();
1236 for (int32_t i = 0; i != ifcount; ++i) {
Vladimir Marko557fece2019-03-26 14:29:41 +00001237 ObjPtr<mirror::PointerArray> unpatched_ifarray =
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001238 iftable->GetMethodArrayOrNull<kVerifyNone, kWithoutReadBarrier>(i);
1239 if (unpatched_ifarray != nullptr) {
1240 // The iftable has not been patched, so we need to explicitly adjust the pointer.
Vladimir Marko557fece2019-03-26 14:29:41 +00001241 ObjPtr<mirror::PointerArray> ifarray = forward_object(unpatched_ifarray.Ptr());
1242 if (app_image_objects.InDest(ifarray.Ptr()) &&
1243 !visited_bitmap->Set(ifarray.Ptr())) {
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001244 patch_object_visitor.VisitPointerArray(ifarray);
1245 }
1246 }
1247 }
1248 }
1249 }
1250 }
1251 }
1252
1253 // Fixup objects may read fields in the boot image, use the mutator lock here for sanity.
1254 // Though its probably not required.
Mathieu Chartier2ffc74b2019-01-03 19:25:41 -08001255 TimingLogger::ScopedTiming timing("Fixup objects", &logger);
Andreas Gampea463b6a2016-08-12 21:53:32 -07001256 ScopedObjectAccess soa(Thread::Current());
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001257 // Need to update the image to be at the target base.
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001258 uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
1259 uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001260 FixupObjectVisitor<ForwardObject> fixup_object_visitor(visited_bitmap.get(), forward_object);
Andreas Gampea463b6a2016-08-12 21:53:32 -07001261 bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_object_visitor);
1262 // Fixup image roots.
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001263 CHECK(app_image_objects.InSource(reinterpret_cast<uintptr_t>(
Vladimir Markoc13fbd82018-06-04 16:16:28 +01001264 image_header.GetImageRoots<kWithoutReadBarrier>().Ptr())));
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001265 image_header.RelocateImageObjects(app_image_objects.Delta());
Andreas Gampea463b6a2016-08-12 21:53:32 -07001266 CHECK_EQ(image_header.GetImageBegin(), target_base);
1267 // Fix up dex cache DexFile pointers.
Vladimir Marko4617d582019-03-28 13:48:31 +00001268 ObjPtr<mirror::ObjectArray<mirror::DexCache>> dex_caches =
1269 image_header.GetImageRoot<kWithoutReadBarrier>(ImageHeader::kDexCaches)
1270 ->AsObjectArray<mirror::DexCache, kVerifyNone>();
Andreas Gampea463b6a2016-08-12 21:53:32 -07001271 for (int32_t i = 0, count = dex_caches->GetLength(); i < count; ++i) {
Vladimir Marko423bebb2019-03-26 15:17:21 +00001272 ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get<kVerifyNone, kWithoutReadBarrier>(i);
Mathieu Chartier25602dc2018-12-11 11:31:57 -08001273 CHECK(dex_cache != nullptr);
1274 patch_object_visitor.VisitDexCacheArrays(dex_cache);
Mathieu Chartierfbc31082016-01-24 11:59:56 -08001275 }
1276 }
Mathieu Chartierfbc31082016-01-24 11:59:56 -08001277 {
1278 // Only touches objects in the app image, no need for mutator lock.
Andreas Gampea463b6a2016-08-12 21:53:32 -07001279 TimingLogger::ScopedTiming timing("Fixup methods", &logger);
Mathieu Chartier9d5956a2019-03-22 11:29:08 -07001280 image_header.VisitPackedArtMethods([&](ArtMethod& method) NO_THREAD_SAFETY_ANALYSIS {
1281 // TODO: Consider a separate visitor for runtime vs normal methods.
1282 if (UNLIKELY(method.IsRuntimeMethod())) {
1283 ImtConflictTable* table = method.GetImtConflictTable(kPointerSize);
1284 if (table != nullptr) {
1285 ImtConflictTable* new_table = forward_metadata(table);
1286 if (table != new_table) {
1287 method.SetImtConflictTable(new_table, kPointerSize);
1288 }
1289 }
1290 const void* old_code = method.GetEntryPointFromQuickCompiledCodePtrSize(kPointerSize);
1291 const void* new_code = forward_code(old_code);
1292 if (old_code != new_code) {
1293 method.SetEntryPointFromQuickCompiledCodePtrSize(new_code, kPointerSize);
1294 }
1295 } else {
1296 method.UpdateObjectsForImageRelocation(forward_object);
1297 method.UpdateEntrypoints(forward_code, kPointerSize);
1298 }
1299 }, target_base, kPointerSize);
Mathieu Chartiere42888f2016-04-14 10:49:19 -07001300 }
Andreas Gampea463b6a2016-08-12 21:53:32 -07001301 if (fixup_image) {
1302 {
1303 // Only touches objects in the app image, no need for mutator lock.
1304 TimingLogger::ScopedTiming timing("Fixup fields", &logger);
Mathieu Chartier9d5956a2019-03-22 11:29:08 -07001305 image_header.VisitPackedArtFields([&](ArtField& field) NO_THREAD_SAFETY_ANALYSIS {
1306 field.UpdateObjects(forward_object);
1307 }, target_base);
Andreas Gampea463b6a2016-08-12 21:53:32 -07001308 }
1309 {
1310 TimingLogger::ScopedTiming timing("Fixup imt", &logger);
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001311 image_header.VisitPackedImTables(forward_metadata, target_base, kPointerSize);
Andreas Gampea463b6a2016-08-12 21:53:32 -07001312 }
1313 {
1314 TimingLogger::ScopedTiming timing("Fixup conflict tables", &logger);
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001315 image_header.VisitPackedImtConflictTables(forward_metadata, target_base, kPointerSize);
Andreas Gampea463b6a2016-08-12 21:53:32 -07001316 }
1317 // In the app image case, the image methods are actually in the boot image.
1318 image_header.RelocateImageMethods(boot_image.Delta());
Mathieu Chartier74ccee62018-10-10 10:30:29 -07001319 // Fix up the intern table.
1320 const auto& intern_table_section = image_header.GetInternedStringsSection();
1321 if (intern_table_section.Size() > 0u) {
1322 TimingLogger::ScopedTiming timing("Fixup intern table", &logger);
1323 ScopedObjectAccess soa(Thread::Current());
1324 // Fixup the pointers in the newly written intern table to contain image addresses.
1325 InternTable temp_intern_table;
1326 // Note that we require that ReadFromMemory does not make an internal copy of the elements
1327 // so that the VisitRoots() will update the memory directly rather than the copies.
Mathieu Chartier74ccee62018-10-10 10:30:29 -07001328 temp_intern_table.AddTableFromMemory(target_base + intern_table_section.Offset(),
1329 [&](InternTable::UnorderedSet& strings)
1330 REQUIRES_SHARED(Locks::mutator_lock_) {
1331 for (GcRoot<mirror::String>& root : strings) {
Mathieu Chartierf0a96eb2019-01-11 11:06:43 -08001332 root = GcRoot<mirror::String>(forward_object(root.Read<kWithoutReadBarrier>()));
Mathieu Chartier74ccee62018-10-10 10:30:29 -07001333 }
Mathieu Chartier8cc418e2018-10-31 10:54:30 -07001334 }, /*is_boot_image=*/ false);
Mathieu Chartier74ccee62018-10-10 10:30:29 -07001335 }
Artem Udovichenkoa62cb9b2016-06-30 09:18:25 +00001336 }
Andreas Gampea463b6a2016-08-12 21:53:32 -07001337 if (VLOG_IS_ON(image)) {
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07001338 logger.Dump(LOG_STREAM(INFO));
Mathieu Chartierfbc31082016-01-24 11:59:56 -08001339 }
Andreas Gampea463b6a2016-08-12 21:53:32 -07001340 return true;
Andreas Gampe7fa55782016-06-15 17:45:01 -07001341 }
Andreas Gampea463b6a2016-08-12 21:53:32 -07001342};
Hiroshi Yamauchibd0fb612014-05-20 13:46:00 -07001343
Vladimir Marko82e1e272018-08-20 13:38:06 +00001344class ImageSpace::BootImageLoader {
1345 public:
  // Constructs a loader for the boot image. The boot class path, its locations and the
  // image location are captured by reference and must outlive this loader. Image-file
  // discovery state (has_system_, has_cache_, dalvik_cache_exists_, ...) starts out as
  // "nothing found"; call FindImageFiles() to populate it.
  BootImageLoader(const std::vector<std::string>& boot_class_path,
                  const std::vector<std::string>& boot_class_path_locations,
                  const std::string& image_location,
                  InstructionSet image_isa,
                  bool relocate,
                  bool executable,
                  bool is_zygote)
      : boot_class_path_(boot_class_path),
        boot_class_path_locations_(boot_class_path_locations),
        image_location_(image_location),
        image_isa_(image_isa),
        relocate_(relocate),
        executable_(executable),
        is_zygote_(is_zygote),
        has_system_(false),
        has_cache_(false),
        is_global_cache_(true),  // Assume the global cache until FindImageFiles() says otherwise.
        dalvik_cache_exists_(false),
        dalvik_cache_(),
        cache_filename_() {
  }
1367
1368 bool IsZygote() const { return is_zygote_; }
1369
1370 void FindImageFiles() {
1371 std::string system_filename;
Vladimir Marko82e1e272018-08-20 13:38:06 +00001372 bool found_image = FindImageFilenameImpl(image_location_.c_str(),
1373 image_isa_,
1374 &has_system_,
1375 &system_filename,
Vladimir Markoe3070022018-08-22 09:36:19 +00001376 &dalvik_cache_exists_,
Vladimir Marko82e1e272018-08-20 13:38:06 +00001377 &dalvik_cache_,
1378 &is_global_cache_,
1379 &has_cache_,
1380 &cache_filename_);
Vladimir Markoe3070022018-08-22 09:36:19 +00001381 DCHECK(!dalvik_cache_exists_ || !dalvik_cache_.empty());
Vladimir Marko82e1e272018-08-20 13:38:06 +00001382 DCHECK_EQ(found_image, has_system_ || has_cache_);
1383 }
1384
1385 bool HasSystem() const { return has_system_; }
1386 bool HasCache() const { return has_cache_; }
1387
Vladimir Markoe3070022018-08-22 09:36:19 +00001388 bool DalvikCacheExists() const { return dalvik_cache_exists_; }
Vladimir Marko82e1e272018-08-20 13:38:06 +00001389 bool IsGlobalCache() const { return is_global_cache_; }
1390
1391 const std::string& GetDalvikCache() const {
Vladimir Marko82e1e272018-08-20 13:38:06 +00001392 return dalvik_cache_;
1393 }
1394
1395 const std::string& GetCacheFilename() const {
Vladimir Marko82e1e272018-08-20 13:38:06 +00001396 return cache_filename_;
1397 }
1398
Andreas Gampe86823542019-02-25 09:38:49 -08001399 bool LoadFromSystem(bool validate_oat_file,
1400 size_t extra_reservation_size,
Vladimir Markod44d7032018-08-30 13:02:31 +01001401 /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
1402 /*out*/MemMap* extra_reservation,
1403 /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
Vladimir Markof4efa9e2018-10-17 14:12:45 +01001404 TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
Vladimir Marko82e1e272018-08-20 13:38:06 +00001405 std::string filename = GetSystemImageFilename(image_location_.c_str(), image_isa_);
Vladimir Markoc09cd052018-08-23 16:36:36 +01001406
Vladimir Marko7391c8c2018-11-21 17:58:44 +00001407 if (!LoadFromFile(filename,
Andreas Gampe86823542019-02-25 09:38:49 -08001408 validate_oat_file,
Vladimir Marko7391c8c2018-11-21 17:58:44 +00001409 extra_reservation_size,
1410 &logger,
1411 boot_image_spaces,
1412 extra_reservation,
1413 error_msg)) {
Vladimir Markoc09cd052018-08-23 16:36:36 +01001414 return false;
1415 }
1416
Vladimir Marko4df2d802018-09-27 16:42:44 +00001417 if (VLOG_IS_ON(image)) {
Vladimir Marko312f10e2018-11-21 12:35:24 +00001418 LOG(INFO) << "ImageSpace::BootImageLoader::LoadFromSystem exiting "
1419 << boot_image_spaces->front();
Vladimir Marko4df2d802018-09-27 16:42:44 +00001420 logger.Dump(LOG_STREAM(INFO));
1421 }
Vladimir Marko82e1e272018-08-20 13:38:06 +00001422 return true;
1423 }
1424
1425 bool LoadFromDalvikCache(
Vladimir Marko82e1e272018-08-20 13:38:06 +00001426 bool validate_oat_file,
Vladimir Markod44d7032018-08-30 13:02:31 +01001427 size_t extra_reservation_size,
1428 /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
1429 /*out*/MemMap* extra_reservation,
1430 /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
Vladimir Markof4efa9e2018-10-17 14:12:45 +01001431 TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
Vladimir Marko82e1e272018-08-20 13:38:06 +00001432 DCHECK(DalvikCacheExists());
Vladimir Markoc09cd052018-08-23 16:36:36 +01001433
Vladimir Marko7391c8c2018-11-21 17:58:44 +00001434 if (!LoadFromFile(cache_filename_,
1435 validate_oat_file,
1436 extra_reservation_size,
1437 &logger,
1438 boot_image_spaces,
1439 extra_reservation,
1440 error_msg)) {
Vladimir Markoc09cd052018-08-23 16:36:36 +01001441 return false;
1442 }
1443
Vladimir Marko4df2d802018-09-27 16:42:44 +00001444 if (VLOG_IS_ON(image)) {
Vladimir Marko312f10e2018-11-21 12:35:24 +00001445 LOG(INFO) << "ImageSpace::BootImageLoader::LoadFromDalvikCache exiting "
1446 << boot_image_spaces->front();
Vladimir Marko4df2d802018-09-27 16:42:44 +00001447 logger.Dump(LOG_STREAM(INFO));
1448 }
Vladimir Marko82e1e272018-08-20 13:38:06 +00001449 return true;
1450 }
1451
1452 private:
  // Loads a (possibly multi-component) boot image starting from `filename`:
  // reads and validates the primary image header, reserves the address range the
  // image was linked for, loads each component image and its oat file into that
  // reservation, and finally relocates the spaces if needed.
  // On success fills *boot_image_spaces and *extra_reservation; on failure
  // returns false with a description in *error_msg.
  bool LoadFromFile(
      const std::string& filename,
      bool validate_oat_file,
      size_t extra_reservation_size,
      TimingLogger* logger,
      /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
      /*out*/MemMap* extra_reservation,
      /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
    ImageHeader system_hdr;
    if (!ReadSpecificImageHeader(filename.c_str(), &system_hdr)) {
      *error_msg = StringPrintf("Cannot read header of %s", filename.c_str());
      return false;
    }
    // The component count must be in (0, boot_class_path_.size()]: each image
    // component corresponds to one boot class path entry.
    if (system_hdr.GetComponentCount() == 0u ||
        system_hdr.GetComponentCount() > boot_class_path_.size()) {
      *error_msg = StringPrintf("Unexpected component count in %s, received %u, "
                                "expected non-zero and <= %zu",
                                filename.c_str(),
                                system_hdr.GetComponentCount(),
                                boot_class_path_.size());
      return false;
    }
    // Reserve the full image range (plus the requested extra reservation) up front;
    // individual component loads below carve their mappings out of this reservation.
    MemMap image_reservation;
    MemMap local_extra_reservation;
    if (!ReserveBootImageMemory(system_hdr.GetImageReservationSize(),
                                reinterpret_cast32<uint32_t>(system_hdr.GetImageBegin()),
                                extra_reservation_size,
                                &image_reservation,
                                &local_extra_reservation,
                                error_msg)) {
      return false;
    }

    // Expand the single image location/filename into per-component locations/filenames.
    ArrayRef<const std::string> provided_locations(boot_class_path_locations_.data(),
                                                   system_hdr.GetComponentCount());
    std::vector<std::string> locations =
        ExpandMultiImageLocations(provided_locations, image_location_);
    std::vector<std::string> filenames =
        ExpandMultiImageLocations(provided_locations, filename);
    DCHECK_EQ(locations.size(), filenames.size());
    std::vector<std::unique_ptr<ImageSpace>> spaces;
    spaces.reserve(locations.size());
    for (std::size_t i = 0u, size = locations.size(); i != size; ++i) {
      spaces.push_back(Load(locations[i], filenames[i], logger, &image_reservation, error_msg));
      const ImageSpace* space = spaces.back().get();
      if (space == nullptr) {
        return false;
      }
      // Only the primary (first) image records the total component count and
      // reservation size; the headers of the other components must record zero.
      uint32_t expected_component_count = (i == 0u) ? system_hdr.GetComponentCount() : 0u;
      uint32_t expected_reservation_size = (i == 0u) ? system_hdr.GetImageReservationSize() : 0u;
      if (!Loader::CheckImageReservationSize(*space, expected_reservation_size, error_msg) ||
          !Loader::CheckImageComponentCount(*space, expected_component_count, error_msg)) {
        return false;
      }
    }
    // Open the oat file for each space. Only the primary image's oat file records
    // the full boot class path; the others are checked against an empty string.
    for (size_t i = 0u, size = spaces.size(); i != size; ++i) {
      std::string expected_boot_class_path =
          (i == 0u) ? android::base::Join(provided_locations, ':') : std::string();
      if (!OpenOatFile(spaces[i].get(),
                       boot_class_path_[i],
                       expected_boot_class_path,
                       validate_oat_file,
                       logger,
                       &image_reservation,
                       error_msg)) {
        return false;
      }
    }
    // Every byte of the up-front reservation must have been claimed by now.
    if (!CheckReservationExhausted(image_reservation, error_msg)) {
      return false;
    }

    MaybeRelocateSpaces(spaces, logger);
    // Hand the results to the caller only after everything has succeeded.
    boot_image_spaces->swap(spaces);
    *extra_reservation = std::move(local_extra_reservation);
    return true;
  }
1530
Mathieu Chartierd3f037b2018-12-06 23:50:56 -08001531 private:
1532 class RelocateVisitor {
1533 public:
1534 explicit RelocateVisitor(uint32_t diff) : diff_(diff) {}
Vladimir Marko4df2d802018-09-27 16:42:44 +00001535
Mathieu Chartierd3f037b2018-12-06 23:50:56 -08001536 template <typename T>
1537 ALWAYS_INLINE T* operator()(T* src) const {
1538 DCHECK(src != nullptr);
1539 return reinterpret_cast32<T*>(reinterpret_cast32<uint32_t>(src) + diff_);
Vladimir Marko4df2d802018-09-27 16:42:44 +00001540 }
Vladimir Marko4df2d802018-09-27 16:42:44 +00001541
Mathieu Chartierd3f037b2018-12-06 23:50:56 -08001542 private:
1543 const uint32_t diff_;
1544 };
Vladimir Marko4df2d802018-09-27 16:42:44 +00001545
Mathieu Chartier9d5956a2019-03-22 11:29:08 -07001546 static void** PointerAddress(ArtMethod* method, MemberOffset offset) {
1547 return reinterpret_cast<void**>(reinterpret_cast<uint8_t*>(method) + offset.Uint32Value());
1548 }
1549
  // Patches every pointer in the given boot image spaces by `diff` (a 32-bit
  // displacement applied with wrapping arithmetic by RelocateVisitor).
  // Order matters: headers, native metadata (fields/methods/IM and conflict
  // tables), the intern table, then the class table and classes — so that class
  // information is usable when visiting the remaining heap objects last.
  // The `patched_objects` bitmap guards against patching any object twice.
  template <PointerSize kPointerSize>
  static void DoRelocateSpaces(const std::vector<std::unique_ptr<ImageSpace>>& spaces,
                               uint32_t diff) REQUIRES_SHARED(Locks::mutator_lock_) {
    // One bitmap covering the contiguous range of all spaces; a set bit means
    // "already patched, do not visit again".
    std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> patched_objects(
        gc::accounting::ContinuousSpaceBitmap::Create(
            "Marked objects",
            spaces.front()->Begin(),
            spaces.back()->End() - spaces.front()->Begin()));
    // Objects and native metadata are relocated by the same displacement here.
    using PatchRelocateVisitor = PatchObjectVisitor<kPointerSize, RelocateVisitor, RelocateVisitor>;
    RelocateVisitor relocate_visitor(diff);
    PatchRelocateVisitor patch_object_visitor(relocate_visitor, relocate_visitor);

    mirror::Class* dcheck_class_class = nullptr;  // Used only for a DCHECK().
    for (const std::unique_ptr<ImageSpace>& space : spaces) {
      // First patch the image header. The `diff` is OK for patching 32-bit fields but
      // the 64-bit method fields in the ImageHeader may need a negative `delta`.
      reinterpret_cast<ImageHeader*>(space->Begin())->RelocateImage(
          (reinterpret_cast32<uint32_t>(space->Begin()) >= -diff)  // Would `begin+diff` overflow?
              ? -static_cast<int64_t>(-diff) : static_cast<int64_t>(diff));

      // Patch fields and methods.
      const ImageHeader& image_header = space->GetImageHeader();
      image_header.VisitPackedArtFields([&](ArtField& field) REQUIRES_SHARED(Locks::mutator_lock_) {
        patch_object_visitor.template PatchGcRoot</*kMayBeNull=*/ false>(
            &field.DeclaringClassRoot());
      }, space->Begin());
      image_header.VisitPackedArtMethods([&](ArtMethod& method)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        // Patch the declaring class root plus the two native pointer fields:
        // the data pointer and the quick-compiled-code entrypoint.
        patch_object_visitor.PatchGcRoot(&method.DeclaringClassRoot());
        void** data_address = PointerAddress(&method, ArtMethod::DataOffset(kPointerSize));
        patch_object_visitor.PatchNativePointer(data_address);
        void** entrypoint_address =
            PointerAddress(&method, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kPointerSize));
        patch_object_visitor.PatchNativePointer(entrypoint_address);
      }, space->Begin(), kPointerSize);
      // IM tables and IMT conflict tables hold ArtMethod* entries; forward each one.
      auto method_table_visitor = [&](ArtMethod* method) {
        DCHECK(method != nullptr);
        return relocate_visitor(method);
      };
      image_header.VisitPackedImTables(method_table_visitor, space->Begin(), kPointerSize);
      image_header.VisitPackedImtConflictTables(method_table_visitor, space->Begin(), kPointerSize);

      // Patch the intern table.
      if (image_header.GetInternedStringsSection().Size() != 0u) {
        const uint8_t* data = space->Begin() + image_header.GetInternedStringsSection().Offset();
        size_t read_count;
        // No copy: patch the table in the mapped image memory directly.
        InternTable::UnorderedSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
        for (GcRoot<mirror::String>& slot : temp_set) {
          patch_object_visitor.template PatchGcRoot</*kMayBeNull=*/ false>(&slot);
        }
      }

      // Patch the class table and classes, so that we can traverse class hierarchy to
      // determine the types of other objects when we visit them later.
      if (image_header.GetClassTableSection().Size() != 0u) {
        uint8_t* data = space->Begin() + image_header.GetClassTableSection().Offset();
        size_t read_count;
        ClassTable::ClassSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
        DCHECK(!temp_set.empty());
        ClassTableVisitor class_table_visitor(relocate_visitor);
        for (ClassTable::TableSlot& slot : temp_set) {
          slot.VisitRoot(class_table_visitor);
          ObjPtr<mirror::Class> klass = slot.Read<kWithoutReadBarrier>();
          DCHECK(klass != nullptr);
          // Mark before visiting so the final object pass skips this class.
          patched_objects->Set(klass.Ptr());
          patch_object_visitor.VisitClass(klass.Ptr());
          if (kIsDebugBuild) {
            // All classes in the table must share the same java.lang.Class class.
            mirror::Class* class_class = klass->GetClass<kVerifyNone, kWithoutReadBarrier>();
            if (dcheck_class_class == nullptr) {
              dcheck_class_class = class_class;
            } else {
              CHECK_EQ(class_class, dcheck_class_class);
            }
          }
          // Then patch the non-embedded vtable and iftable.
          ObjPtr<mirror::PointerArray> vtable =
              klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
          if (vtable != nullptr && !patched_objects->Set(vtable.Ptr())) {
            patch_object_visitor.VisitPointerArray(vtable);
          }
          ObjPtr<mirror::IfTable> iftable = klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
          if (iftable != nullptr) {
            int32_t ifcount = klass->GetIfTableCount<kVerifyNone>();
            for (int32_t i = 0; i != ifcount; ++i) {
              ObjPtr<mirror::PointerArray> unpatched_ifarray =
                  iftable->GetMethodArrayOrNull<kVerifyNone, kWithoutReadBarrier>(i);
              if (unpatched_ifarray != nullptr) {
                // The iftable has not been patched, so we need to explicitly adjust the pointer.
                ObjPtr<mirror::PointerArray> ifarray = relocate_visitor(unpatched_ifarray.Ptr());
                if (!patched_objects->Set(ifarray.Ptr())) {
                  patch_object_visitor.VisitPointerArray(ifarray);
                }
              }
            }
          }
        }
      }
    }

    // Patch class roots now, so that we can recognize mirror::Method and mirror::Constructor.
    ObjPtr<mirror::Class> method_class;
    ObjPtr<mirror::Class> constructor_class;
    {
      const ImageSpace* space = spaces.front().get();
      const ImageHeader& image_header = space->GetImageHeader();

      ObjPtr<mirror::ObjectArray<mirror::Object>> image_roots =
          image_header.GetImageRoots<kWithoutReadBarrier>();
      patched_objects->Set(image_roots.Ptr());
      patch_object_visitor.VisitObject(image_roots.Ptr());

      ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots =
          ObjPtr<mirror::ObjectArray<mirror::Class>>::DownCast(
              image_header.GetImageRoot<kWithoutReadBarrier>(ImageHeader::kClassRoots));
      patched_objects->Set(class_roots.Ptr());
      patch_object_visitor.VisitObject(class_roots.Ptr());

      method_class = GetClassRoot<mirror::Method, kWithoutReadBarrier>(class_roots);
      constructor_class = GetClassRoot<mirror::Constructor, kWithoutReadBarrier>(class_roots);
    }

    // Final pass: walk all objects in each space linearly and patch those not
    // already handled above.
    for (size_t s = 0u, size = spaces.size(); s != size; ++s) {
      const ImageSpace* space = spaces[s].get();
      const ImageHeader& image_header = space->GetImageHeader();

      static_assert(IsAligned<kObjectAlignment>(sizeof(ImageHeader)), "Header alignment check");
      uint32_t objects_end = image_header.GetObjectsSection().Size();
      DCHECK_ALIGNED(objects_end, kObjectAlignment);
      for (uint32_t pos = sizeof(ImageHeader); pos != objects_end; ) {
        mirror::Object* object = reinterpret_cast<mirror::Object*>(space->Begin() + pos);
        if (!patched_objects->Test(object)) {
          // This is the last pass over objects, so we do not need to Set().
          patch_object_visitor.VisitObject(object);
          ObjPtr<mirror::Class> klass = object->GetClass<kVerifyNone, kWithoutReadBarrier>();
          if (klass->IsDexCacheClass<kVerifyNone>()) {
            // Patch dex cache array pointers and elements.
            ObjPtr<mirror::DexCache> dex_cache =
                object->AsDexCache<kVerifyNone, kWithoutReadBarrier>();
            patch_object_visitor.VisitDexCacheArrays(dex_cache);
          } else if (klass == method_class || klass == constructor_class) {
            // Patch the ArtMethod* in the mirror::Executable subobject.
            ObjPtr<mirror::Executable> as_executable =
                ObjPtr<mirror::Executable>::DownCast(object);
            ArtMethod* unpatched_method = as_executable->GetArtMethod<kVerifyNone>();
            ArtMethod* patched_method = relocate_visitor(unpatched_method);
            as_executable->SetArtMethod</*kTransactionActive=*/ false,
                                        /*kCheckTransaction=*/ true,
                                        kVerifyNone>(patched_method);
          }
        }
        pos += RoundUp(object->SizeOf<kVerifyNone>(), kObjectAlignment);
      }
    }
  }
1704
  // Relocate the loaded boot image spaces in-place when the runtime requested
  // relocation (`relocate_`). The relocation delta is derived from where the
  // first space was actually mapped vs. where its header says it was compiled
  // to be mapped; when relocation is disabled the delta must be zero.
  void MaybeRelocateSpaces(const std::vector<std::unique_ptr<ImageSpace>>& spaces,
                           TimingLogger* logger)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    TimingLogger::ScopedTiming timing("MaybeRelocateSpaces", logger);
    ImageSpace* first_space = spaces.front().get();
    const ImageHeader& first_space_header = first_space->GetImageHeader();
    // Delta between the actual mapping address and the compiled-in address.
    uint32_t diff =
        static_cast<uint32_t>(first_space->Begin() - first_space_header.GetImageBegin());
    if (!relocate_) {
      DCHECK_EQ(diff, 0u);
      return;
    }

    // Dispatch to the pointer-size-templated implementation.
    PointerSize pointer_size = first_space_header.GetPointerSize();
    if (pointer_size == PointerSize::k64) {
      DoRelocateSpaces<PointerSize::k64>(spaces, diff);
    } else {
      DoRelocateSpaces<PointerSize::k32>(spaces, diff);
    }
  }
1725
  // Load a single boot image file from `image_filename` into a new ImageSpace.
  // A file lock is held on the image file for the duration of the load only;
  // both the lock and its file descriptor are released on return.
  // Returns null (with `*error_msg` set) on failure.
  std::unique_ptr<ImageSpace> Load(const std::string& image_location,
                                   const std::string& image_filename,
                                   TimingLogger* logger,
                                   /*inout*/MemMap* image_reservation,
                                   /*out*/std::string* error_msg)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // Should this be a RDWR lock? This is only a defensive measure, as at
    // this point the image should exist.
    // However, only the zygote can write into the global dalvik-cache, so
    // restrict to zygote processes, or any process that isn't using
    // /data/dalvik-cache (which we assume to be allowed to write there).
    const bool rw_lock = is_zygote_ || !is_global_cache_;

    // Note that we must not use the file descriptor associated with
    // ScopedFlock::GetFile to Init the image file. We want the file
    // descriptor (and the associated exclusive lock) to be released when
    // we leave Create.
    // `image` is intentionally unused; it exists only to hold the lock.
    ScopedFlock image = LockedFile::Open(image_filename.c_str(),
                                         /*flags=*/ rw_lock ? (O_CREAT | O_RDWR) : O_RDONLY,
                                         /*block=*/ true,
                                         error_msg);

    VLOG(startup) << "Using image file " << image_filename.c_str() << " for image location "
                  << image_location;
    // If we are in /system we can assume the image is good. We can also
    // assume this if we are using a relocated image (i.e. image checksum
    // matches) since this is only different by the offset. We need this to
    // make sure that host tests continue to work.
    // Since we are the boot image, pass null since we load the oat file from the boot image oat
    // file name.
    return Loader::Init(image_filename.c_str(),
                        image_location.c_str(),
                        /*oat_file=*/ nullptr,
                        logger,
                        image_reservation,
                        error_msg);
  }
1763
  // Open the oat file referenced by `space`'s image, validate that it matches
  // the image (oat checksum, recorded boot class path, expected load address),
  // optionally run full dex checksum validation, and hand ownership of the
  // oat file to the space. Returns false with `*error_msg` set on any failure.
  bool OpenOatFile(ImageSpace* space,
                   const std::string& dex_filename,
                   const std::string& expected_boot_class_path,
                   bool validate_oat_file,
                   TimingLogger* logger,
                   /*inout*/MemMap* image_reservation,
                   /*out*/std::string* error_msg) {
    // VerifyImageAllocations() will be called later in Runtime::Init()
    // as some class roots like ArtMethod::java_lang_reflect_ArtMethod_
    // and ArtField::java_lang_reflect_ArtField_, which are used from
    // Object::SizeOf() which VerifyImageAllocations() calls, are not
    // set yet at this point.
    DCHECK(image_reservation != nullptr);
    std::unique_ptr<OatFile> oat_file;
    {
      TimingLogger::ScopedTiming timing("OpenOatFile", logger);
      // Derive the oat file name/location from the image file name/location.
      std::string oat_filename =
          ImageHeader::GetOatLocationFromImageLocation(space->GetImageFilename());
      std::string oat_location =
          ImageHeader::GetOatLocationFromImageLocation(space->GetImageLocation());

      oat_file.reset(OatFile::Open(/*zip_fd=*/ -1,
                                   oat_filename,
                                   oat_location,
                                   executable_,
                                   /*low_4gb=*/ false,
                                   /*abs_dex_location=*/ dex_filename.c_str(),
                                   image_reservation,
                                   error_msg));
      if (oat_file == nullptr) {
        *error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
                                  oat_filename.c_str(),
                                  space->GetName(),
                                  error_msg->c_str());
        return false;
      }
      // The oat file's checksum must match the one recorded in the image header.
      const ImageHeader& image_header = space->GetImageHeader();
      uint32_t oat_checksum = oat_file->GetOatHeader().GetChecksum();
      uint32_t image_oat_checksum = image_header.GetOatChecksum();
      if (oat_checksum != image_oat_checksum) {
        *error_msg = StringPrintf("Failed to match oat file checksum 0x%x to expected oat checksum"
                                  " 0x%x in image %s",
                                  oat_checksum,
                                  image_oat_checksum,
                                  space->GetName());
        return false;
      }
      // The boot class path stored in the oat header must match the one we expect.
      const char* oat_boot_class_path =
          oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kBootClassPathKey);
      oat_boot_class_path = (oat_boot_class_path != nullptr) ? oat_boot_class_path : "";
      if (expected_boot_class_path != oat_boot_class_path) {
        *error_msg = StringPrintf("Failed to match oat boot class path %s to expected "
                                  "boot class path %s in image %s",
                                  oat_boot_class_path,
                                  expected_boot_class_path.c_str(),
                                  space->GetName());
        return false;
      }
      // The oat data must have been mapped exactly where the (possibly
      // relocated) image expects it.
      ptrdiff_t relocation_diff = space->Begin() - image_header.GetImageBegin();
      CHECK(image_header.GetOatDataBegin() != nullptr);
      uint8_t* oat_data_begin = image_header.GetOatDataBegin() + relocation_diff;
      if (oat_file->Begin() != oat_data_begin) {
        *error_msg = StringPrintf("Oat file '%s' referenced from image %s has unexpected begin"
                                  " %p v. %p",
                                  oat_filename.c_str(),
                                  space->GetName(),
                                  oat_file->Begin(),
                                  oat_data_begin);
        return false;
      }
    }
    if (validate_oat_file) {
      // Full dex-checksum validation against the dex files on disk.
      TimingLogger::ScopedTiming timing("ValidateOatFile", logger);
      if (!ImageSpace::ValidateOatFile(*oat_file, error_msg)) {
        DCHECK(!error_msg->empty());
        return false;
      }
    }
    // The space owns the oat file; keep a non-owning alias for GetOatFile().
    space->oat_file_ = std::move(oat_file);
    space->oat_file_non_owned_ = space->oat_file_.get();
    return true;
  }
1846
  // Reserve a contiguous PROT_NONE region for the boot image, optionally
  // carving `extra_reservation_size` bytes off the end into a separate
  // reservation for the caller. With relocation enabled the region is placed
  // at a randomized address (ASLR); otherwise at the compiled-in image start.
  bool ReserveBootImageMemory(uint32_t reservation_size,
                              uint32_t image_start,
                              size_t extra_reservation_size,
                              /*out*/MemMap* image_reservation,
                              /*out*/MemMap* extra_reservation,
                              /*out*/std::string* error_msg) {
    DCHECK_ALIGNED(reservation_size, kPageSize);
    DCHECK_ALIGNED(image_start, kPageSize);
    DCHECK(!image_reservation->IsValid());
    DCHECK_LT(extra_reservation_size, std::numeric_limits<uint32_t>::max() - reservation_size);
    size_t total_size = reservation_size + extra_reservation_size;
    // If relocating, choose a random address for ASLR.
    uint32_t addr = relocate_ ? ART_BASE_ADDRESS + ChooseRelocationOffsetDelta() : image_start;
    *image_reservation =
        MemMap::MapAnonymous("Boot image reservation",
                             reinterpret_cast32<uint8_t*>(addr),
                             total_size,
                             PROT_NONE,
                             /*low_4gb=*/ true,
                             /*reuse=*/ false,
                             /*reservation=*/ nullptr,
                             error_msg);
    if (!image_reservation->IsValid()) {
      return false;
    }
    DCHECK(!extra_reservation->IsValid());
    if (extra_reservation_size != 0u) {
      DCHECK_ALIGNED(extra_reservation_size, kPageSize);
      DCHECK_LT(extra_reservation_size, image_reservation->Size());
      // Split the tail of the big reservation off into the extra reservation.
      uint8_t* split = image_reservation->End() - extra_reservation_size;
      *extra_reservation = image_reservation->RemapAtEnd(split,
                                                         "Boot image extra reservation",
                                                         PROT_NONE,
                                                         error_msg);
      if (!extra_reservation->IsValid()) {
        return false;
      }
    }

    return true;
  }
1888
Vladimir Marko312f10e2018-11-21 12:35:24 +00001889 bool CheckReservationExhausted(const MemMap& image_reservation, /*out*/std::string* error_msg) {
Vladimir Markoc09cd052018-08-23 16:36:36 +01001890 if (image_reservation.IsValid()) {
1891 *error_msg = StringPrintf("Excessive image reservation after loading boot image: %p-%p",
1892 image_reservation.Begin(),
1893 image_reservation.End());
1894 return false;
1895 }
Vladimir Markoc09cd052018-08-23 16:36:36 +01001896 return true;
1897 }
1898
  // Boot class path dex file names and their corresponding locations.
  const std::vector<std::string>& boot_class_path_;
  const std::vector<std::string>& boot_class_path_locations_;
  // Requested boot image location (e.g. a path under /system/framework).
  const std::string& image_location_;
  // Instruction set for which the boot image is being loaded.
  InstructionSet image_isa_;
  // Whether loaded spaces should be relocated; see MaybeRelocateSpaces()
  // and ReserveBootImageMemory().
  bool relocate_;
  // Whether the oat file should be opened executable; see OpenOatFile().
  bool executable_;
  // Whether we run in the zygote; affects image file locking in Load().
  bool is_zygote_;
  // Results of the image file search (presumably filled by FindImageFiles();
  // that method is outside this view — confirm):
  bool has_system_;           // an image file exists in /system
  bool has_cache_;            // an image file exists in the dalvik cache
  bool is_global_cache_;      // the cache in use is the global dalvik cache
  bool dalvik_cache_exists_;  // the dalvik cache directory exists
  std::string dalvik_cache_;     // path to the dalvik cache directory
  std::string cache_filename_;   // image file name within the dalvik cache
1912};
1913
Andreas Gampea463b6a2016-08-12 21:53:32 -07001914static constexpr uint64_t kLowSpaceValue = 50 * MB;
1915static constexpr uint64_t kTmpFsSentinelValue = 384 * MB;
1916
1917// Read the free space of the cache partition and make a decision whether to keep the generated
1918// image. This is to try to mitigate situations where the system might run out of space later.
1919static bool CheckSpace(const std::string& cache_filename, std::string* error_msg) {
1920 // Using statvfs vs statvfs64 because of b/18207376, and it is enough for all practical purposes.
1921 struct statvfs buf;
1922
1923 int res = TEMP_FAILURE_RETRY(statvfs(cache_filename.c_str(), &buf));
1924 if (res != 0) {
1925 // Could not stat. Conservatively tell the system to delete the image.
1926 *error_msg = "Could not stat the filesystem, assuming low-memory situation.";
1927 return false;
Nicolas Geoffray1bc977c2016-01-23 14:15:49 +00001928 }
Nicolas Geoffray1bc977c2016-01-23 14:15:49 +00001929
Andreas Gampea463b6a2016-08-12 21:53:32 -07001930 uint64_t fs_overall_size = buf.f_bsize * static_cast<uint64_t>(buf.f_blocks);
1931 // Zygote is privileged, but other things are not. Use bavail.
1932 uint64_t fs_free_size = buf.f_bsize * static_cast<uint64_t>(buf.f_bavail);
Brian Carlstrom56d947f2013-07-15 13:14:23 -07001933
Andreas Gampea463b6a2016-08-12 21:53:32 -07001934 // Take the overall size as an indicator for a tmpfs, which is being used for the decryption
1935 // environment. We do not want to fail quickening the boot image there, as it is beneficial
1936 // for time-to-UI.
1937 if (fs_overall_size > kTmpFsSentinelValue) {
1938 if (fs_free_size < kLowSpaceValue) {
1939 *error_msg = StringPrintf("Low-memory situation: only %4.2f megabytes available, need at "
1940 "least %" PRIu64 ".",
1941 static_cast<double>(fs_free_size) / MB,
1942 kLowSpaceValue / MB);
Brian Carlstrom56d947f2013-07-15 13:14:23 -07001943 return false;
1944 }
1945 }
1946 return true;
1947}
1948
Vladimir Marko82e1e272018-08-20 13:38:06 +00001949bool ImageSpace::LoadBootImage(
Vladimir Marko91f10322018-12-07 18:04:10 +00001950 const std::vector<std::string>& boot_class_path,
1951 const std::vector<std::string>& boot_class_path_locations,
Vladimir Marko82e1e272018-08-20 13:38:06 +00001952 const std::string& image_location,
1953 const InstructionSet image_isa,
Andreas Gampe86823542019-02-25 09:38:49 -08001954 ImageSpaceLoadingOrder order,
Vladimir Marko3364d182019-03-13 13:55:01 +00001955 bool relocate,
1956 bool executable,
1957 bool is_zygote,
Vladimir Markod44d7032018-08-30 13:02:31 +01001958 size_t extra_reservation_size,
1959 /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
1960 /*out*/MemMap* extra_reservation) {
Andreas Gampea463b6a2016-08-12 21:53:32 -07001961 ScopedTrace trace(__FUNCTION__);
1962
Vladimir Marko82e1e272018-08-20 13:38:06 +00001963 DCHECK(boot_image_spaces != nullptr);
1964 DCHECK(boot_image_spaces->empty());
Vladimir Markod44d7032018-08-30 13:02:31 +01001965 DCHECK_ALIGNED(extra_reservation_size, kPageSize);
1966 DCHECK(extra_reservation != nullptr);
Vladimir Marko82e1e272018-08-20 13:38:06 +00001967 DCHECK_NE(image_isa, InstructionSet::kNone);
1968
1969 if (image_location.empty()) {
1970 return false;
1971 }
1972
Vladimir Marko3364d182019-03-13 13:55:01 +00001973 BootImageLoader loader(boot_class_path,
1974 boot_class_path_locations,
1975 image_location,
1976 image_isa,
1977 relocate,
1978 executable,
1979 is_zygote);
Vladimir Marko82e1e272018-08-20 13:38:06 +00001980
Andreas Gampea463b6a2016-08-12 21:53:32 -07001981 // Step 0: Extra zygote work.
1982
1983 // Step 0.a: If we're the zygote, mark boot.
Vladimir Marko82e1e272018-08-20 13:38:06 +00001984 if (loader.IsZygote() && CanWriteToDalvikCache(image_isa)) {
Andreas Gampea463b6a2016-08-12 21:53:32 -07001985 MarkZygoteStart(image_isa, Runtime::Current()->GetZygoteMaxFailedBoots());
1986 }
1987
Vladimir Marko82e1e272018-08-20 13:38:06 +00001988 loader.FindImageFiles();
1989
Andreas Gampea463b6a2016-08-12 21:53:32 -07001990 // Step 0.b: If we're the zygote, check for free space, and prune the cache preemptively,
1991 // if necessary. While the runtime may be fine (it is pretty tolerant to
1992 // out-of-disk-space situations), other parts of the platform are not.
1993 //
1994 // The advantage of doing this proactively is that the later steps are simplified,
1995 // i.e., we do not need to code retries.
Vladimir Marko3364d182019-03-13 13:55:01 +00001996 bool low_space = false;
Vladimir Marko82e1e272018-08-20 13:38:06 +00001997 if (loader.IsZygote() && loader.DalvikCacheExists()) {
Andreas Gampe6e74abb2018-03-01 17:33:19 -08001998 // Extra checks for the zygote. These only apply when loading the first image, explained below.
Vladimir Marko82e1e272018-08-20 13:38:06 +00001999 const std::string& dalvik_cache = loader.GetDalvikCache();
Andreas Gampea463b6a2016-08-12 21:53:32 -07002000 DCHECK(!dalvik_cache.empty());
2001 std::string local_error_msg;
Andreas Gampe6e74abb2018-03-01 17:33:19 -08002002 bool check_space = CheckSpace(dalvik_cache, &local_error_msg);
Vladimir Marko4df2d802018-09-27 16:42:44 +00002003 if (!check_space) {
Andreas Gampea463b6a2016-08-12 21:53:32 -07002004 LOG(WARNING) << local_error_msg << " Preemptively pruning the dalvik cache.";
2005 PruneDalvikCache(image_isa);
2006
2007 // Re-evaluate the image.
Vladimir Marko82e1e272018-08-20 13:38:06 +00002008 loader.FindImageFiles();
Vladimir Marko3364d182019-03-13 13:55:01 +00002009
Andreas Gampe6e74abb2018-03-01 17:33:19 -08002010 // Disable compilation/patching - we do not want to fill up the space again.
Vladimir Marko3364d182019-03-13 13:55:01 +00002011 low_space = true;
Andreas Gampe6e74abb2018-03-01 17:33:19 -08002012 }
Andreas Gampea463b6a2016-08-12 21:53:32 -07002013 }
2014
2015 // Collect all the errors.
2016 std::vector<std::string> error_msgs;
2017
Andreas Gampe86823542019-02-25 09:38:49 -08002018 auto try_load_from = [&](auto has_fn, auto load_fn, bool validate_oat_file) {
2019 if ((loader.*has_fn)()) {
2020 std::string local_error_msg;
2021 if ((loader.*load_fn)(validate_oat_file,
2022 extra_reservation_size,
2023 boot_image_spaces,
2024 extra_reservation,
2025 &local_error_msg)) {
2026 return true;
2027 }
2028 error_msgs.push_back(local_error_msg);
2029 }
2030 return false;
2031 };
Andreas Gampea463b6a2016-08-12 21:53:32 -07002032
Andreas Gampe86823542019-02-25 09:38:49 -08002033 auto try_load_from_system = [&]() {
Andreas Gampe96b3baa2019-03-12 12:45:58 -07002034 // Validate the oat files if the loading order checks data first. Otherwise assume system
2035 // integrity.
2036 return try_load_from(&BootImageLoader::HasSystem,
2037 &BootImageLoader::LoadFromSystem,
2038 /*validate_oat_file=*/ order != ImageSpaceLoadingOrder::kSystemFirst);
Andreas Gampe86823542019-02-25 09:38:49 -08002039 };
2040 auto try_load_from_cache = [&]() {
Andreas Gampe96b3baa2019-03-12 12:45:58 -07002041 // Always validate oat files from the dalvik cache.
2042 return try_load_from(&BootImageLoader::HasCache,
2043 &BootImageLoader::LoadFromDalvikCache,
2044 /*validate_oat_file=*/ true);
Andreas Gampe86823542019-02-25 09:38:49 -08002045 };
2046
2047 auto invoke_sequentially = [](auto first, auto second) {
2048 return first() || second();
2049 };
2050
2051 // Step 1+2: Check system and cache images in the asked-for order.
2052 if (order == ImageSpaceLoadingOrder::kSystemFirst) {
2053 if (invoke_sequentially(try_load_from_system, try_load_from_cache)) {
Vladimir Marko82e1e272018-08-20 13:38:06 +00002054 return true;
Andreas Gampea463b6a2016-08-12 21:53:32 -07002055 }
Andreas Gampe86823542019-02-25 09:38:49 -08002056 } else {
2057 if (invoke_sequentially(try_load_from_cache, try_load_from_system)) {
Vladimir Marko4df2d802018-09-27 16:42:44 +00002058 return true;
Andreas Gampea463b6a2016-08-12 21:53:32 -07002059 }
Andreas Gampea463b6a2016-08-12 21:53:32 -07002060 }
2061
Vladimir Marko82e1e272018-08-20 13:38:06 +00002062 // Step 3: We do not have an existing image in /system,
2063 // so generate an image into the dalvik cache.
Vladimir Markoe3070022018-08-22 09:36:19 +00002064 if (!loader.HasSystem() && loader.DalvikCacheExists()) {
Andreas Gampea463b6a2016-08-12 21:53:32 -07002065 std::string local_error_msg;
Vladimir Marko3364d182019-03-13 13:55:01 +00002066 if (low_space || !Runtime::Current()->IsImageDex2OatEnabled()) {
Andreas Gampea463b6a2016-08-12 21:53:32 -07002067 local_error_msg = "Image compilation disabled.";
Vladimir Marko3364d182019-03-13 13:55:01 +00002068 } else if (ImageCreationAllowed(loader.IsGlobalCache(),
2069 image_isa,
2070 is_zygote,
2071 &local_error_msg)) {
Vladimir Marko82e1e272018-08-20 13:38:06 +00002072 bool compilation_success =
2073 GenerateImage(loader.GetCacheFilename(), image_isa, &local_error_msg);
Andreas Gampea463b6a2016-08-12 21:53:32 -07002074 if (compilation_success) {
Vladimir Markof4efa9e2018-10-17 14:12:45 +01002075 if (loader.LoadFromDalvikCache(/*validate_oat_file=*/ false,
Vladimir Markod44d7032018-08-30 13:02:31 +01002076 extra_reservation_size,
Vladimir Marko82e1e272018-08-20 13:38:06 +00002077 boot_image_spaces,
Vladimir Markod44d7032018-08-30 13:02:31 +01002078 extra_reservation,
Vladimir Marko82e1e272018-08-20 13:38:06 +00002079 &local_error_msg)) {
2080 return true;
Andreas Gampea463b6a2016-08-12 21:53:32 -07002081 }
2082 }
2083 }
2084 error_msgs.push_back(StringPrintf("Cannot compile image to %s: %s",
Vladimir Marko82e1e272018-08-20 13:38:06 +00002085 loader.GetCacheFilename().c_str(),
Andreas Gampea463b6a2016-08-12 21:53:32 -07002086 local_error_msg.c_str()));
2087 }
2088
Vladimir Marko82e1e272018-08-20 13:38:06 +00002089 // We failed. Prune the cache the free up space, create a compound error message
2090 // and return false.
Vladimir Marko3364d182019-03-13 13:55:01 +00002091 if (loader.DalvikCacheExists()) {
2092 PruneDalvikCache(image_isa);
2093 }
Andreas Gampea463b6a2016-08-12 21:53:32 -07002094
2095 std::ostringstream oss;
2096 bool first = true;
Andreas Gampe4c481a42016-11-03 08:21:59 -07002097 for (const auto& msg : error_msgs) {
Andreas Gampea463b6a2016-08-12 21:53:32 -07002098 if (!first) {
2099 oss << "\n ";
2100 }
2101 oss << msg;
2102 }
Andreas Gampea463b6a2016-08-12 21:53:32 -07002103
Vladimir Marko82e1e272018-08-20 13:38:06 +00002104 LOG(ERROR) << "Could not create image space with image file '" << image_location << "'. "
2105 << "Attempting to fall back to imageless running. Error was: " << oss.str();
Andreas Gampea463b6a2016-08-12 21:53:32 -07002106
Vladimir Marko82e1e272018-08-20 13:38:06 +00002107 return false;
Andreas Gampe2bd84282016-12-05 12:37:36 -08002108}
2109
// Out-of-line destructor: member destructors require the full definitions of
// classes that are only forward-declared in the header.
ImageSpace::~ImageSpace() {
  // Everything done by member destructors. Classes forward-declared in header are now defined.
}
2113
// Create an ImageSpace for an app image (as opposed to a boot image) backed
// by an already-opened and already-validated oat file. No address reservation
// is used: the image is mapped wherever the loader places it.
std::unique_ptr<ImageSpace> ImageSpace::CreateFromAppImage(const char* image,
                                                           const OatFile* oat_file,
                                                           std::string* error_msg) {
  // Note: The oat file has already been validated.
  return Loader::InitAppImage(image,
                              image,  // Filename doubles as the location.
                              oat_file,
                              /*image_reservation=*/ nullptr,
                              error_msg);
}
2124
// Return the non-owning oat file pointer. This alias stays set even after
// ReleaseOatFile() has transferred ownership of the oat file elsewhere.
const OatFile* ImageSpace::GetOatFile() const {
  return oat_file_non_owned_;
}
2128
// Transfer ownership of the oat file to the caller. Must only be called once
// (while the space still owns the file). The non-owning pointer returned by
// GetOatFile() is left untouched.
std::unique_ptr<const OatFile> ImageSpace::ReleaseOatFile() {
  CHECK(oat_file_ != nullptr);
  return std::move(oat_file_);
}
2133
Ian Rogers1d54e732013-05-02 21:10:01 -07002134void ImageSpace::Dump(std::ostream& os) const {
2135 os << GetType()
Mathieu Chartier590fee92013-09-13 13:46:47 -07002136 << " begin=" << reinterpret_cast<void*>(Begin())
Ian Rogers1d54e732013-05-02 21:10:01 -07002137 << ",end=" << reinterpret_cast<void*>(End())
2138 << ",size=" << PrettySize(Size())
2139 << ",name=\"" << GetName() << "\"]";
2140}
2141
// Verify that the dex file checksums recorded in `oat_file` match the
// checksums of the corresponding dex files currently on disk, including all
// multidex entries. Returns false with `*error_msg` set on any missing file
// or checksum mismatch.
bool ImageSpace::ValidateOatFile(const OatFile& oat_file, std::string* error_msg) {
  const ArtDexFileLoader dex_file_loader;
  for (const OatDexFile* oat_dex_file : oat_file.GetOatDexFiles()) {
    const std::string& dex_file_location = oat_dex_file->GetDexFileLocation();

    // Skip multidex locations - These will be checked when we visit their
    // corresponding primary non-multidex location.
    if (DexFileLoader::IsMultiDexLocation(dex_file_location.c_str())) {
      continue;
    }

    // Read the checksums (primary + all multidex entries) from the file on disk.
    std::vector<uint32_t> checksums;
    if (!dex_file_loader.GetMultiDexChecksums(dex_file_location.c_str(), &checksums, error_msg)) {
      *error_msg = StringPrintf("ValidateOatFile failed to get checksums of dex file '%s' "
                                "referenced by oat file %s: %s",
                                dex_file_location.c_str(),
                                oat_file.GetLocation().c_str(),
                                error_msg->c_str());
      return false;
    }
    CHECK(!checksums.empty());
    // checksums[0] is the primary dex file's checksum.
    if (checksums[0] != oat_dex_file->GetDexFileLocationChecksum()) {
      *error_msg = StringPrintf("ValidateOatFile found checksum mismatch between oat file "
                                "'%s' and dex file '%s' (0x%x != 0x%x)",
                                oat_file.GetLocation().c_str(),
                                dex_file_location.c_str(),
                                oat_dex_file->GetDexFileLocationChecksum(),
                                checksums[0]);
      return false;
    }

    // Verify checksums for any related multidex entries.
    for (size_t i = 1; i < checksums.size(); i++) {
      std::string multi_dex_location = DexFileLoader::GetMultiDexLocation(
          i,
          dex_file_location.c_str());
      const OatDexFile* multi_dex = oat_file.GetOatDexFile(multi_dex_location.c_str(),
                                                           nullptr,
                                                           error_msg);
      if (multi_dex == nullptr) {
        *error_msg = StringPrintf("ValidateOatFile oat file '%s' is missing entry '%s'",
                                  oat_file.GetLocation().c_str(),
                                  multi_dex_location.c_str());
        return false;
      }

      if (checksums[i] != multi_dex->GetDexFileLocationChecksum()) {
        *error_msg = StringPrintf("ValidateOatFile found checksum mismatch between oat file "
                                  "'%s' and dex file '%s' (0x%x != 0x%x)",
                                  oat_file.GetLocation().c_str(),
                                  multi_dex_location.c_str(),
                                  multi_dex->GetDexFileLocationChecksum(),
                                  checksums[i]);
        return false;
      }
    }
  }
  return true;
}
2201
// Build the boot class path checksum string from the image file found for
// `image_location`: an "i;<component count>/<image checksum>" prefix for the
// boot class path entries covered by the image, followed by ":d/<checksum>..."
// groups for each remaining boot class path dex file. Returns an empty string
// with `*error_msg` set on failure.
std::string ImageSpace::GetBootClassPathChecksums(ArrayRef<const std::string> boot_class_path,
                                                  const std::string& image_location,
                                                  InstructionSet image_isa,
                                                  ImageSpaceLoadingOrder order,
                                                  /*out*/std::string* error_msg) {
  std::string system_filename;
  bool has_system = false;
  std::string cache_filename;
  bool has_cache = false;
  bool dalvik_cache_exists = false;
  bool is_global_cache = false;
  if (!FindImageFilename(image_location.c_str(),
                         image_isa,
                         &system_filename,
                         &has_system,
                         &cache_filename,
                         &dalvik_cache_exists,
                         &has_cache,
                         &is_global_cache)) {
    *error_msg = StringPrintf("Unable to find image file for %s and %s",
                              image_location.c_str(),
                              GetInstructionSetString(image_isa));
    return std::string();
  }

  DCHECK(has_system || has_cache);
  // Pick the image file consistently with the requested load order.
  const std::string& filename = (order == ImageSpaceLoadingOrder::kSystemFirst)
      ? (has_system ? system_filename : cache_filename)
      : (has_cache ? cache_filename : system_filename);
  std::unique_ptr<ImageHeader> header = ReadSpecificImageHeader(filename.c_str(), error_msg);
  if (header == nullptr) {
    return std::string();
  }
  // The image must cover at least one and at most all boot class path entries.
  if (header->GetComponentCount() == 0u || header->GetComponentCount() > boot_class_path.size()) {
    *error_msg = StringPrintf("Unexpected component count in %s, received %u, "
                              "expected non-zero and <= %zu",
                              filename.c_str(),
                              header->GetComponentCount(),
                              boot_class_path.size());
    return std::string();
  }

  std::string boot_image_checksum =
      StringPrintf("i;%d/%08x", header->GetComponentCount(), header->GetImageChecksum());
  // Entries past the image's component count get raw dex checksums, one ":d"
  // group per boot class path file (covering all of its multidex entries).
  ArrayRef<const std::string> boot_class_path_tail =
      ArrayRef<const std::string>(boot_class_path).SubArray(header->GetComponentCount());
  for (const std::string& bcp_filename : boot_class_path_tail) {
    std::vector<std::unique_ptr<const DexFile>> dex_files;
    const ArtDexFileLoader dex_file_loader;
    if (!dex_file_loader.Open(bcp_filename.c_str(),
                              bcp_filename,  // The location does not matter here.
                              /*verify=*/ false,
                              /*verify_checksum=*/ false,
                              error_msg,
                              &dex_files)) {
      return std::string();
    }
    DCHECK(!dex_files.empty());
    StringAppendF(&boot_image_checksum, ":d");
    for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
      StringAppendF(&boot_image_checksum, "/%08x", dex_file->GetLocationChecksum());
    }
  }
  return boot_image_checksum;
}
2267
// Build the boot class path checksum string from already-loaded image spaces
// plus the remaining (not image-covered) boot class path dex files. The output
// format matches the image-file-based overload above, so the two can be
// compared for equality.
std::string ImageSpace::GetBootClassPathChecksums(
    const std::vector<ImageSpace*>& image_spaces,
    const std::vector<const DexFile*>& boot_class_path) {
  size_t pos = 0u;
  std::string boot_image_checksum;

  if (!image_spaces.empty()) {
    const ImageHeader& primary_header = image_spaces.front()->GetImageHeader();
    uint32_t component_count = primary_header.GetComponentCount();
    DCHECK_EQ(component_count, image_spaces.size());
    boot_image_checksum =
        StringPrintf("i;%d/%08x", component_count, primary_header.GetImageChecksum());
    // Skip the boot class path entries covered by the image spaces. Debug
    // builds verify that they line up with the oat files' dex locations.
    for (const ImageSpace* space : image_spaces) {
      size_t num_dex_files = space->oat_file_non_owned_->GetOatDexFiles().size();
      if (kIsDebugBuild) {
        CHECK_NE(num_dex_files, 0u);
        CHECK_LE(space->oat_file_non_owned_->GetOatDexFiles().size(), boot_class_path.size() - pos);
        for (size_t i = 0; i != num_dex_files; ++i) {
          CHECK_EQ(space->oat_file_non_owned_->GetOatDexFiles()[i]->GetDexFileLocation(),
                   boot_class_path[pos + i]->GetLocation());
        }
      }
      pos += num_dex_files;
    }
  }

  // Append raw dex checksums for the rest; a new "d"/":d" group starts at each
  // primary (non-multidex) location.
  ArrayRef<const DexFile* const> boot_class_path_tail =
      ArrayRef<const DexFile* const>(boot_class_path).SubArray(pos);
  DCHECK(boot_class_path_tail.empty() ||
         !DexFileLoader::IsMultiDexLocation(boot_class_path_tail.front()->GetLocation().c_str()));
  for (const DexFile* dex_file : boot_class_path_tail) {
    if (!DexFileLoader::IsMultiDexLocation(dex_file->GetLocation().c_str())) {
      StringAppendF(&boot_image_checksum, boot_image_checksum.empty() ? "d" : ":d");
    }
    StringAppendF(&boot_image_checksum, "/%08x", dex_file->GetLocationChecksum());
  }
  return boot_image_checksum;
}
2306
Vladimir Marko91f10322018-12-07 18:04:10 +00002307std::vector<std::string> ImageSpace::ExpandMultiImageLocations(
2308 const std::vector<std::string>& dex_locations,
2309 const std::string& image_location) {
Vladimir Marko0ace5632018-12-14 11:11:47 +00002310 return ExpandMultiImageLocations(ArrayRef<const std::string>(dex_locations), image_location);
2311}
2312
2313std::vector<std::string> ImageSpace::ExpandMultiImageLocations(
2314 ArrayRef<const std::string> dex_locations,
2315 const std::string& image_location) {
Vladimir Marko91f10322018-12-07 18:04:10 +00002316 DCHECK(!dex_locations.empty());
Andreas Gampe8994a042015-12-30 19:03:17 +00002317
Vladimir Marko91f10322018-12-07 18:04:10 +00002318 // Find the path.
2319 size_t last_slash = image_location.rfind('/');
2320 CHECK_NE(last_slash, std::string::npos);
Andreas Gampe8994a042015-12-30 19:03:17 +00002321
Vladimir Marko91f10322018-12-07 18:04:10 +00002322 // We also need to honor path components that were encoded through '@'. Otherwise the loading
2323 // code won't be able to find the images.
2324 if (image_location.find('@', last_slash) != std::string::npos) {
2325 last_slash = image_location.rfind('@');
Andreas Gampe8994a042015-12-30 19:03:17 +00002326 }
Andreas Gampe8994a042015-12-30 19:03:17 +00002327
Vladimir Marko91f10322018-12-07 18:04:10 +00002328 // Find the dot separating the primary image name from the extension.
2329 size_t last_dot = image_location.rfind('.');
2330 // Extract the extension and base (the path and primary image name).
2331 std::string extension;
2332 std::string base = image_location;
2333 if (last_dot != std::string::npos && last_dot > last_slash) {
2334 extension = image_location.substr(last_dot); // Including the dot.
2335 base.resize(last_dot);
Andreas Gampe8994a042015-12-30 19:03:17 +00002336 }
Vladimir Marko91f10322018-12-07 18:04:10 +00002337 // For non-empty primary image name, add '-' to the `base`.
2338 if (last_slash + 1u != base.size()) {
2339 base += '-';
2340 }
2341
2342 std::vector<std::string> locations;
2343 locations.reserve(dex_locations.size());
2344 locations.push_back(image_location);
2345
2346 // Now create the other names. Use a counted loop to skip the first one.
2347 for (size_t i = 1u; i < dex_locations.size(); ++i) {
2348 // Replace path with `base` (i.e. image path and prefix) and replace the original
2349 // extension (if any) with `extension`.
2350 std::string name = dex_locations[i];
2351 size_t last_dex_slash = name.rfind('/');
2352 if (last_dex_slash != std::string::npos) {
2353 name = name.substr(last_dex_slash + 1);
2354 }
2355 size_t last_dex_dot = name.rfind('.');
2356 if (last_dex_dot != std::string::npos) {
2357 name.resize(last_dex_dot);
2358 }
2359 locations.push_back(base + name + extension);
2360 }
2361 return locations;
Andreas Gampe8994a042015-12-30 19:03:17 +00002362}
2363
Mathieu Chartierd5f3f322016-03-21 14:05:56 -07002364void ImageSpace::DumpSections(std::ostream& os) const {
2365 const uint8_t* base = Begin();
2366 const ImageHeader& header = GetImageHeader();
2367 for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
2368 auto section_type = static_cast<ImageHeader::ImageSections>(i);
2369 const ImageSection& section = header.GetImageSection(section_type);
2370 os << section_type << " " << reinterpret_cast<const void*>(base + section.Offset())
2371 << "-" << reinterpret_cast<const void*>(base + section.End()) << "\n";
2372 }
2373}
2374
Mathieu Chartier6e7a72c2019-03-07 21:40:10 -08002375void ImageSpace::DisablePreResolvedStrings() {
2376 // Clear dex cache pointers.
2377 ObjPtr<mirror::ObjectArray<mirror::DexCache>> dex_caches =
2378 GetImageHeader().GetImageRoot(ImageHeader::kDexCaches)->AsObjectArray<mirror::DexCache>();
2379 for (size_t len = dex_caches->GetLength(), i = 0; i < len; ++i) {
2380 ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get(i);
2381 dex_cache->ClearPreResolvedStrings();
2382 }
2383}
2384
2385void ImageSpace::ReleaseMetadata() {
2386 const ImageSection& metadata = GetImageHeader().GetMetadataSection();
2387 VLOG(image) << "Releasing " << metadata.Size() << " image metadata bytes";
2388 // In the case where new app images may have been added around the checkpoint, ensure that we
2389 // don't madvise the cache for these.
2390 ObjPtr<mirror::ObjectArray<mirror::DexCache>> dex_caches =
2391 GetImageHeader().GetImageRoot(ImageHeader::kDexCaches)->AsObjectArray<mirror::DexCache>();
2392 bool have_startup_cache = false;
2393 for (size_t len = dex_caches->GetLength(), i = 0; i < len; ++i) {
2394 ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get(i);
2395 if (dex_cache->NumPreResolvedStrings() != 0u) {
2396 have_startup_cache = true;
2397 }
2398 }
2399 // Only safe to do for images that have their preresolved strings caches disabled. This is because
2400 // uncompressed images madvise to the original unrelocated image contents.
2401 if (!have_startup_cache) {
2402 // Avoid using ZeroAndReleasePages since the zero fill might not be word atomic.
2403 uint8_t* const page_begin = AlignUp(Begin() + metadata.Offset(), kPageSize);
2404 uint8_t* const page_end = AlignDown(Begin() + metadata.End(), kPageSize);
2405 if (page_begin < page_end) {
2406 CHECK_NE(madvise(page_begin, page_end - page_begin, MADV_DONTNEED), -1) << "madvise failed";
2407 }
2408 }
2409}
2410
Ian Rogers1d54e732013-05-02 21:10:01 -07002411} // namespace space
2412} // namespace gc
2413} // namespace art