blob: 51976c511fdbe74ad2ed33526a9170e0c76bdad4 [file] [log] [blame]
Brian Carlstrom7940e442013-07-12 13:46:57 -07001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Brian Carlstromfc0e3212013-07-17 14:40:12 -070017#ifndef ART_COMPILER_IMAGE_WRITER_H_
18#define ART_COMPILER_IMAGE_WRITER_H_
Brian Carlstrom7940e442013-07-12 13:46:57 -070019
20#include <stdint.h>
Evgenii Stepanov1e133742015-05-20 12:30:59 -070021#include "base/memory_tool.h"
Brian Carlstrom7940e442013-07-12 13:46:57 -070022
23#include <cstddef>
Ian Rogers700a4022014-05-19 16:49:03 -070024#include <memory>
Brian Carlstrom7940e442013-07-12 13:46:57 -070025#include <set>
26#include <string>
Igor Murashkinf5b4c502014-11-14 15:01:59 -080027#include <ostream>
Brian Carlstrom7940e442013-07-12 13:46:57 -070028
Vladimir Marko80afd022015-05-19 18:08:00 +010029#include "base/bit_utils.h"
Vladimir Marko944da602016-02-19 12:27:55 +000030#include "base/dchecked_vector.h"
Alex Lighte64300b2015-12-15 15:02:47 -080031#include "base/length_prefixed_array.h"
Igor Murashkin46774762014-10-22 11:37:02 -070032#include "base/macros.h"
Brian Carlstrom7940e442013-07-12 13:46:57 -070033#include "driver/compiler_driver.h"
Mathieu Chartierfd04b6f2014-11-14 19:34:18 -080034#include "gc/space/space.h"
Mathieu Chartierceb07b32015-12-10 09:33:21 -080035#include "image.h"
Mathieu Chartiere401d142015-04-22 13:56:20 -070036#include "lock_word.h"
Brian Carlstrom7940e442013-07-12 13:46:57 -070037#include "mem_map.h"
38#include "oat_file.h"
39#include "mirror/dex_cache.h"
40#include "os.h"
41#include "safe_map.h"
Mathieu Chartiere401d142015-04-22 13:56:20 -070042#include "utils.h"
Brian Carlstrom7940e442013-07-12 13:46:57 -070043
44namespace art {
Mathieu Chartierda5b28a2015-11-05 08:03:47 -080045namespace gc {
46namespace space {
47class ImageSpace;
48} // namespace space
49} // namespace gc
Brian Carlstrom7940e442013-07-12 13:46:57 -070050
Mathieu Chartier1f47b672016-01-07 16:29:01 -080051class ClassTable;
52
// Sentinel file descriptor meaning "no fd supplied"; when passed to Write(), the image/oat
// file is opened by name instead of using a caller-provided descriptor.
static constexpr int kInvalidFd = -1;
Mathieu Chartiera90c7722015-10-29 15:41:36 -070054
Brian Carlstrom7940e442013-07-12 13:46:57 -070055// Write a Space built during compilation for use during execution.
Igor Murashkin46774762014-10-22 11:37:02 -070056class ImageWriter FINAL {
Brian Carlstrom7940e442013-07-12 13:46:57 -070057 public:
Mathieu Chartierda5b28a2015-11-05 08:03:47 -080058 ImageWriter(const CompilerDriver& compiler_driver,
59 uintptr_t image_begin,
60 bool compile_pic,
Mathieu Chartierceb07b32015-12-10 09:33:21 -080061 bool compile_app_image,
Jeff Haodcdc85b2015-12-04 14:06:18 -080062 ImageHeader::StorageMode image_storage_mode,
Vladimir Marko944da602016-02-19 12:27:55 +000063 const std::vector<const char*>& oat_filenames,
64 const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map);
Brian Carlstrom7940e442013-07-12 13:46:57 -070065
Vladimir Markof4da6752014-08-01 19:04:18 +010066 bool PrepareImageAddressSpace();
67
68 bool IsImageAddressSpaceReady() const {
Vladimir Marko944da602016-02-19 12:27:55 +000069 DCHECK(!image_infos_.empty());
70 for (const ImageInfo& image_info : image_infos_) {
Jeff Haodcdc85b2015-12-04 14:06:18 -080071 if (image_info.image_roots_address_ == 0u) {
72 return false;
73 }
74 }
Vladimir Marko944da602016-02-19 12:27:55 +000075 return true;
Vladimir Markof4da6752014-08-01 19:04:18 +010076 }
77
Mathieu Chartiere401d142015-04-22 13:56:20 -070078 template <typename T>
Mathieu Chartier90443472015-07-16 20:32:27 -070079 T* GetImageAddress(T* object) const SHARED_REQUIRES(Locks::mutator_lock_) {
Jeff Haodcdc85b2015-12-04 14:06:18 -080080 if (object == nullptr || IsInBootImage(object)) {
81 return object;
82 } else {
Vladimir Marko944da602016-02-19 12:27:55 +000083 size_t oat_index = GetOatIndex(object);
84 const ImageInfo& image_info = GetImageInfo(oat_index);
Jeff Haodcdc85b2015-12-04 14:06:18 -080085 return reinterpret_cast<T*>(image_info.image_begin_ + GetImageOffset(object));
86 }
Vladimir Markof4da6752014-08-01 19:04:18 +010087 }
88
Mathieu Chartier90443472015-07-16 20:32:27 -070089 ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
Mathieu Chartiere401d142015-04-22 13:56:20 -070090
  // Return the target (runtime) address of the element at `offset` within the dex cache
  // arrays of `dex_file`. Computed relative to the image that will contain the dex file's
  // oat data, so it is only meaningful after layout has assigned image_begin_ and the
  // kBinDexCacheArray bin offsets.
  template <typename PtrType>
  PtrType GetDexCacheArrayElementImageAddress(const DexFile* dex_file, uint32_t offset)
      const SHARED_REQUIRES(Locks::mutator_lock_) {
    // Find the image holding this dex file's oat data.
    auto oat_it = dex_file_oat_index_map_.find(dex_file);
    DCHECK(oat_it != dex_file_oat_index_map_.end());
    const ImageInfo& image_info = GetImageInfo(oat_it->second);
    // Start offset of this dex file's arrays within the dex cache array bin.
    auto it = image_info.dex_cache_array_starts_.find(dex_file);
    DCHECK(it != image_info.dex_cache_array_starts_.end());
    // image base + dex-cache-array bin start + per-dex-file start + element offset.
    return reinterpret_cast<PtrType>(
        image_info.image_begin_ + image_info.bin_slot_offsets_[kBinDexCacheArray] +
        it->second + offset);
  }
103
Vladimir Marko944da602016-02-19 12:27:55 +0000104 size_t GetOatFileOffset(size_t oat_index) const {
105 return GetImageInfo(oat_index).oat_offset_;
106 }
107
108 const uint8_t* GetOatFileBegin(size_t oat_index) const {
109 return GetImageInfo(oat_index).oat_file_begin_;
110 }
Vladimir Markof4da6752014-08-01 19:04:18 +0100111
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800112 // If image_fd is not kInvalidFd, then we use that for the image file. Otherwise we open
Jeff Haodcdc85b2015-12-04 14:06:18 -0800113 // the names in image_filenames.
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800114 // If oat_fd is not kInvalidFd, then we use that for the oat file. Otherwise we open
115 // the names in oat_filenames.
Mathieu Chartiera90c7722015-10-29 15:41:36 -0700116 bool Write(int image_fd,
Jeff Haodcdc85b2015-12-04 14:06:18 -0800117 const std::vector<const char*>& image_filenames,
Vladimir Marko944da602016-02-19 12:27:55 +0000118 const std::vector<const char*>& oat_filenames)
Mathieu Chartier90443472015-07-16 20:32:27 -0700119 REQUIRES(!Locks::mutator_lock_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700120
Vladimir Marko944da602016-02-19 12:27:55 +0000121 uintptr_t GetOatDataBegin(size_t oat_index) {
122 return reinterpret_cast<uintptr_t>(GetImageInfo(oat_index).oat_data_begin_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700123 }
124
Vladimir Marko944da602016-02-19 12:27:55 +0000125 // Get the index of the oat file containing the dex file.
126 //
127 // This "oat_index" is used to retrieve information about the the memory layout
128 // of the oat file and its associated image file, needed for link-time patching
129 // of references to the image or across oat files.
130 size_t GetOatIndexForDexFile(const DexFile* dex_file) const;
131
132 // Get the index of the oat file containing the dex file served by the dex cache.
133 size_t GetOatIndexForDexCache(mirror::DexCache* dex_cache) const
Jeff Haodcdc85b2015-12-04 14:06:18 -0800134 SHARED_REQUIRES(Locks::mutator_lock_);
135
Vladimir Marko944da602016-02-19 12:27:55 +0000136 // Update the oat layout for the given oat file.
137 // This will make the oat_offset for the next oat file valid.
138 void UpdateOatFileLayout(size_t oat_index,
139 size_t oat_loaded_size,
140 size_t oat_data_offset,
141 size_t oat_data_size);
142 // Update information about the oat header, i.e. checksum and trampoline offsets.
143 void UpdateOatFileHeader(size_t oat_index, const OatHeader& oat_header);
Jeff Haodcdc85b2015-12-04 14:06:18 -0800144
Brian Carlstrom7940e442013-07-12 13:46:57 -0700145 private:
146 bool AllocMemory();
147
Mathieu Chartier31e89252013-08-28 11:29:12 -0700148 // Mark the objects defined in this space in the given live bitmap.
Mathieu Chartier90443472015-07-16 20:32:27 -0700149 void RecordImageAllocations() SHARED_REQUIRES(Locks::mutator_lock_);
Mathieu Chartier31e89252013-08-28 11:29:12 -0700150
  // Classify different kinds of bins that objects end up getting packed into during image writing.
  // Ordered from dirtiest to cleanest (until ArtMethods). The ordering matters: bins are laid
  // out sequentially (see bin_slot_offsets_), so objects with similar expected dirtiness end up
  // grouped on the same pages.
  enum Bin {
    kBinMiscDirty,                    // Dex caches, object locks, etc...
    kBinClassVerified,                // Class verified, but initializers haven't been run
    // Unknown mix of clean/dirty:
    kBinRegular,
    kBinClassInitialized,             // Class initializers have been run
    // All classes get their own bins since their fields often dirty
    kBinClassInitializedFinalStatics, // Class initializers have been run, no non-final statics
    // Likely-clean:
    kBinString,                       // [String] Almost always immutable (except for obj header).
    // Add more bins here if we add more segregation code.
    // Non mirror fields must be below.
    // ArtFields should be always clean.
    kBinArtField,
    // If the class is initialized, then the ArtMethods are probably clean.
    kBinArtMethodClean,
    // ArtMethods may be dirty if the class has native methods or a declaring class that isn't
    // initialized.
    kBinArtMethodDirty,
    // Conflict tables (clean).
    kBinIMTConflictTable,
    // Runtime methods (always clean, do not have a length prefix array).
    kBinRuntimeMethod,
    // Dex cache arrays have a special slot for PC-relative addressing. Since they are
    // huge, and as such their dirtiness is not important for the clean/dirty separation,
    // we arbitrarily keep them at the end of the native data.
    kBinDexCacheArray,                // Arrays belonging to dex cache.
    kBinSize,
    // Number of bins which are for mirror objects.
    kBinMirrorCount = kBinArtField,
  };
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800184 friend std::ostream& operator<<(std::ostream& stream, const Bin& bin);
185
  // Kind of native (non-mirror) data a relocation record refers to. Determines the bin the
  // data is packed into; see BinTypeForNativeRelocationType().
  enum NativeObjectRelocationType {
    kNativeObjectRelocationTypeArtField,
    kNativeObjectRelocationTypeArtFieldArray,
    kNativeObjectRelocationTypeArtMethodClean,
    kNativeObjectRelocationTypeArtMethodArrayClean,
    kNativeObjectRelocationTypeArtMethodDirty,
    kNativeObjectRelocationTypeArtMethodArrayDirty,
    kNativeObjectRelocationTypeRuntimeMethod,
    kNativeObjectRelocationTypeIMTConflictTable,
    kNativeObjectRelocationTypeDexCacheArray,
  };
197 friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type);
198
  // Well-known oat trampolines/bridges. Used to index ImageInfo::oat_address_offsets_
  // (offsets from oat_data_begin_ to the stubs); see GetOatAddress().
  enum OatAddress {
    kOatAddressInterpreterToInterpreterBridge,
    kOatAddressInterpreterToCompiledCodeBridge,
    kOatAddressJNIDlsymLookup,
    kOatAddressQuickGenericJNITrampoline,
    kOatAddressQuickIMTConflictTrampoline,
    kOatAddressQuickResolutionTrampoline,
    kOatAddressQuickToInterpreterBridge,
    // Number of elements in the enum.
    kOatAddressCount,
  };
210 friend std::ostream& operator<<(std::ostream& stream, const OatAddress& oat_address);
211
  // Number of bits needed to encode any mirror-object bin (kBinMirrorCount distinct values).
  static constexpr size_t kBinBits = MinimumBitsToStore<uint32_t>(kBinMirrorCount - 1);
  // uint32 = typeof(lockword_)
  // Subtract read barrier bits since we want these to remain 0, or else it may result in DCHECK
  // failures due to invalid read barrier bits during object field reads.
  static const size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits -
      LockWord::kReadBarrierStateSize;
  // Mask selecting the bin bits in the lock word: 111000.....0
  static const size_t kBinMask = ((static_cast<size_t>(1) << kBinBits) - 1) << kBinShift;
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800220
  // We use the lock word to store the bin # and bin index of the object in the image.
  //
  // The struct size must be exactly sizeof(LockWord), currently 32-bits, since this will end up
  // stored in the lock word bit-for-bit when object forwarding addresses are being calculated.
  struct BinSlot {
    // Decode a bin slot previously packed into a lock word.
    explicit BinSlot(uint32_t lockword);
    // Pack a (bin, index) pair.
    BinSlot(Bin bin, uint32_t index);

    // The bin an object belongs to, i.e. regular, class/verified, class/initialized, etc.
    Bin GetBin() const;
    // The offset in bytes from the beginning of the bin. Aligned to object size.
    uint32_t GetIndex() const;
    // Pack into a single uint32_t, for storing into a lock word.
    uint32_t Uint32Value() const { return lockword_; }
    // Comparison operator for map support
    bool operator<(const BinSlot& other) const { return lockword_ < other.lockword_; }

   private:
    // Must be the same size as LockWord, any larger and we would truncate the data.
    const uint32_t lockword_;
  };
242
  // Per-image state: the memory-mapped image under construction plus the layout bookkeeping
  // needed to compute target addresses for that image and its associated oat file.
  struct ImageInfo {
    ImageInfo();
    ImageInfo(ImageInfo&&) = default;

    // Create the image sections into the out sections variable, returns the size of the image
    // excluding the bitmap.
    size_t CreateImageSections(ImageSection* out_sections) const;

    std::unique_ptr<MemMap> image_;  // Memory mapped for generating the image.

    // Target begin of this image. Notes: It is not valid to write here, this is the address
    // of the target image, not necessarily where image_ is mapped. The address is only valid
    // after layout (otherwise null).
    uint8_t* image_begin_ = nullptr;

    // Offset to the free space in image_, initially size of image header.
    size_t image_end_ = RoundUp(sizeof(ImageHeader), kObjectAlignment);
    uint32_t image_roots_address_ = 0;  // The image roots address in the image.
    size_t image_offset_ = 0;  // Offset of this image from the start of the first image.

    // Image size is the *address space* covered by this image. As the live bitmap is aligned
    // to the page size, the live bitmap will cover more address space than necessary. But live
    // bitmaps may not overlap, so an image has a "shadow," which is accounted for in the size.
    // The next image may only start at image_begin_ + image_size_ (which is guaranteed to be
    // page-aligned).
    size_t image_size_ = 0;

    // Oat data.
    // Offset of the oat file for this image from start of oat files. This is
    // valid when the previous oat file has been written.
    size_t oat_offset_ = 0;
    // Layout of the loaded ELF file containing the oat file, valid after UpdateOatFileLayout().
    const uint8_t* oat_file_begin_ = nullptr;
    size_t oat_loaded_size_ = 0;
    const uint8_t* oat_data_begin_ = nullptr;
    size_t oat_size_ = 0;  // Size of the corresponding oat data.
    // The oat header checksum, valid after UpdateOatFileHeader().
    uint32_t oat_checksum_ = 0u;

    // Image bitmap which lets us know where the objects inside of the image reside.
    std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;

    // The start offsets of the dex cache arrays (within the kBinDexCacheArray bin).
    SafeMap<const DexFile*, size_t> dex_cache_array_starts_;

    // Offset from oat_data_begin_ to the stubs, indexed by OatAddress.
    uint32_t oat_address_offsets_[kOatAddressCount] = {};

    // Bin slot tracking for dirty object packing.
    size_t bin_slot_sizes_[kBinSize] = {};  // Number of bytes in a bin.
    size_t bin_slot_offsets_[kBinSize] = {};  // Number of bytes in previous bins.
    size_t bin_slot_count_[kBinSize] = {};  // Number of objects in a bin.

    // Cached size of the intern table for when we allocate memory.
    size_t intern_table_bytes_ = 0;

    // Number of image class table bytes.
    size_t class_table_bytes_ = 0;

    // Intern table associated with this image for serialization.
    std::unique_ptr<InternTable> intern_table_;

    // Class table associated with this image for serialization.
    std::unique_ptr<ClassTable> class_table_;
  };
308
Mathieu Chartier31e89252013-08-28 11:29:12 -0700309 // We use the lock word to store the offset of the object in the image.
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800310 void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
Mathieu Chartier90443472015-07-16 20:32:27 -0700311 SHARED_REQUIRES(Locks::mutator_lock_);
Mathieu Chartierd39645e2015-06-09 17:50:29 -0700312 void SetImageOffset(mirror::Object* object, size_t offset)
Mathieu Chartier90443472015-07-16 20:32:27 -0700313 SHARED_REQUIRES(Locks::mutator_lock_);
Ian Rogersb0fa5dc2014-04-28 16:47:08 -0700314 bool IsImageOffsetAssigned(mirror::Object* object) const
Mathieu Chartier90443472015-07-16 20:32:27 -0700315 SHARED_REQUIRES(Locks::mutator_lock_);
316 size_t GetImageOffset(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
Mathieu Chartiere401d142015-04-22 13:56:20 -0700317 void UpdateImageOffset(mirror::Object* obj, uintptr_t offset)
Mathieu Chartier90443472015-07-16 20:32:27 -0700318 SHARED_REQUIRES(Locks::mutator_lock_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700319
Mathieu Chartier90443472015-07-16 20:32:27 -0700320 void PrepareDexCacheArraySlots() SHARED_REQUIRES(Locks::mutator_lock_);
321 void AssignImageBinSlot(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800322 void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
Mathieu Chartier90443472015-07-16 20:32:27 -0700323 SHARED_REQUIRES(Locks::mutator_lock_);
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800324 bool IsImageBinSlotAssigned(mirror::Object* object) const
Mathieu Chartier90443472015-07-16 20:32:27 -0700325 SHARED_REQUIRES(Locks::mutator_lock_);
326 BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800327
Jeff Haodcdc85b2015-12-04 14:06:18 -0800328 void AddDexCacheArrayRelocation(void* array, size_t offset, mirror::DexCache* dex_cache)
329 SHARED_REQUIRES(Locks::mutator_lock_);
Mathieu Chartier90443472015-07-16 20:32:27 -0700330 void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_);
Mathieu Chartiere401d142015-04-22 13:56:20 -0700331
Alex Lighta59dd802014-07-02 16:28:08 -0700332 static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
Mathieu Chartier90443472015-07-16 20:32:27 -0700333 SHARED_REQUIRES(Locks::mutator_lock_) {
Alex Lighta59dd802014-07-02 16:28:08 -0700334 return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
335 }
336
  // Translate `object` to its address inside the writer's local memory-mapped copy of the
  // image (image_), as opposed to GetImageAddress() which returns the target runtime address.
  mirror::Object* GetLocalAddress(mirror::Object* object) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    size_t offset = GetImageOffset(object);
    size_t oat_index = GetOatIndex(object);
    const ImageInfo& image_info = GetImageInfo(oat_index);
    // Same offset as in the target image, but applied to the local mapping.
    uint8_t* dst = image_info.image_->Begin() + offset;
    return reinterpret_cast<mirror::Object*>(dst);
  }
345
Mathieu Chartierda5b28a2015-11-05 08:03:47 -0800346 // Returns the address in the boot image if we are compiling the app image.
347 const uint8_t* GetOatAddress(OatAddress type) const;
348
  // Translate an offset within the oat data into a target address. An offset of 0 is a
  // sentinel meaning "no code" and maps to null.
  const uint8_t* GetOatAddressForOffset(uint32_t offset, const ImageInfo& image_info) const {
    // With Quick, code is within the OatFile, as there are all in one
    // .o ELF object. But interpret it as signed.
    DCHECK_LE(static_cast<int32_t>(offset), static_cast<int32_t>(image_info.oat_size_));
    DCHECK(image_info.oat_data_begin_ != nullptr);
    return offset == 0u ? nullptr : image_info.oat_data_begin_ + static_cast<int32_t>(offset);
  }
356
Brian Carlstrom7940e442013-07-12 13:46:57 -0700357 // Returns true if the class was in the original requested image classes list.
Mathieu Chartierda5b28a2015-11-05 08:03:47 -0800358 bool KeepClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700359
360 // Debug aid that list of requested image classes.
361 void DumpImageClasses();
362
363 // Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
364 void ComputeLazyFieldsForImageClasses()
Mathieu Chartier90443472015-07-16 20:32:27 -0700365 SHARED_REQUIRES(Locks::mutator_lock_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700366
Brian Carlstrom7940e442013-07-12 13:46:57 -0700367 // Remove unwanted classes from various roots.
Mathieu Chartier90443472015-07-16 20:32:27 -0700368 void PruneNonImageClasses() SHARED_REQUIRES(Locks::mutator_lock_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700369
370 // Verify unwanted classes removed.
Mathieu Chartier90443472015-07-16 20:32:27 -0700371 void CheckNonImageClassesRemoved() SHARED_REQUIRES(Locks::mutator_lock_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700372 static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
Mathieu Chartier90443472015-07-16 20:32:27 -0700373 SHARED_REQUIRES(Locks::mutator_lock_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700374
375 // Lays out where the image objects will be at runtime.
Vladimir Markof4da6752014-08-01 19:04:18 +0100376 void CalculateNewObjectOffsets()
Mathieu Chartier90443472015-07-16 20:32:27 -0700377 SHARED_REQUIRES(Locks::mutator_lock_);
Vladimir Marko944da602016-02-19 12:27:55 +0000378 void CreateHeader(size_t oat_index)
Mathieu Chartier90443472015-07-16 20:32:27 -0700379 SHARED_REQUIRES(Locks::mutator_lock_);
Vladimir Marko944da602016-02-19 12:27:55 +0000380 mirror::ObjectArray<mirror::Object>* CreateImageRoots(size_t oat_index) const
Mathieu Chartier90443472015-07-16 20:32:27 -0700381 SHARED_REQUIRES(Locks::mutator_lock_);
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800382 void CalculateObjectBinSlots(mirror::Object* obj)
Mathieu Chartier90443472015-07-16 20:32:27 -0700383 SHARED_REQUIRES(Locks::mutator_lock_);
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800384 void UnbinObjectsIntoOffset(mirror::Object* obj)
Mathieu Chartier90443472015-07-16 20:32:27 -0700385 SHARED_REQUIRES(Locks::mutator_lock_);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700386
387 void WalkInstanceFields(mirror::Object* obj, mirror::Class* klass)
Mathieu Chartier90443472015-07-16 20:32:27 -0700388 SHARED_REQUIRES(Locks::mutator_lock_);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700389 void WalkFieldsInOrder(mirror::Object* obj)
Mathieu Chartier90443472015-07-16 20:32:27 -0700390 SHARED_REQUIRES(Locks::mutator_lock_);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700391 static void WalkFieldsCallback(mirror::Object* obj, void* arg)
Mathieu Chartier90443472015-07-16 20:32:27 -0700392 SHARED_REQUIRES(Locks::mutator_lock_);
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800393 static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg)
Mathieu Chartier90443472015-07-16 20:32:27 -0700394 SHARED_REQUIRES(Locks::mutator_lock_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700395
396 // Creates the contiguous image in memory and adjusts pointers.
Vladimir Marko944da602016-02-19 12:27:55 +0000397 void CopyAndFixupNativeData(size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_);
Mathieu Chartier90443472015-07-16 20:32:27 -0700398 void CopyAndFixupObjects() SHARED_REQUIRES(Locks::mutator_lock_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700399 static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
Mathieu Chartier90443472015-07-16 20:32:27 -0700400 SHARED_REQUIRES(Locks::mutator_lock_);
401 void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
Jeff Haodcdc85b2015-12-04 14:06:18 -0800402 void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
Mathieu Chartier90443472015-07-16 20:32:27 -0700403 SHARED_REQUIRES(Locks::mutator_lock_);
Mathieu Chartiere42888f2016-04-14 10:49:19 -0700404 void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
405 SHARED_REQUIRES(Locks::mutator_lock_);
Mathieu Chartierc7853442015-03-27 14:35:38 -0700406 void FixupClass(mirror::Class* orig, mirror::Class* copy)
Mathieu Chartier90443472015-07-16 20:32:27 -0700407 SHARED_REQUIRES(Locks::mutator_lock_);
Ian Rogersef7d42f2014-01-06 12:55:46 -0800408 void FixupObject(mirror::Object* orig, mirror::Object* copy)
Mathieu Chartier90443472015-07-16 20:32:27 -0700409 SHARED_REQUIRES(Locks::mutator_lock_);
Vladimir Marko05792b92015-08-03 11:56:49 +0100410 void FixupDexCache(mirror::DexCache* orig_dex_cache, mirror::DexCache* copy_dex_cache)
411 SHARED_REQUIRES(Locks::mutator_lock_);
Mathieu Chartiera808bac2015-11-05 16:33:15 -0800412 void FixupPointerArray(mirror::Object* dst,
413 mirror::PointerArray* arr,
414 mirror::Class* klass,
415 Bin array_type)
416 SHARED_REQUIRES(Locks::mutator_lock_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700417
Mingyao Yang98d1cc82014-05-15 17:02:16 -0700418 // Get quick code for non-resolution/imt_conflict/abstract method.
Jeff Haodcdc85b2015-12-04 14:06:18 -0800419 const uint8_t* GetQuickCode(ArtMethod* method,
420 const ImageInfo& image_info,
421 bool* quick_is_interpreted)
Mathieu Chartier90443472015-07-16 20:32:27 -0700422 SHARED_REQUIRES(Locks::mutator_lock_);
Mingyao Yang98d1cc82014-05-15 17:02:16 -0700423
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800424 // Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins.
Jeff Haodcdc85b2015-12-04 14:06:18 -0800425 size_t GetBinSizeSum(ImageInfo& image_info, Bin up_to = kBinSize) const;
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800426
Mathieu Chartiere401d142015-04-22 13:56:20 -0700427 // Return true if a method is likely to be dirtied at runtime.
Mathieu Chartier90443472015-07-16 20:32:27 -0700428 bool WillMethodBeDirty(ArtMethod* m) const SHARED_REQUIRES(Locks::mutator_lock_);
Mathieu Chartiere401d142015-04-22 13:56:20 -0700429
430 // Assign the offset for an ArtMethod.
Jeff Haodcdc85b2015-12-04 14:06:18 -0800431 void AssignMethodOffset(ArtMethod* method,
432 NativeObjectRelocationType type,
Vladimir Marko944da602016-02-19 12:27:55 +0000433 size_t oat_index)
Mathieu Chartier54d220e2015-07-30 16:20:06 -0700434 SHARED_REQUIRES(Locks::mutator_lock_);
435
Mathieu Chartiere42888f2016-04-14 10:49:19 -0700436 // Assign the offset for an IMT conflict table. Does nothing if the table already has a native
437 // relocation.
438 void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
439 SHARED_REQUIRES(Locks::mutator_lock_);
440
Mathieu Chartiera808bac2015-11-05 16:33:15 -0800441 // Return true if klass is loaded by the boot class loader but not in the boot image.
Mathieu Chartierda5b28a2015-11-05 08:03:47 -0800442 bool IsBootClassLoaderNonImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
443
Mathieu Chartier901e0702016-02-19 13:42:48 -0800444 // Return true if klass depends on a boot class loader non image class. We want to prune these
445 // classes since we do not want any boot class loader classes in the image. This means that
Mathieu Chartiera808bac2015-11-05 16:33:15 -0800446 // we also cannot have any classes which refer to these boot class loader non image classes.
Mathieu Chartier901e0702016-02-19 13:42:48 -0800447 // PruneAppImageClass also prunes if klass depends on a non-image class according to the compiler
448 // driver.
449 bool PruneAppImageClass(mirror::Class* klass)
Mathieu Chartierda5b28a2015-11-05 08:03:47 -0800450 SHARED_REQUIRES(Locks::mutator_lock_);
451
Mathieu Chartier945c1c12015-11-24 15:37:12 -0800452 // early_exit is true if we had a cyclic dependency anywhere down the chain.
Mathieu Chartier901e0702016-02-19 13:42:48 -0800453 bool PruneAppImageClassInternal(mirror::Class* klass,
454 bool* early_exit,
455 std::unordered_set<mirror::Class*>* visited)
Mathieu Chartier945c1c12015-11-24 15:37:12 -0800456 SHARED_REQUIRES(Locks::mutator_lock_);
457
Mathieu Chartier54d220e2015-07-30 16:20:06 -0700458 static Bin BinTypeForNativeRelocationType(NativeObjectRelocationType type);
459
Mathieu Chartierbcd9dd72016-03-07 10:25:04 -0800460 uintptr_t NativeOffsetInImage(void* obj) SHARED_REQUIRES(Locks::mutator_lock_);
Vladimir Marko05792b92015-08-03 11:56:49 +0100461
Mathieu Chartier4b00d342015-11-13 10:42:08 -0800462 // Location of where the object will be when the image is loaded at runtime.
Vladimir Marko05792b92015-08-03 11:56:49 +0100463 template <typename T>
Mathieu Chartiere8bf1342016-02-17 18:02:40 -0800464 T* NativeLocationInImage(T* obj) SHARED_REQUIRES(Locks::mutator_lock_);
Andreas Gampe245ee002014-12-04 21:25:04 -0800465
Mathieu Chartier4b00d342015-11-13 10:42:08 -0800466 // Location of where the temporary copy of the object currently is.
467 template <typename T>
Jeff Haodcdc85b2015-12-04 14:06:18 -0800468 T* NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) SHARED_REQUIRES(Locks::mutator_lock_);
Mathieu Chartier4b00d342015-11-13 10:42:08 -0800469
Mathieu Chartierda5b28a2015-11-05 08:03:47 -0800470 // Return true of obj is inside of the boot image space. This may only return true if we are
471 // compiling an app image.
472 bool IsInBootImage(const void* obj) const;
473
474 // Return true if ptr is within the boot oat file.
475 bool IsInBootOatFile(const void* ptr) const;
476
Vladimir Marko944da602016-02-19 12:27:55 +0000477 // Get the index of the oat file associated with the object.
478 size_t GetOatIndex(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
Jeff Haodcdc85b2015-12-04 14:06:18 -0800479
Vladimir Marko944da602016-02-19 12:27:55 +0000480 // The oat index for shared data in multi-image and all data in single-image compilation.
481 size_t GetDefaultOatIndex() const {
482 return 0u;
Jeff Haodcdc85b2015-12-04 14:06:18 -0800483 }
484
Vladimir Marko944da602016-02-19 12:27:55 +0000485 ImageInfo& GetImageInfo(size_t oat_index) {
486 return image_infos_[oat_index];
487 }
488
489 const ImageInfo& GetImageInfo(size_t oat_index) const {
490 return image_infos_[oat_index];
491 }
Jeff Haodcdc85b2015-12-04 14:06:18 -0800492
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800493 // Find an already strongly interned string in the other images or in the boot image. Used to
494 // remove duplicates in the multi image and app image case.
495 mirror::String* FindInternedString(mirror::String* string) SHARED_REQUIRES(Locks::mutator_lock_);
496
Mathieu Chartiere42888f2016-04-14 10:49:19 -0700497 // Return true if there already exists a native allocation for an object.
498 bool NativeRelocationAssigned(void* ptr) const;
499
Brian Carlstrom7940e442013-07-12 13:46:57 -0700500 const CompilerDriver& compiler_driver_;
501
Jeff Haodcdc85b2015-12-04 14:06:18 -0800502 // Beginning target image address for the first image.
503 uint8_t* global_image_begin_;
Vladimir Markof4da6752014-08-01 19:04:18 +0100504
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800505 // Offset from image_begin_ to where the first object is in image_.
506 size_t image_objects_offset_begin_;
507
Mathieu Chartiere401d142015-04-22 13:56:20 -0700508 // Pointer arrays that need to be updated. Since these are only some int and long arrays, we need
509 // to keep track. These include vtable arrays, iftable arrays, and dex caches.
510 std::unordered_map<mirror::PointerArray*, Bin> pointer_arrays_;
511
Mathieu Chartierd39645e2015-06-09 17:50:29 -0700512 // Saved hash codes. We use these to restore lockwords which were temporarily used to have
513 // forwarding addresses as well as copying over hash codes.
514 std::unordered_map<mirror::Object*, uint32_t> saved_hashcode_map_;
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800515
Mathieu Chartierda5b28a2015-11-05 08:03:47 -0800516 // Boolean flags.
Igor Murashkin46774762014-10-22 11:37:02 -0700517 const bool compile_pic_;
Mathieu Chartierda5b28a2015-11-05 08:03:47 -0800518 const bool compile_app_image_;
519
Mathieu Chartier2d721012014-11-10 11:08:06 -0800520 // Size of pointers on the target architecture.
521 size_t target_ptr_size_;
522
Vladimir Marko944da602016-02-19 12:27:55 +0000523 // Image data indexed by the oat file index.
524 dchecked_vector<ImageInfo> image_infos_;
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800525
Mathieu Chartiere401d142015-04-22 13:56:20 -0700526 // ArtField, ArtMethod relocating map. These are allocated as array of structs but we want to
527 // have one entry per art field for convenience. ArtFields are placed right after the end of the
528 // image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields.
Mathieu Chartier54d220e2015-07-30 16:20:06 -0700529 struct NativeObjectRelocation {
Vladimir Marko944da602016-02-19 12:27:55 +0000530 size_t oat_index;
Mathieu Chartiere401d142015-04-22 13:56:20 -0700531 uintptr_t offset;
Mathieu Chartier54d220e2015-07-30 16:20:06 -0700532 NativeObjectRelocationType type;
533
534 bool IsArtMethodRelocation() const {
535 return type == kNativeObjectRelocationTypeArtMethodClean ||
Mathieu Chartiere42888f2016-04-14 10:49:19 -0700536 type == kNativeObjectRelocationTypeArtMethodDirty ||
537 type == kNativeObjectRelocationTypeRuntimeMethod;
Mathieu Chartier54d220e2015-07-30 16:20:06 -0700538 }
Mathieu Chartiere401d142015-04-22 13:56:20 -0700539 };
Mathieu Chartier54d220e2015-07-30 16:20:06 -0700540 std::unordered_map<void*, NativeObjectRelocation> native_object_relocations_;
Mathieu Chartierc7853442015-03-27 14:35:38 -0700541
Mathieu Chartiere401d142015-04-22 13:56:20 -0700542 // Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image.
543 ArtMethod* image_methods_[ImageHeader::kImageMethodsCount];
544
545 // Counters for measurements, used for logging only.
546 uint64_t dirty_methods_;
547 uint64_t clean_methods_;
Andreas Gampe245ee002014-12-04 21:25:04 -0800548
Mathieu Chartiera808bac2015-11-05 16:33:15 -0800549 // Prune class memoization table to speed up ContainsBootClassLoaderNonImageClass.
Mathieu Chartierda5b28a2015-11-05 08:03:47 -0800550 std::unordered_map<mirror::Class*, bool> prune_class_memo_;
551
Mathieu Chartier67ad20e2015-12-09 15:41:09 -0800552 // Class loaders with a class table to write out. There should only be one class loader because
553 // dex2oat loads the dex files to be compiled into a single class loader. For the boot image,
554 // null is a valid entry.
Mathieu Chartier208a5cb2015-12-02 15:44:07 -0800555 std::unordered_set<mirror::ClassLoader*> class_loaders_;
556
Mathieu Chartierceb07b32015-12-10 09:33:21 -0800557 // Which mode the image is stored as, see image.h
558 const ImageHeader::StorageMode image_storage_mode_;
559
Vladimir Marko944da602016-02-19 12:27:55 +0000560 // The file names of oat files.
561 const std::vector<const char*>& oat_filenames_;
562
563 // Map of dex files to the indexes of oat files that they were compiled into.
564 const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map_;
Jeff Haodcdc85b2015-12-04 14:06:18 -0800565
Mathieu Chartierda5b28a2015-11-05 08:03:47 -0800566 friend class ContainsBootClassLoaderNonImageClassVisitor;
Mingyao Yang98d1cc82014-05-15 17:02:16 -0700567 friend class FixupClassVisitor;
Mathieu Chartierd39645e2015-06-09 17:50:29 -0700568 friend class FixupRootVisitor;
569 friend class FixupVisitor;
Mathieu Chartier4b00d342015-11-13 10:42:08 -0800570 friend class NativeLocationVisitor;
Mathieu Chartiere0671ce2015-07-28 17:23:28 -0700571 friend class NonImageClassesVisitor;
Mathieu Chartierb7ea3ac2014-03-24 16:54:46 -0700572 DISALLOW_COPY_AND_ASSIGN(ImageWriter);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700573};
574
575} // namespace art
576
Brian Carlstromfc0e3212013-07-17 14:40:12 -0700577#endif // ART_COMPILER_IMAGE_WRITER_H_