/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_IMAGE_WRITER_H_
#define ART_COMPILER_IMAGE_WRITER_H_

#include <stdint.h>
#include "base/memory_tool.h"

#include <cstddef>
#include <memory>
#include <set>
#include <string>
#include <ostream>

#include "base/bit_utils.h"
#include "base/length_prefixed_array.h"
#include "base/macros.h"
#include "driver/compiler_driver.h"
#include "gc/space/space.h"
#include "image.h"
#include "lock_word.h"
#include "mem_map.h"
#include "oat_file.h"
#include "mirror/dex_cache.h"
#include "os.h"
#include "safe_map.h"
#include "utils.h"

namespace art {
namespace gc {
namespace space {
class ImageSpace;
}  // namespace space
}  // namespace gc

static constexpr int kInvalidImageFd = -1;

// Write a Space built during compilation for use during execution.
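//
// A minimal usage sketch of how a driver (dex2oat) is expected to use this class. The local
// variable names and the elided oat-file writing step are illustrative, not part of this API:
//
//   ImageWriter writer(driver, image_base, compile_pic, compile_app_image, storage_mode,
//                      oat_filenames, dex_file_oat_filename_map);
//   CHECK(writer.PrepareImageAddressSpace());   // Lays out objects and bins for each image.
//   // ... write and patch the oat file(s) here ...
//   CHECK(writer.Write(kInvalidImageFd, image_filenames, oat_filenames));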
class ImageWriter FINAL {
 public:
  ImageWriter(const CompilerDriver& compiler_driver,
              uintptr_t image_begin,
              bool compile_pic,
              bool compile_app_image,
              ImageHeader::StorageMode image_storage_mode,
              const std::vector<const char*> oat_filenames,
              const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map)
      : compiler_driver_(compiler_driver),
        global_image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
        image_objects_offset_begin_(0),
        oat_file_(nullptr),
        compile_pic_(compile_pic),
        compile_app_image_(compile_app_image),
        boot_image_space_(nullptr),
        target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
        intern_table_bytes_(0u),
        image_method_array_(ImageHeader::kImageMethodsCount),
        dirty_methods_(0u),
        clean_methods_(0u),
        class_table_bytes_(0u),
        image_storage_mode_(image_storage_mode),
        dex_file_oat_filename_map_(dex_file_oat_filename_map),
        oat_filenames_(oat_filenames),
        default_oat_filename_(oat_filenames[0]) {
    CHECK_NE(image_begin, 0U);
    for (const char* oat_filename : oat_filenames) {
      image_info_map_.emplace(oat_filename, ImageInfo());
    }
    std::fill_n(image_methods_, arraysize(image_methods_), nullptr);
  }

  ~ImageWriter() {
  }

  bool PrepareImageAddressSpace();

  bool IsImageAddressSpaceReady() const {
    bool ready = !image_info_map_.empty();
    for (auto& pair : image_info_map_) {
      const ImageInfo& image_info = pair.second;
      if (image_info.image_roots_address_ == 0u) {
        return false;
      }
    }
    return ready;
  }

  template <typename T>
  T* GetImageAddress(T* object) const SHARED_REQUIRES(Locks::mutator_lock_) {
    if (object == nullptr || IsInBootImage(object)) {
      return object;
    } else {
      const char* oat_filename = GetOatFilename(object);
      const ImageInfo& image_info = GetConstImageInfo(oat_filename);
      return reinterpret_cast<T*>(image_info.image_begin_ + GetImageOffset(object));
    }
  }

  ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);

  template <typename PtrType>
  PtrType GetDexCacheArrayElementImageAddress(const DexFile* dex_file, uint32_t offset)
      const SHARED_REQUIRES(Locks::mutator_lock_) {
    auto oat_it = dex_file_oat_filename_map_.find(dex_file);
    DCHECK(oat_it != dex_file_oat_filename_map_.end());
    const ImageInfo& image_info = GetConstImageInfo(oat_it->second);
    auto it = image_info.dex_cache_array_starts_.find(dex_file);
    DCHECK(it != image_info.dex_cache_array_starts_.end());
    return reinterpret_cast<PtrType>(
        image_info.image_begin_ + image_info.bin_slot_offsets_[kBinDexCacheArray] +
        it->second + offset);
  }

  uint8_t* GetOatFileBegin(const char* oat_filename) const;

  // If image_fd is not kInvalidImageFd, then we use that file descriptor for the image file.
  // Otherwise we open the files named in image_filenames.
  bool Write(int image_fd,
             const std::vector<const char*>& image_filenames,
             const std::vector<const char*>& oat_filenames)
      REQUIRES(!Locks::mutator_lock_);

  uintptr_t GetOatDataBegin(const char* oat_filename) {
    return reinterpret_cast<uintptr_t>(GetImageInfo(oat_filename).oat_data_begin_);
  }

  const char* GetOatFilenameForDexCache(mirror::DexCache* dex_cache) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Update the oat size for the given oat file. This will make the oat_offset for the next oat
  // file valid.
  void UpdateOatFile(const char* oat_filename);

 private:
  bool AllocMemory();

  // Mark the objects defined in this space in the given live bitmap.
  void RecordImageAllocations() SHARED_REQUIRES(Locks::mutator_lock_);

  // Classify different kinds of bins that objects end up getting packed into during image writing.
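  // The native-data bins (ArtField, ArtMethod, dex cache arrays) follow the mirror-object bins
  // below; bin_slot_offsets_ in ImageInfo tracks the running byte offset of each bin, so objects
  // are effectively packed bin-by-bin in this enum order. (Summary inferred from the bin
  // bookkeeping in this file rather than stated elsewhere.)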
  enum Bin {
    // Likely-clean:
    kBinString,                        // [String] Almost always immutable (except for obj header).
    // Unknown mix of clean/dirty:
    kBinRegular,
    // Likely-dirty:
    // All classes get their own bins since their fields are often dirty.
    kBinClassInitializedFinalStatics,  // Class initializers have been run, no non-final statics.
    kBinClassInitialized,              // Class initializers have been run.
    kBinClassVerified,                 // Class verified, but initializers haven't been run.
    // Add more bins here if we add more segregation code.
    // Non mirror fields must be below.
    // ArtFields should always be clean.
    kBinArtField,
    // If the class is initialized, then the ArtMethods are probably clean.
    kBinArtMethodClean,
    // ArtMethods may be dirty if the class has native methods or a declaring class that isn't
    // initialized.
    kBinArtMethodDirty,
    // Dex cache arrays have a special slot for PC-relative addressing. Since they are huge,
    // their dirtiness is not important for the clean/dirty separation, so we arbitrarily keep
    // them at the end of the native data.
    kBinDexCacheArray,                 // Arrays belonging to dex cache.
    kBinSize,
    // Number of bins which are for mirror objects.
    kBinMirrorCount = kBinArtField,
  };
  friend std::ostream& operator<<(std::ostream& stream, const Bin& bin);

  enum NativeObjectRelocationType {
    kNativeObjectRelocationTypeArtField,
    kNativeObjectRelocationTypeArtFieldArray,
    kNativeObjectRelocationTypeArtMethodClean,
    kNativeObjectRelocationTypeArtMethodArrayClean,
    kNativeObjectRelocationTypeArtMethodDirty,
    kNativeObjectRelocationTypeArtMethodArrayDirty,
    kNativeObjectRelocationTypeDexCacheArray,
  };
  friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type);

  enum OatAddress {
    kOatAddressInterpreterToInterpreterBridge,
    kOatAddressInterpreterToCompiledCodeBridge,
    kOatAddressJNIDlsymLookup,
    kOatAddressQuickGenericJNITrampoline,
    kOatAddressQuickIMTConflictTrampoline,
    kOatAddressQuickResolutionTrampoline,
    kOatAddressQuickToInterpreterBridge,
    // Number of elements in the enum.
    kOatAddressCount,
  };
  friend std::ostream& operator<<(std::ostream& stream, const OatAddress& oat_address);

  static constexpr size_t kBinBits = MinimumBitsToStore<uint32_t>(kBinMirrorCount - 1);
  // uint32 = typeof(lockword_)
  // Subtract read barrier bits since we want these to remain 0, or else it may result in DCHECK
  // failures due to invalid read barrier bits during object field reads.
  static const size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits -
      LockWord::kReadBarrierStateSize;
  // 111000.....0
  static const size_t kBinMask = ((static_cast<size_t>(1) << kBinBits) - 1) << kBinShift;
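  // Resulting lock word layout, from most to least significant bit (illustrative sketch based on
  // the constants above; exact widths depend on kBinBits and LockWord::kReadBarrierStateSize):
  //   [ read barrier state (kept 0) | bin # (kBinBits) | index within the bin ]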

  // We use the lock word to store the bin # and bin index of the object in the image.
  //
  // The struct size must be exactly sizeof(LockWord), currently 32-bits, since this will end up
  // stored in the lock word bit-for-bit when object forwarding addresses are being calculated.
  struct BinSlot {
    explicit BinSlot(uint32_t lockword);
    BinSlot(Bin bin, uint32_t index);

    // The bin an object belongs to, i.e. regular, class/verified, class/initialized, etc.
    Bin GetBin() const;
    // The offset in bytes from the beginning of the bin. Aligned to object size.
    uint32_t GetIndex() const;
    // Pack into a single uint32_t, for storing into a lock word.
    uint32_t Uint32Value() const { return lockword_; }
    // Comparison operator for map support
    bool operator<(const BinSlot& other) const { return lockword_ < other.lockword_; }

   private:
    // Must be the same size as LockWord, any larger and we would truncate the data.
    const uint32_t lockword_;
  };
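  // Round-trip sketch (illustrative values only; the index must respect object alignment and the
  // bin must be one of the mirror-object bins so it fits in kBinBits):
  //   BinSlot slot(kBinRegular, /* index */ 64u);
  //   uint32_t packed = slot.Uint32Value();   // Stored bit-for-bit in the object's lock word.
  //   DCHECK_EQ(BinSlot(packed).GetBin(), kBinRegular);
  //   DCHECK_EQ(BinSlot(packed).GetIndex(), 64u);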

  struct ImageInfo {
    explicit ImageInfo()
        : image_begin_(nullptr),
          image_end_(RoundUp(sizeof(ImageHeader), kObjectAlignment)),
          image_roots_address_(0),
          image_offset_(0),
          image_size_(0),
          oat_offset_(0),
          bin_slot_sizes_(),
          bin_slot_offsets_(),
          bin_slot_count_() {}

    std::unique_ptr<MemMap> image_;  // Memory mapped for generating the image.

    // Target begin of this image. Note: it is not valid to write here; this is the address
    // of the target image, not necessarily where image_ is mapped. The address is only valid
    // after layout (otherwise null).
    uint8_t* image_begin_;

    size_t image_end_;  // Offset to the free space in image_, initially size of image header.
    uint32_t image_roots_address_;  // The image roots address in the image.
    size_t image_offset_;  // Offset of this image from the start of the first image.

    // Image size is the *address space* covered by this image. As the live bitmap is aligned
    // to the page size, the live bitmap will cover more address space than necessary. But live
    // bitmaps may not overlap, so an image has a "shadow," which is accounted for in the size.
    // The next image may only start at image_begin_ + image_size_ (which is guaranteed to be
    // page-aligned).
    size_t image_size_;

    // Oat data.
    size_t oat_offset_;  // Offset of the oat file for this image from start of oat files. This is
                         // valid when the previous oat file has been written.
    uint8_t* oat_data_begin_;  // Start of oatdata in the corresponding oat file. This is
                               // valid when the images have been laid out.
    size_t oat_size_;  // Size of the corresponding oat data.

    // Image bitmap which lets us know where the objects inside of the image reside.
    std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;

    // The start offsets of the dex cache arrays.
    SafeMap<const DexFile*, size_t> dex_cache_array_starts_;

    // Offset from oat_data_begin_ to the stubs.
    uint32_t oat_address_offsets_[kOatAddressCount];

    // Bin slot tracking for dirty object packing.
    size_t bin_slot_sizes_[kBinSize];  // Number of bytes in a bin.
    size_t bin_slot_offsets_[kBinSize];  // Number of bytes in previous bins.
    size_t bin_slot_count_[kBinSize];  // Number of objects in a bin.
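    // (Relationship implied by the comments above, not enforced here: bin_slot_offsets_[i] is the
    //  running sum of bin_slot_sizes_[0, i), i.e. bin i starts bin_slot_offsets_[i] bytes into the
    //  packed data.)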
  };

  // We use the lock word to store the offset of the object in the image.
  void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void SetImageOffset(mirror::Object* object, size_t offset)
      SHARED_REQUIRES(Locks::mutator_lock_);
  bool IsImageOffsetAssigned(mirror::Object* object) const
      SHARED_REQUIRES(Locks::mutator_lock_);
  size_t GetImageOffset(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
  void UpdateImageOffset(mirror::Object* obj, uintptr_t offset)
      SHARED_REQUIRES(Locks::mutator_lock_);

  void PrepareDexCacheArraySlots() SHARED_REQUIRES(Locks::mutator_lock_);
  void AssignImageBinSlot(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
  void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
      SHARED_REQUIRES(Locks::mutator_lock_);
  bool IsImageBinSlotAssigned(mirror::Object* object) const
      SHARED_REQUIRES(Locks::mutator_lock_);
  BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);

  void AddDexCacheArrayRelocation(void* array, size_t offset, mirror::DexCache* dex_cache)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_);

  static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
  }

  mirror::Object* GetLocalAddress(mirror::Object* object) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    size_t offset = GetImageOffset(object);
    const char* oat_filename = GetOatFilename(object);
    const ImageInfo& image_info = GetConstImageInfo(oat_filename);
    uint8_t* dst = image_info.image_->Begin() + offset;
    return reinterpret_cast<mirror::Object*>(dst);
  }

  // Returns the address in the boot image if we are compiling the app image.
  const uint8_t* GetOatAddress(OatAddress type) const;

  const uint8_t* GetOatAddressForOffset(uint32_t offset, const ImageInfo& image_info) const {
    // With Quick, code is within the OatFile, since it is all in one .o ELF object.
    // But interpret the offset as signed.
    DCHECK_LE(static_cast<int32_t>(offset), static_cast<int32_t>(image_info.oat_size_));
    DCHECK(image_info.oat_data_begin_ != nullptr);
    return offset == 0u ? nullptr : image_info.oat_data_begin_ + static_cast<int32_t>(offset);
  }

  // Returns true if the class was in the original requested image classes list.
  bool KeepClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);

  // Debug aid that lists the requested image classes.
  void DumpImageClasses();

  // Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
  void ComputeLazyFieldsForImageClasses()
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Remove unwanted classes from various roots.
  void PruneNonImageClasses() SHARED_REQUIRES(Locks::mutator_lock_);

  // Verify unwanted classes removed.
  void CheckNonImageClassesRemoved() SHARED_REQUIRES(Locks::mutator_lock_);
  static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Lays out where the image objects will be at runtime.
  void CalculateNewObjectOffsets()
      SHARED_REQUIRES(Locks::mutator_lock_);
  void CreateHeader(size_t oat_loaded_size, size_t oat_data_offset)
      SHARED_REQUIRES(Locks::mutator_lock_);
  mirror::ObjectArray<mirror::Object>* CreateImageRoots(const char* oat_filename) const
      SHARED_REQUIRES(Locks::mutator_lock_);
  void CalculateObjectBinSlots(mirror::Object* obj)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void UnbinObjectsIntoOffset(mirror::Object* obj)
      SHARED_REQUIRES(Locks::mutator_lock_);

  void WalkInstanceFields(mirror::Object* obj, mirror::Class* klass)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void WalkFieldsInOrder(mirror::Object* obj)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void WalkFieldsCallback(mirror::Object* obj, void* arg)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Creates the contiguous image in memory and adjusts pointers.
  void CopyAndFixupNativeData() SHARED_REQUIRES(Locks::mutator_lock_);
  void CopyAndFixupObjects() SHARED_REQUIRES(Locks::mutator_lock_);
  static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
  void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void FixupClass(mirror::Class* orig, mirror::Class* copy)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void FixupObject(mirror::Object* orig, mirror::Object* copy)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void FixupDexCache(mirror::DexCache* orig_dex_cache, mirror::DexCache* copy_dex_cache)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void FixupPointerArray(mirror::Object* dst,
                         mirror::PointerArray* arr,
                         mirror::Class* klass,
                         Bin array_type)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Get quick code for non-resolution/imt_conflict/abstract method.
  const uint8_t* GetQuickCode(ArtMethod* method,
                              const ImageInfo& image_info,
                              bool* quick_is_interpreted)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Patches references in OatFile to expect runtime addresses.
  void SetOatChecksumFromElfFile(File* elf_file);

  // Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins.
  size_t GetBinSizeSum(ImageInfo& image_info, Bin up_to = kBinSize) const;

  // Return true if a method is likely to be dirtied at runtime.
  bool WillMethodBeDirty(ArtMethod* m) const SHARED_REQUIRES(Locks::mutator_lock_);

  // Assign the offset for an ArtMethod.
  void AssignMethodOffset(ArtMethod* method,
                          NativeObjectRelocationType type,
                          const char* oat_filename)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Return true if klass is loaded by the boot class loader but not in the boot image.
  bool IsBootClassLoaderNonImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
| 421 | |
Mathieu Chartier | a808bac | 2015-11-05 16:33:15 -0800 | [diff] [blame] | 422 | // Return true if klass depends on a boot class loader non image class live. We want to prune |
| 423 | // these classes since we do not want any boot class loader classes in the image. This means that |
| 424 | // we also cannot have any classes which refer to these boot class loader non image classes. |
Mathieu Chartier | da5b28a | 2015-11-05 08:03:47 -0800 | [diff] [blame] | 425 | bool ContainsBootClassLoaderNonImageClass(mirror::Class* klass) |
| 426 | SHARED_REQUIRES(Locks::mutator_lock_); |
| 427 | |
Mathieu Chartier | 945c1c1 | 2015-11-24 15:37:12 -0800 | [diff] [blame] | 428 | // early_exit is true if we had a cyclic dependency anywhere down the chain. |
| 429 | bool ContainsBootClassLoaderNonImageClassInternal(mirror::Class* klass, |
| 430 | bool* early_exit, |
| 431 | std::unordered_set<mirror::Class*>* visited) |
| 432 | SHARED_REQUIRES(Locks::mutator_lock_); |
| 433 | |
Mathieu Chartier | 54d220e | 2015-07-30 16:20:06 -0700 | [diff] [blame] | 434 | static Bin BinTypeForNativeRelocationType(NativeObjectRelocationType type); |
| 435 | |
Vladimir Marko | 05792b9 | 2015-08-03 11:56:49 +0100 | [diff] [blame] | 436 | uintptr_t NativeOffsetInImage(void* obj); |
| 437 | |
Mathieu Chartier | 4b00d34 | 2015-11-13 10:42:08 -0800 | [diff] [blame] | 438 | // Location of where the object will be when the image is loaded at runtime. |
Vladimir Marko | 05792b9 | 2015-08-03 11:56:49 +0100 | [diff] [blame] | 439 | template <typename T> |
Jeff Hao | dcdc85b | 2015-12-04 14:06:18 -0800 | [diff] [blame] | 440 | T* NativeLocationInImage(T* obj, const char* oat_filename) SHARED_REQUIRES(Locks::mutator_lock_); |
Andreas Gampe | 245ee00 | 2014-12-04 21:25:04 -0800 | [diff] [blame] | 441 | |
Mathieu Chartier | 4b00d34 | 2015-11-13 10:42:08 -0800 | [diff] [blame] | 442 | // Location of where the temporary copy of the object currently is. |
| 443 | template <typename T> |
Jeff Hao | dcdc85b | 2015-12-04 14:06:18 -0800 | [diff] [blame] | 444 | T* NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) SHARED_REQUIRES(Locks::mutator_lock_); |
Mathieu Chartier | 4b00d34 | 2015-11-13 10:42:08 -0800 | [diff] [blame] | 445 | |
Mathieu Chartier | da5b28a | 2015-11-05 08:03:47 -0800 | [diff] [blame] | 446 | // Return true of obj is inside of the boot image space. This may only return true if we are |
| 447 | // compiling an app image. |
| 448 | bool IsInBootImage(const void* obj) const; |
| 449 | |
| 450 | // Return true if ptr is within the boot oat file. |
| 451 | bool IsInBootOatFile(const void* ptr) const; |

  const char* GetOatFilename(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);

  const char* GetDefaultOatFilename() const {
    return default_oat_filename_;
  }

  ImageInfo& GetImageInfo(const char* oat_filename);
  const ImageInfo& GetConstImageInfo(const char* oat_filename) const;
  const ImageInfo& GetImageInfo(size_t index) const;

  const CompilerDriver& compiler_driver_;

  // Beginning target image address for the first image.
  uint8_t* global_image_begin_;

  // Offset from image_begin_ to where the first object is in image_.
  size_t image_objects_offset_begin_;

  // Oat file with code for this image.
  OatFile* oat_file_;

  // Pointer arrays that need to be updated. Since these are just plain int and long arrays, we
  // need to keep track of them explicitly. These include vtable arrays, iftable arrays, and dex
  // caches.
  std::unordered_map<mirror::PointerArray*, Bin> pointer_arrays_;

  // Saved hash codes. We use these to restore lock words that were temporarily used to hold
  // forwarding addresses, as well as to copy over hash codes.
  std::unordered_map<mirror::Object*, uint32_t> saved_hashcode_map_;

  // Boolean flags.
  const bool compile_pic_;
  const bool compile_app_image_;

  // Cache the boot image space in this class for faster lookups.
  gc::space::ImageSpace* boot_image_space_;

  // Size of pointers on the target architecture.
  size_t target_ptr_size_;

  // Mapping of oat filename to image data.
  std::unordered_map<std::string, ImageInfo> image_info_map_;

  // Cached size of the intern table for when we allocate memory.
  size_t intern_table_bytes_;

  // ArtField, ArtMethod relocating map. These are allocated as arrays of structs but we want to
  // have one entry per art field for convenience. ArtFields are placed right after the end of the
  // image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields.
  struct NativeObjectRelocation {
    const char* oat_filename;
    uintptr_t offset;
    NativeObjectRelocationType type;

    bool IsArtMethodRelocation() const {
      return type == kNativeObjectRelocationTypeArtMethodClean ||
          type == kNativeObjectRelocationTypeArtMethodDirty;
    }
  };
  std::unordered_map<void*, NativeObjectRelocation> native_object_relocations_;

  // Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image.
  ArtMethod* image_methods_[ImageHeader::kImageMethodsCount];
  // Fake length prefixed array for image methods. This array does not contain the actual
  // ArtMethods. We only use it for the header and relocation addresses.
  LengthPrefixedArray<ArtMethod> image_method_array_;

  // Counters for measurements, used for logging only.
  uint64_t dirty_methods_;
  uint64_t clean_methods_;

  // Prune class memoization table to speed up ContainsBootClassLoaderNonImageClass.
  std::unordered_map<mirror::Class*, bool> prune_class_memo_;

  // Class loaders with a class table to write out. There should only be one class loader because
  // dex2oat loads the dex files to be compiled into a single class loader. For the boot image,
  // null is a valid entry.
  std::unordered_set<mirror::ClassLoader*> class_loaders_;

  // Number of image class table bytes.
  size_t class_table_bytes_;

  // Which mode the image is stored as, see image.h
  const ImageHeader::StorageMode image_storage_mode_;

  // Map of dex files to the oat filenames that they were compiled into.
  const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map_;
  const std::vector<const char*> oat_filenames_;
  const char* default_oat_filename_;

  friend class ContainsBootClassLoaderNonImageClassVisitor;
  friend class FixupClassVisitor;
  friend class FixupRootVisitor;
  friend class FixupVisitor;
  friend class NativeLocationVisitor;
  friend class NonImageClassesVisitor;
  DISALLOW_COPY_AND_ASSIGN(ImageWriter);
};

}  // namespace art

#endif  // ART_COMPILER_IMAGE_WRITER_H_