blob: acd16813cb5f10006041c3c5c3ee1cd89264677c [file] [log] [blame]
Brian Carlstrom7940e442013-07-12 13:46:57 -07001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Brian Carlstromfc0e3212013-07-17 14:40:12 -070017#ifndef ART_COMPILER_IMAGE_WRITER_H_
18#define ART_COMPILER_IMAGE_WRITER_H_
Brian Carlstrom7940e442013-07-12 13:46:57 -070019
20#include <stdint.h>
Evgenii Stepanov1e133742015-05-20 12:30:59 -070021#include "base/memory_tool.h"
Brian Carlstrom7940e442013-07-12 13:46:57 -070022
23#include <cstddef>
Ian Rogers700a4022014-05-19 16:49:03 -070024#include <memory>
Brian Carlstrom7940e442013-07-12 13:46:57 -070025#include <set>
Mathieu Chartier496577f2016-09-20 15:33:31 -070026#include <stack>
Brian Carlstrom7940e442013-07-12 13:46:57 -070027#include <string>
Igor Murashkinf5b4c502014-11-14 15:01:59 -080028#include <ostream>
Brian Carlstrom7940e442013-07-12 13:46:57 -070029
Vladimir Marko80afd022015-05-19 18:08:00 +010030#include "base/bit_utils.h"
Vladimir Marko944da602016-02-19 12:27:55 +000031#include "base/dchecked_vector.h"
Andreas Gampe542451c2016-07-26 09:02:02 -070032#include "base/enums.h"
Alex Lighte64300b2015-12-15 15:02:47 -080033#include "base/length_prefixed_array.h"
Igor Murashkin46774762014-10-22 11:37:02 -070034#include "base/macros.h"
Brian Carlstrom7940e442013-07-12 13:46:57 -070035#include "driver/compiler_driver.h"
Mathieu Chartierfd04b6f2014-11-14 19:34:18 -080036#include "gc/space/space.h"
Mathieu Chartierceb07b32015-12-10 09:33:21 -080037#include "image.h"
Mathieu Chartiere401d142015-04-22 13:56:20 -070038#include "lock_word.h"
Brian Carlstrom7940e442013-07-12 13:46:57 -070039#include "mem_map.h"
40#include "oat_file.h"
41#include "mirror/dex_cache.h"
42#include "os.h"
43#include "safe_map.h"
Mathieu Chartiere401d142015-04-22 13:56:20 -070044#include "utils.h"
Brian Carlstrom7940e442013-07-12 13:46:57 -070045
46namespace art {
Mathieu Chartierda5b28a2015-11-05 08:03:47 -080047namespace gc {
48namespace space {
49class ImageSpace;
50} // namespace space
51} // namespace gc
Brian Carlstrom7940e442013-07-12 13:46:57 -070052
Mathieu Chartier1f47b672016-01-07 16:29:01 -080053class ClassTable;
54
// Sentinel file descriptor: callers pass this to Write() to mean "no fd was
// provided; open the image/oat file by name instead" (see Write() comments).
static constexpr int kInvalidFd = -1;
Mathieu Chartiera90c7722015-10-29 15:41:36 -070056
Brian Carlstrom7940e442013-07-12 13:46:57 -070057// Write a Space built during compilation for use during execution.
Igor Murashkin46774762014-10-22 11:37:02 -070058class ImageWriter FINAL {
Brian Carlstrom7940e442013-07-12 13:46:57 -070059 public:
Mathieu Chartierda5b28a2015-11-05 08:03:47 -080060 ImageWriter(const CompilerDriver& compiler_driver,
61 uintptr_t image_begin,
62 bool compile_pic,
Mathieu Chartierceb07b32015-12-10 09:33:21 -080063 bool compile_app_image,
Jeff Haodcdc85b2015-12-04 14:06:18 -080064 ImageHeader::StorageMode image_storage_mode,
Vladimir Marko944da602016-02-19 12:27:55 +000065 const std::vector<const char*>& oat_filenames,
66 const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map);
Brian Carlstrom7940e442013-07-12 13:46:57 -070067
Vladimir Markof4da6752014-08-01 19:04:18 +010068 bool PrepareImageAddressSpace();
69
70 bool IsImageAddressSpaceReady() const {
Vladimir Marko944da602016-02-19 12:27:55 +000071 DCHECK(!image_infos_.empty());
72 for (const ImageInfo& image_info : image_infos_) {
Jeff Haodcdc85b2015-12-04 14:06:18 -080073 if (image_info.image_roots_address_ == 0u) {
74 return false;
75 }
76 }
Vladimir Marko944da602016-02-19 12:27:55 +000077 return true;
Vladimir Markof4da6752014-08-01 19:04:18 +010078 }
79
Mathieu Chartiere401d142015-04-22 13:56:20 -070080 template <typename T>
Andreas Gampebdf7f1c2016-08-30 16:38:47 -070081 T* GetImageAddress(T* object) const REQUIRES_SHARED(Locks::mutator_lock_) {
Jeff Haodcdc85b2015-12-04 14:06:18 -080082 if (object == nullptr || IsInBootImage(object)) {
83 return object;
84 } else {
Vladimir Marko944da602016-02-19 12:27:55 +000085 size_t oat_index = GetOatIndex(object);
86 const ImageInfo& image_info = GetImageInfo(oat_index);
Jeff Haodcdc85b2015-12-04 14:06:18 -080087 return reinterpret_cast<T*>(image_info.image_begin_ + GetImageOffset(object));
88 }
Vladimir Markof4da6752014-08-01 19:04:18 +010089 }
90
Andreas Gampebdf7f1c2016-08-30 16:38:47 -070091 ArtMethod* GetImageMethodAddress(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartiere401d142015-04-22 13:56:20 -070092
Vladimir Marko05792b92015-08-03 11:56:49 +010093 template <typename PtrType>
94 PtrType GetDexCacheArrayElementImageAddress(const DexFile* dex_file, uint32_t offset)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -070095 const REQUIRES_SHARED(Locks::mutator_lock_) {
Vladimir Marko944da602016-02-19 12:27:55 +000096 auto oat_it = dex_file_oat_index_map_.find(dex_file);
97 DCHECK(oat_it != dex_file_oat_index_map_.end());
98 const ImageInfo& image_info = GetImageInfo(oat_it->second);
Jeff Haodcdc85b2015-12-04 14:06:18 -080099 auto it = image_info.dex_cache_array_starts_.find(dex_file);
100 DCHECK(it != image_info.dex_cache_array_starts_.end());
Vladimir Marko05792b92015-08-03 11:56:49 +0100101 return reinterpret_cast<PtrType>(
Jeff Haodcdc85b2015-12-04 14:06:18 -0800102 image_info.image_begin_ + image_info.bin_slot_offsets_[kBinDexCacheArray] +
103 it->second + offset);
Vladimir Marko20f85592015-03-19 10:07:02 +0000104 }
105
  // Offset of the oat file `oat_index` from the start of the oat files; only
  // valid once the previous oat file's layout has been recorded.
  size_t GetOatFileOffset(size_t oat_index) const {
    return GetImageInfo(oat_index).oat_offset_;
  }
109
  // Target begin address of the ELF file containing oat file `oat_index`;
  // valid after UpdateOatFileLayout() for that index.
  const uint8_t* GetOatFileBegin(size_t oat_index) const {
    return GetImageInfo(oat_index).oat_file_begin_;
  }
Vladimir Markof4da6752014-08-01 19:04:18 +0100113
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800114 // If image_fd is not kInvalidFd, then we use that for the image file. Otherwise we open
Jeff Haodcdc85b2015-12-04 14:06:18 -0800115 // the names in image_filenames.
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800116 // If oat_fd is not kInvalidFd, then we use that for the oat file. Otherwise we open
117 // the names in oat_filenames.
Mathieu Chartiera90c7722015-10-29 15:41:36 -0700118 bool Write(int image_fd,
Jeff Haodcdc85b2015-12-04 14:06:18 -0800119 const std::vector<const char*>& image_filenames,
Vladimir Marko944da602016-02-19 12:27:55 +0000120 const std::vector<const char*>& oat_filenames)
Mathieu Chartier90443472015-07-16 20:32:27 -0700121 REQUIRES(!Locks::mutator_lock_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700122
Vladimir Marko944da602016-02-19 12:27:55 +0000123 uintptr_t GetOatDataBegin(size_t oat_index) {
124 return reinterpret_cast<uintptr_t>(GetImageInfo(oat_index).oat_data_begin_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700125 }
126
Vladimir Marko944da602016-02-19 12:27:55 +0000127 // Get the index of the oat file containing the dex file.
128 //
129 // This "oat_index" is used to retrieve information about the the memory layout
130 // of the oat file and its associated image file, needed for link-time patching
131 // of references to the image or across oat files.
132 size_t GetOatIndexForDexFile(const DexFile* dex_file) const;
133
134 // Get the index of the oat file containing the dex file served by the dex cache.
135 size_t GetOatIndexForDexCache(mirror::DexCache* dex_cache) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700136 REQUIRES_SHARED(Locks::mutator_lock_);
Jeff Haodcdc85b2015-12-04 14:06:18 -0800137
Vladimir Marko944da602016-02-19 12:27:55 +0000138 // Update the oat layout for the given oat file.
139 // This will make the oat_offset for the next oat file valid.
140 void UpdateOatFileLayout(size_t oat_index,
141 size_t oat_loaded_size,
142 size_t oat_data_offset,
143 size_t oat_data_size);
144 // Update information about the oat header, i.e. checksum and trampoline offsets.
145 void UpdateOatFileHeader(size_t oat_index, const OatHeader& oat_header);
Jeff Haodcdc85b2015-12-04 14:06:18 -0800146
Brian Carlstrom7940e442013-07-12 13:46:57 -0700147 private:
Mathieu Chartier496577f2016-09-20 15:33:31 -0700148 using WorkStack = std::stack<std::pair<mirror::Object*, size_t>>;
149
Brian Carlstrom7940e442013-07-12 13:46:57 -0700150 bool AllocMemory();
151
Mathieu Chartier31e89252013-08-28 11:29:12 -0700152 // Mark the objects defined in this space in the given live bitmap.
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700153 void RecordImageAllocations() REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartier31e89252013-08-28 11:29:12 -0700154
  // Classify different kinds of bins that objects end up getting packed into during image writing.
  // Ordered from dirtiest to cleanest (until ArtMethods), so that objects expected to be dirtied
  // at runtime share pages with each other rather than with clean objects.
  enum Bin {
    kBinMiscDirty,                    // Dex caches, object locks, etc...
    kBinClassVerified,                // Class verified, but initializers haven't been run.
    // Unknown mix of clean/dirty:
    kBinRegular,
    kBinClassInitialized,             // Class initializers have been run.
    // All classes get their own bins since their fields often dirty.
    kBinClassInitializedFinalStatics, // Class initializers have been run, no non-final statics.
    // Likely-clean:
    kBinString,                       // [String] Almost always immutable (except for obj header).
    // Add more bins here if we add more segregation code.
    // Non mirror fields must be below.
    // ArtFields should be always clean.
    kBinArtField,
    // If the class is initialized, then the ArtMethods are probably clean.
    kBinArtMethodClean,
    // ArtMethods may be dirty if the class has native methods or a declaring class that isn't
    // initialized.
    kBinArtMethodDirty,
    // IMT (clean).
    kBinImTable,
    // Conflict tables (clean).
    kBinIMTConflictTable,
    // Runtime methods (always clean, do not have a length prefix array).
    kBinRuntimeMethod,
    // Dex cache arrays have a special slot for PC-relative addressing. Since they are
    // huge, and as such their dirtiness is not important for the clean/dirty separation,
    // we arbitrarily keep them at the end of the native data.
    kBinDexCacheArray,                // Arrays belonging to dex cache.
    kBinSize,                         // Number of bins (not a real bin).
    // Number of bins which are for mirror objects.
    kBinMirrorCount = kBinArtField,
  };
  friend std::ostream& operator<<(std::ostream& stream, const Bin& bin);
191
  // Kinds of native (non-mirror) data relocated into the image; selects the
  // destination bin (see BinTypeForNativeRelocationType) and fix-up behavior.
  enum NativeObjectRelocationType {
    kNativeObjectRelocationTypeArtField,
    kNativeObjectRelocationTypeArtFieldArray,
    kNativeObjectRelocationTypeArtMethodClean,
    kNativeObjectRelocationTypeArtMethodArrayClean,
    kNativeObjectRelocationTypeArtMethodDirty,
    kNativeObjectRelocationTypeArtMethodArrayDirty,
    kNativeObjectRelocationTypeRuntimeMethod,
    kNativeObjectRelocationTypeIMTable,
    kNativeObjectRelocationTypeIMTConflictTable,
    kNativeObjectRelocationTypeDexCacheArray,
  };
  friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type);
205
  // Well-known trampolines/bridges whose oat-file offsets are cached per image
  // (see ImageInfo::oat_address_offsets_ and GetOatAddress()).
  enum OatAddress {
    kOatAddressInterpreterToInterpreterBridge,
    kOatAddressInterpreterToCompiledCodeBridge,
    kOatAddressJNIDlsymLookup,
    kOatAddressQuickGenericJNITrampoline,
    kOatAddressQuickIMTConflictTrampoline,
    kOatAddressQuickResolutionTrampoline,
    kOatAddressQuickToInterpreterBridge,
    // Number of elements in the enum.
    kOatAddressCount,
  };
  friend std::ostream& operator<<(std::ostream& stream, const OatAddress& oat_address);
218
  // Number of bits needed to encode the bin number in an object's lock word.
  static constexpr size_t kBinBits = MinimumBitsToStore<uint32_t>(kBinMirrorCount - 1);
  // uint32 = typeof(lockword_)
  // Subtract read barrier bits since we want these to remain 0, or else it may result in DCHECK
  // failures due to invalid read barrier bits during object field reads.
  static const size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits - LockWord::kGCStateSize;
  // Mask of the bin bits, i.e. 111000.....0.
  static const size_t kBinMask = ((static_cast<size_t>(1) << kBinBits) - 1) << kBinShift;
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800226
  // We use the lock word to store the bin # and bin index of the object in the image.
  //
  // The struct size must be exactly sizeof(LockWord), currently 32-bits, since this will end up
  // stored in the lock word bit-for-bit when object forwarding addresses are being calculated.
  struct BinSlot {
    // Reconstruct a BinSlot from a raw lock word value.
    explicit BinSlot(uint32_t lockword);
    // Build a BinSlot from a bin number and a byte index within that bin.
    BinSlot(Bin bin, uint32_t index);

    // The bin an object belongs to, i.e. regular, class/verified, class/initialized, etc.
    Bin GetBin() const;
    // The offset in bytes from the beginning of the bin. Aligned to object size.
    uint32_t GetIndex() const;
    // Pack into a single uint32_t, for storing into a lock word.
    uint32_t Uint32Value() const { return lockword_; }
    // Comparison operator for map support.
    bool operator<(const BinSlot& other) const { return lockword_ < other.lockword_; }

   private:
    // Must be the same size as LockWord, any larger and we would truncate the data.
    const uint32_t lockword_;
  };
248
  // Per-oat-file state of one image being written: the memory map being filled
  // in, the target layout (addresses/sizes of the image and its oat file), bin
  // slot bookkeeping, and the intern/class tables to be serialized.
  struct ImageInfo {
    ImageInfo();
    ImageInfo(ImageInfo&&) = default;

    // Create the image sections into the out sections variable, returns the size of the image
    // excluding the bitmap.
    size_t CreateImageSections(ImageSection* out_sections) const;

    std::unique_ptr<MemMap> image_;  // Memory mapped for generating the image.

    // Target begin of this image. Notes: It is not valid to write here, this is the address
    // of the target image, not necessarily where image_ is mapped. The address is only valid
    // after layouting (otherwise null).
    uint8_t* image_begin_ = nullptr;

    // Offset to the free space in image_, initially size of image header.
    size_t image_end_ = RoundUp(sizeof(ImageHeader), kObjectAlignment);
    uint32_t image_roots_address_ = 0;  // The image roots address in the image.
    size_t image_offset_ = 0;  // Offset of this image from the start of the first image.

    // Image size is the *address space* covered by this image. As the live bitmap is aligned
    // to the page size, the live bitmap will cover more address space than necessary. But live
    // bitmaps may not overlap, so an image has a "shadow," which is accounted for in the size.
    // The next image may only start at image_begin_ + image_size_ (which is guaranteed to be
    // page-aligned).
    size_t image_size_ = 0;

    // Oat data.
    // Offset of the oat file for this image from start of oat files. This is
    // valid when the previous oat file has been written.
    size_t oat_offset_ = 0;
    // Layout of the loaded ELF file containing the oat file, valid after UpdateOatFileLayout().
    const uint8_t* oat_file_begin_ = nullptr;
    size_t oat_loaded_size_ = 0;
    const uint8_t* oat_data_begin_ = nullptr;
    size_t oat_size_ = 0;  // Size of the corresponding oat data.
    // The oat header checksum, valid after UpdateOatFileHeader().
    uint32_t oat_checksum_ = 0u;

    // Image bitmap which lets us know where the objects inside of the image reside.
    std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;

    // The start offsets of the dex cache arrays (within the dex cache array bin).
    SafeMap<const DexFile*, size_t> dex_cache_array_starts_;

    // Offset from oat_data_begin_ to the stubs, indexed by OatAddress.
    uint32_t oat_address_offsets_[kOatAddressCount] = {};

    // Bin slot tracking for dirty object packing.
    size_t bin_slot_sizes_[kBinSize] = {};  // Number of bytes in a bin.
    size_t bin_slot_offsets_[kBinSize] = {};  // Number of bytes in previous bins.
    size_t bin_slot_count_[kBinSize] = {};  // Number of objects in a bin.

    // Cached size of the intern table for when we allocate memory.
    size_t intern_table_bytes_ = 0;

    // Number of image class table bytes.
    size_t class_table_bytes_ = 0;

    // Intern table associated with this image for serialization.
    std::unique_ptr<InternTable> intern_table_;

    // Class table associated with this image for serialization.
    std::unique_ptr<ClassTable> class_table_;
  };
314
Mathieu Chartier31e89252013-08-28 11:29:12 -0700315 // We use the lock word to store the offset of the object in the image.
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800316 void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700317 REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartierd39645e2015-06-09 17:50:29 -0700318 void SetImageOffset(mirror::Object* object, size_t offset)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700319 REQUIRES_SHARED(Locks::mutator_lock_);
Ian Rogersb0fa5dc2014-04-28 16:47:08 -0700320 bool IsImageOffsetAssigned(mirror::Object* object) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700321 REQUIRES_SHARED(Locks::mutator_lock_);
322 size_t GetImageOffset(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartiere401d142015-04-22 13:56:20 -0700323 void UpdateImageOffset(mirror::Object* obj, uintptr_t offset)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700324 REQUIRES_SHARED(Locks::mutator_lock_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700325
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700326 void PrepareDexCacheArraySlots() REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartier496577f2016-09-20 15:33:31 -0700327 void AssignImageBinSlot(mirror::Object* object, size_t oat_index)
328 REQUIRES_SHARED(Locks::mutator_lock_);
329 mirror::Object* TryAssignBinSlot(WorkStack& work_stack, mirror::Object* obj, size_t oat_index)
330 REQUIRES_SHARED(Locks::mutator_lock_);
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800331 void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700332 REQUIRES_SHARED(Locks::mutator_lock_);
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800333 bool IsImageBinSlotAssigned(mirror::Object* object) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700334 REQUIRES_SHARED(Locks::mutator_lock_);
335 BinSlot GetImageBinSlot(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800336
Jeff Haodcdc85b2015-12-04 14:06:18 -0800337 void AddDexCacheArrayRelocation(void* array, size_t offset, mirror::DexCache* dex_cache)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700338 REQUIRES_SHARED(Locks::mutator_lock_);
339 void AddMethodPointerArray(mirror::PointerArray* arr) REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartiere401d142015-04-22 13:56:20 -0700340
Alex Lighta59dd802014-07-02 16:28:08 -0700341 static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700342 REQUIRES_SHARED(Locks::mutator_lock_) {
Alex Lighta59dd802014-07-02 16:28:08 -0700343 return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
344 }
345
Ian Rogersb0fa5dc2014-04-28 16:47:08 -0700346 mirror::Object* GetLocalAddress(mirror::Object* object) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700347 REQUIRES_SHARED(Locks::mutator_lock_) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700348 size_t offset = GetImageOffset(object);
Vladimir Marko944da602016-02-19 12:27:55 +0000349 size_t oat_index = GetOatIndex(object);
350 const ImageInfo& image_info = GetImageInfo(oat_index);
Jeff Haodcdc85b2015-12-04 14:06:18 -0800351 uint8_t* dst = image_info.image_->Begin() + offset;
Brian Carlstrom7940e442013-07-12 13:46:57 -0700352 return reinterpret_cast<mirror::Object*>(dst);
353 }
354
Mathieu Chartierda5b28a2015-11-05 08:03:47 -0800355 // Returns the address in the boot image if we are compiling the app image.
356 const uint8_t* GetOatAddress(OatAddress type) const;
357
Jeff Haodcdc85b2015-12-04 14:06:18 -0800358 const uint8_t* GetOatAddressForOffset(uint32_t offset, const ImageInfo& image_info) const {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700359 // With Quick, code is within the OatFile, as there are all in one
Jeff Haodcdc85b2015-12-04 14:06:18 -0800360 // .o ELF object. But interpret it as signed.
361 DCHECK_LE(static_cast<int32_t>(offset), static_cast<int32_t>(image_info.oat_size_));
362 DCHECK(image_info.oat_data_begin_ != nullptr);
363 return offset == 0u ? nullptr : image_info.oat_data_begin_ + static_cast<int32_t>(offset);
Mathieu Chartiere401d142015-04-22 13:56:20 -0700364 }
365
Brian Carlstrom7940e442013-07-12 13:46:57 -0700366 // Returns true if the class was in the original requested image classes list.
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700367 bool KeepClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700368
369 // Debug aid that list of requested image classes.
370 void DumpImageClasses();
371
372 // Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
373 void ComputeLazyFieldsForImageClasses()
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700374 REQUIRES_SHARED(Locks::mutator_lock_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700375
Brian Carlstrom7940e442013-07-12 13:46:57 -0700376 // Remove unwanted classes from various roots.
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700377 void PruneNonImageClasses() REQUIRES_SHARED(Locks::mutator_lock_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700378
379 // Verify unwanted classes removed.
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700380 void CheckNonImageClassesRemoved() REQUIRES_SHARED(Locks::mutator_lock_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700381 static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700382 REQUIRES_SHARED(Locks::mutator_lock_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700383
384 // Lays out where the image objects will be at runtime.
Vladimir Markof4da6752014-08-01 19:04:18 +0100385 void CalculateNewObjectOffsets()
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700386 REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartier496577f2016-09-20 15:33:31 -0700387 void ProcessWorkStack(WorkStack* work_stack)
388 REQUIRES_SHARED(Locks::mutator_lock_);
Vladimir Marko944da602016-02-19 12:27:55 +0000389 void CreateHeader(size_t oat_index)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700390 REQUIRES_SHARED(Locks::mutator_lock_);
Vladimir Marko944da602016-02-19 12:27:55 +0000391 mirror::ObjectArray<mirror::Object>* CreateImageRoots(size_t oat_index) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700392 REQUIRES_SHARED(Locks::mutator_lock_);
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800393 void CalculateObjectBinSlots(mirror::Object* obj)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700394 REQUIRES_SHARED(Locks::mutator_lock_);
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800395 void UnbinObjectsIntoOffset(mirror::Object* obj)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700396 REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700397
Mathieu Chartier496577f2016-09-20 15:33:31 -0700398 static void EnsureBinSlotAssignedCallback(mirror::Object* obj, void* arg)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700399 REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartier496577f2016-09-20 15:33:31 -0700400 static void DeflateMonitorCallback(mirror::Object* obj, void* arg)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700401 REQUIRES_SHARED(Locks::mutator_lock_);
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800402 static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700403 REQUIRES_SHARED(Locks::mutator_lock_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700404
405 // Creates the contiguous image in memory and adjusts pointers.
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700406 void CopyAndFixupNativeData(size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);
407 void CopyAndFixupObjects() REQUIRES_SHARED(Locks::mutator_lock_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700408 static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700409 REQUIRES_SHARED(Locks::mutator_lock_);
410 void CopyAndFixupObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
Jeff Haodcdc85b2015-12-04 14:06:18 -0800411 void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700412 REQUIRES_SHARED(Locks::mutator_lock_);
413 void CopyAndFixupImTable(ImTable* orig, ImTable* copy) REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartiere42888f2016-04-14 10:49:19 -0700414 void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700415 REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartierc7853442015-03-27 14:35:38 -0700416 void FixupClass(mirror::Class* orig, mirror::Class* copy)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700417 REQUIRES_SHARED(Locks::mutator_lock_);
Ian Rogersef7d42f2014-01-06 12:55:46 -0800418 void FixupObject(mirror::Object* orig, mirror::Object* copy)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700419 REQUIRES_SHARED(Locks::mutator_lock_);
Vladimir Marko05792b92015-08-03 11:56:49 +0100420 void FixupDexCache(mirror::DexCache* orig_dex_cache, mirror::DexCache* copy_dex_cache)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700421 REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartiera808bac2015-11-05 16:33:15 -0800422 void FixupPointerArray(mirror::Object* dst,
423 mirror::PointerArray* arr,
424 mirror::Class* klass,
425 Bin array_type)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700426 REQUIRES_SHARED(Locks::mutator_lock_);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700427
Mingyao Yang98d1cc82014-05-15 17:02:16 -0700428 // Get quick code for non-resolution/imt_conflict/abstract method.
Jeff Haodcdc85b2015-12-04 14:06:18 -0800429 const uint8_t* GetQuickCode(ArtMethod* method,
430 const ImageInfo& image_info,
431 bool* quick_is_interpreted)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700432 REQUIRES_SHARED(Locks::mutator_lock_);
Mingyao Yang98d1cc82014-05-15 17:02:16 -0700433
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800434 // Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins.
Jeff Haodcdc85b2015-12-04 14:06:18 -0800435 size_t GetBinSizeSum(ImageInfo& image_info, Bin up_to = kBinSize) const;
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800436
Mathieu Chartiere401d142015-04-22 13:56:20 -0700437 // Return true if a method is likely to be dirtied at runtime.
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700438 bool WillMethodBeDirty(ArtMethod* m) const REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartiere401d142015-04-22 13:56:20 -0700439
440 // Assign the offset for an ArtMethod.
Jeff Haodcdc85b2015-12-04 14:06:18 -0800441 void AssignMethodOffset(ArtMethod* method,
442 NativeObjectRelocationType type,
Vladimir Marko944da602016-02-19 12:27:55 +0000443 size_t oat_index)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700444 REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartier54d220e2015-07-30 16:20:06 -0700445
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700446 void TryAssignImTableOffset(ImTable* imt, size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);
Artem Udovichenkoa62cb9b2016-06-30 09:18:25 +0000447
Mathieu Chartiere42888f2016-04-14 10:49:19 -0700448 // Assign the offset for an IMT conflict table. Does nothing if the table already has a native
449 // relocation.
450 void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700451 REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartiere42888f2016-04-14 10:49:19 -0700452
Mathieu Chartiera808bac2015-11-05 16:33:15 -0800453 // Return true if klass is loaded by the boot class loader but not in the boot image.
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700454 bool IsBootClassLoaderNonImageClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartierda5b28a2015-11-05 08:03:47 -0800455
Mathieu Chartier901e0702016-02-19 13:42:48 -0800456 // Return true if klass depends on a boot class loader non image class. We want to prune these
457 // classes since we do not want any boot class loader classes in the image. This means that
Mathieu Chartiera808bac2015-11-05 16:33:15 -0800458 // we also cannot have any classes which refer to these boot class loader non image classes.
Mathieu Chartier901e0702016-02-19 13:42:48 -0800459 // PruneAppImageClass also prunes if klass depends on a non-image class according to the compiler
460 // driver.
461 bool PruneAppImageClass(mirror::Class* klass)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700462 REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartierda5b28a2015-11-05 08:03:47 -0800463
Mathieu Chartier945c1c12015-11-24 15:37:12 -0800464 // early_exit is true if we had a cyclic dependency anywhere down the chain.
Mathieu Chartier901e0702016-02-19 13:42:48 -0800465 bool PruneAppImageClassInternal(mirror::Class* klass,
466 bool* early_exit,
467 std::unordered_set<mirror::Class*>* visited)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700468 REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartier945c1c12015-11-24 15:37:12 -0800469
Mathieu Chartier496577f2016-09-20 15:33:31 -0700470 bool IsMultiImage() const {
471 return image_infos_.size() > 1;
472 }
473
Mathieu Chartier54d220e2015-07-30 16:20:06 -0700474 static Bin BinTypeForNativeRelocationType(NativeObjectRelocationType type);
475
  // Return the offset of a native object (e.g. an ArtField/ArtMethod) within its image.
  // NOTE(review): presumably requires that a native relocation was already assigned for obj —
  // confirm against the definition in image_writer.cc.
  uintptr_t NativeOffsetInImage(void* obj) REQUIRES_SHARED(Locks::mutator_lock_);
Vladimir Marko05792b92015-08-03 11:56:49 +0100477
Mathieu Chartier4b00d342015-11-13 10:42:08 -0800478 // Location of where the object will be when the image is loaded at runtime.
Vladimir Marko05792b92015-08-03 11:56:49 +0100479 template <typename T>
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700480 T* NativeLocationInImage(T* obj) REQUIRES_SHARED(Locks::mutator_lock_);
Andreas Gampe245ee002014-12-04 21:25:04 -0800481
Mathieu Chartier4b00d342015-11-13 10:42:08 -0800482 // Location of where the temporary copy of the object currently is.
483 template <typename T>
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700484 T* NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartier4b00d342015-11-13 10:42:08 -0800485
  // Return true if obj is inside of the boot image space. This may only return true if we are
487 // compiling an app image.
488 bool IsInBootImage(const void* obj) const;
489
490 // Return true if ptr is within the boot oat file.
491 bool IsInBootOatFile(const void* ptr) const;
492
Vladimir Marko944da602016-02-19 12:27:55 +0000493 // Get the index of the oat file associated with the object.
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700494 size_t GetOatIndex(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
Jeff Haodcdc85b2015-12-04 14:06:18 -0800495
Vladimir Marko944da602016-02-19 12:27:55 +0000496 // The oat index for shared data in multi-image and all data in single-image compilation.
497 size_t GetDefaultOatIndex() const {
498 return 0u;
Jeff Haodcdc85b2015-12-04 14:06:18 -0800499 }
500
Vladimir Marko944da602016-02-19 12:27:55 +0000501 ImageInfo& GetImageInfo(size_t oat_index) {
502 return image_infos_[oat_index];
503 }
504
505 const ImageInfo& GetImageInfo(size_t oat_index) const {
506 return image_infos_[oat_index];
507 }
Jeff Haodcdc85b2015-12-04 14:06:18 -0800508
  // Find a string that is already strongly interned in another image or in the boot image. Used
  // to remove duplicates in the multi-image and app-image cases.
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700511 mirror::String* FindInternedString(mirror::String* string) REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800512
Mathieu Chartiere42888f2016-04-14 10:49:19 -0700513 // Return true if there already exists a native allocation for an object.
514 bool NativeRelocationAssigned(void* ptr) const;
515
  // The compiler driver for this compilation; held by reference (not owned).
  const CompilerDriver& compiler_driver_;
517
Jeff Haodcdc85b2015-12-04 14:06:18 -0800518 // Beginning target image address for the first image.
519 uint8_t* global_image_begin_;
Vladimir Markof4da6752014-08-01 19:04:18 +0100520
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800521 // Offset from image_begin_ to where the first object is in image_.
522 size_t image_objects_offset_begin_;
523
Mathieu Chartiere401d142015-04-22 13:56:20 -0700524 // Pointer arrays that need to be updated. Since these are only some int and long arrays, we need
525 // to keep track. These include vtable arrays, iftable arrays, and dex caches.
526 std::unordered_map<mirror::PointerArray*, Bin> pointer_arrays_;
527
Mathieu Chartierd39645e2015-06-09 17:50:29 -0700528 // Saved hash codes. We use these to restore lockwords which were temporarily used to have
529 // forwarding addresses as well as copying over hash codes.
530 std::unordered_map<mirror::Object*, uint32_t> saved_hashcode_map_;
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800531
Mathieu Chartier496577f2016-09-20 15:33:31 -0700532 // Oat index map for objects.
533 std::unordered_map<mirror::Object*, uint32_t> oat_index_map_;
534
Mathieu Chartierda5b28a2015-11-05 08:03:47 -0800535 // Boolean flags.
Igor Murashkin46774762014-10-22 11:37:02 -0700536 const bool compile_pic_;
Mathieu Chartierda5b28a2015-11-05 08:03:47 -0800537 const bool compile_app_image_;
538
Mathieu Chartier2d721012014-11-10 11:08:06 -0800539 // Size of pointers on the target architecture.
Andreas Gampe542451c2016-07-26 09:02:02 -0700540 PointerSize target_ptr_size_;
Mathieu Chartier2d721012014-11-10 11:08:06 -0800541
Vladimir Marko944da602016-02-19 12:27:55 +0000542 // Image data indexed by the oat file index.
543 dchecked_vector<ImageInfo> image_infos_;
Igor Murashkinf5b4c502014-11-14 15:01:59 -0800544
Mathieu Chartiere401d142015-04-22 13:56:20 -0700545 // ArtField, ArtMethod relocating map. These are allocated as array of structs but we want to
546 // have one entry per art field for convenience. ArtFields are placed right after the end of the
547 // image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields.
Mathieu Chartier54d220e2015-07-30 16:20:06 -0700548 struct NativeObjectRelocation {
Vladimir Marko944da602016-02-19 12:27:55 +0000549 size_t oat_index;
Mathieu Chartiere401d142015-04-22 13:56:20 -0700550 uintptr_t offset;
Mathieu Chartier54d220e2015-07-30 16:20:06 -0700551 NativeObjectRelocationType type;
552
553 bool IsArtMethodRelocation() const {
554 return type == kNativeObjectRelocationTypeArtMethodClean ||
Mathieu Chartiere42888f2016-04-14 10:49:19 -0700555 type == kNativeObjectRelocationTypeArtMethodDirty ||
556 type == kNativeObjectRelocationTypeRuntimeMethod;
Mathieu Chartier54d220e2015-07-30 16:20:06 -0700557 }
Mathieu Chartiere401d142015-04-22 13:56:20 -0700558 };
Mathieu Chartier54d220e2015-07-30 16:20:06 -0700559 std::unordered_map<void*, NativeObjectRelocation> native_object_relocations_;
Mathieu Chartierc7853442015-03-27 14:35:38 -0700560
Mathieu Chartiere401d142015-04-22 13:56:20 -0700561 // Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image.
562 ArtMethod* image_methods_[ImageHeader::kImageMethodsCount];
563
564 // Counters for measurements, used for logging only.
565 uint64_t dirty_methods_;
566 uint64_t clean_methods_;
Andreas Gampe245ee002014-12-04 21:25:04 -0800567
Mathieu Chartiera808bac2015-11-05 16:33:15 -0800568 // Prune class memoization table to speed up ContainsBootClassLoaderNonImageClass.
Mathieu Chartierda5b28a2015-11-05 08:03:47 -0800569 std::unordered_map<mirror::Class*, bool> prune_class_memo_;
570
Mathieu Chartier67ad20e2015-12-09 15:41:09 -0800571 // Class loaders with a class table to write out. There should only be one class loader because
572 // dex2oat loads the dex files to be compiled into a single class loader. For the boot image,
573 // null is a valid entry.
Mathieu Chartier208a5cb2015-12-02 15:44:07 -0800574 std::unordered_set<mirror::ClassLoader*> class_loaders_;
575
Mathieu Chartierceb07b32015-12-10 09:33:21 -0800576 // Which mode the image is stored as, see image.h
577 const ImageHeader::StorageMode image_storage_mode_;
578
Vladimir Marko944da602016-02-19 12:27:55 +0000579 // The file names of oat files.
580 const std::vector<const char*>& oat_filenames_;
581
582 // Map of dex files to the indexes of oat files that they were compiled into.
583 const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map_;
Jeff Haodcdc85b2015-12-04 14:06:18 -0800584
Mathieu Chartierda5b28a2015-11-05 08:03:47 -0800585 friend class ContainsBootClassLoaderNonImageClassVisitor;
Mingyao Yang98d1cc82014-05-15 17:02:16 -0700586 friend class FixupClassVisitor;
Mathieu Chartierd39645e2015-06-09 17:50:29 -0700587 friend class FixupRootVisitor;
588 friend class FixupVisitor;
Mathieu Chartier496577f2016-09-20 15:33:31 -0700589 class GetRootsVisitor;
Mathieu Chartier4b00d342015-11-13 10:42:08 -0800590 friend class NativeLocationVisitor;
Mathieu Chartiere0671ce2015-07-28 17:23:28 -0700591 friend class NonImageClassesVisitor;
Mathieu Chartier496577f2016-09-20 15:33:31 -0700592 class VisitReferencesVisitor;
Mathieu Chartierb7ea3ac2014-03-24 16:54:46 -0700593 DISALLOW_COPY_AND_ASSIGN(ImageWriter);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700594};
595
596} // namespace art
597
Brian Carlstromfc0e3212013-07-17 14:40:12 -0700598#endif // ART_COMPILER_IMAGE_WRITER_H_