Re-enable adding intern table to image
Changed intern table to have a stack of tables similarly to
ClassTable. Adding an image intern table adds to the front of the
intern table stack. Also some cleanup.
Bug: 26317072
Change-Id: I7bbf9485b5dbbbf3707fed21e29de3beccfb8705
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 17d0f61..503b75b 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -637,7 +637,7 @@
ImageInfo& image_info = GetImageInfo(oat_filename);
const size_t length = RoundUp(image_objects_offset_begin_ +
GetBinSizeSum(image_info) +
- intern_table_bytes_ +
+ image_info.intern_table_bytes_ +
class_table_bytes_,
kPageSize);
std::string error_msg;
@@ -909,14 +909,17 @@
DCHECK(obj != nullptr);
// if it is a string, we want to intern it if its not interned.
if (obj->GetClass()->IsStringClass()) {
+ const char* oat_filename = GetOatFilename(obj);
+ ImageInfo& image_info = GetImageInfo(oat_filename);
+
// we must be an interned string that was forward referenced and already assigned
if (IsImageBinSlotAssigned(obj)) {
- DCHECK_EQ(obj, obj->AsString()->Intern());
+ DCHECK_EQ(obj, image_info.intern_table_->InternStrongImageString(obj->AsString()));
return;
}
// InternImageString allows us to intern while holding the heap bitmap lock. This is safe since
// we are guaranteed to not have GC during image writing.
- mirror::String* const interned = Runtime::Current()->GetInternTable()->InternStrongImageString(
+ mirror::String* const interned = image_info.intern_table_->InternStrongImageString(
obj->AsString());
if (obj != interned) {
if (!IsImageBinSlotAssigned(interned)) {
@@ -1249,6 +1252,15 @@
// Calculate size of the dex cache arrays slot and prepare offsets.
PrepareDexCacheArraySlots();
+ // Calculate the sizes of the intern tables.
+ for (const char* oat_filename : oat_filenames_) {
+ ImageInfo& image_info = GetImageInfo(oat_filename);
+ // Calculate how big the intern table will be after being serialized.
+ InternTable* const intern_table = image_info.intern_table_.get();
+ CHECK_EQ(intern_table->WeakSize(), 0u) << " should have strong interned all the strings";
+ image_info.intern_table_bytes_ = intern_table->WriteToMemory(nullptr);
+ }
+
// Calculate bin slot offsets.
for (const char* oat_filename : oat_filenames_) {
ImageInfo& image_info = GetImageInfo(oat_filename);
@@ -1279,7 +1291,7 @@
image_info.bin_slot_sizes_[kBinArtMethodDirty] +
image_info.bin_slot_sizes_[kBinArtMethodClean] +
image_info.bin_slot_sizes_[kBinDexCacheArray] +
- intern_table_bytes_ +
+ image_info.intern_table_bytes_ +
class_table_bytes_;
size_t image_objects = RoundUp(image_info.image_end_, kPageSize);
size_t bitmap_size =
@@ -1310,12 +1322,7 @@
relocation.offset += image_info.bin_slot_offsets_[bin_type];
}
- /* TODO: Reenable the intern table and class table. b/26317072
- // Calculate how big the intern table will be after being serialized.
- InternTable* const intern_table = runtime->GetInternTable();
- CHECK_EQ(intern_table->WeakSize(), 0u) << " should have strong interned all the strings";
- intern_table_bytes_ = intern_table->WriteToMemory(nullptr);
-
+ /* TODO: Reenable the class table. b/26317072
// Write out the class table.
ClassLinker* class_linker = runtime->GetClassLinker();
if (boot_image_space_ == nullptr) {
@@ -1378,7 +1385,7 @@
cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
// Calculate the size of the interned strings.
auto* interned_strings_section = &sections[ImageHeader::kSectionInternedStrings];
- *interned_strings_section = ImageSection(cur_pos, intern_table_bytes_);
+ *interned_strings_section = ImageSection(cur_pos, image_info.intern_table_bytes_);
cur_pos = interned_strings_section->End();
// Round up to the alignment the class table expects. See HashSet::WriteToMemory.
cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
@@ -1444,7 +1451,7 @@
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
- *roots[i] = ImageAddress(*roots[i]);
+ *roots[i] = image_writer_->GetImageAddress(*roots[i]);
}
}
@@ -1452,19 +1459,12 @@
const RootInfo& info ATTRIBUTE_UNUSED)
OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
- roots[i]->Assign(ImageAddress(roots[i]->AsMirrorPtr()));
+ roots[i]->Assign(image_writer_->GetImageAddress(roots[i]->AsMirrorPtr()));
}
}
private:
ImageWriter* const image_writer_;
-
- mirror::Object* ImageAddress(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
- const size_t offset = image_writer_->GetImageOffset(obj);
- auto* const dest = reinterpret_cast<Object*>(image_writer_->global_image_begin_ + offset);
- VLOG(compiler) << "Update root from " << obj << " to " << dest;
- return dest;
- }
};
void ImageWriter::CopyAndFixupNativeData() {
@@ -1536,26 +1536,26 @@
}
FixupRootVisitor root_visitor(this);
- /* TODO: Reenable the intern table and class table
// Write the intern table into the image.
- const ImageSection& intern_table_section = image_header->GetImageSection(
- ImageHeader::kSectionInternedStrings);
- Runtime* const runtime = Runtime::Current();
- InternTable* const intern_table = runtime->GetInternTable();
- uint8_t* const intern_table_memory_ptr =
- image_info.image_->Begin() + intern_table_section.Offset();
- const size_t intern_table_bytes = intern_table->WriteToMemory(intern_table_memory_ptr);
- CHECK_EQ(intern_table_bytes, intern_table_bytes_);
- // Fixup the pointers in the newly written intern table to contain image addresses.
- InternTable temp_intern_table;
- // Note that we require that ReadFromMemory does not make an internal copy of the elements so that
- // the VisitRoots() will update the memory directly rather than the copies.
- // This also relies on visit roots not doing any verification which could fail after we update
- // the roots to be the image addresses.
- temp_intern_table.ReadFromMemory(intern_table_memory_ptr);
- CHECK_EQ(temp_intern_table.Size(), intern_table->Size());
- temp_intern_table.VisitRoots(&root_visitor, kVisitRootFlagAllRoots);
-
+ if (image_info.intern_table_bytes_ > 0) {
+ const ImageSection& intern_table_section = image_header->GetImageSection(
+ ImageHeader::kSectionInternedStrings);
+ InternTable* const intern_table = image_info.intern_table_.get();
+ uint8_t* const intern_table_memory_ptr =
+ image_info.image_->Begin() + intern_table_section.Offset();
+ const size_t intern_table_bytes = intern_table->WriteToMemory(intern_table_memory_ptr);
+ CHECK_EQ(intern_table_bytes, image_info.intern_table_bytes_);
+ // Fixup the pointers in the newly written intern table to contain image addresses.
+ InternTable temp_intern_table;
+      // Note that we require that AddTableFromMemory does not make an internal copy of the elements
+ // the VisitRoots() will update the memory directly rather than the copies.
+ // This also relies on visit roots not doing any verification which could fail after we update
+ // the roots to be the image addresses.
+ temp_intern_table.AddTableFromMemory(intern_table_memory_ptr);
+ CHECK_EQ(temp_intern_table.Size(), intern_table->Size());
+ temp_intern_table.VisitRoots(&root_visitor, kVisitRootFlagAllRoots);
+ }
+ /* TODO: Reenable the class table writing.
// Write the class table(s) into the image. class_table_bytes_ may be 0 if there are multiple
// class loaders. Writing multiple class tables into the image is currently unsupported.
if (class_table_bytes_ > 0u) {
@@ -2110,7 +2110,6 @@
}
uint8_t* ImageWriter::GetOatFileBegin(const char* oat_filename) const {
- // DCHECK_GT(intern_table_bytes_, 0u); TODO: Reenable intern table and class table.
uintptr_t last_image_end = 0;
for (const char* oat_fn : oat_filenames_) {
const ImageInfo& image_info = GetConstImageInfo(oat_fn);
@@ -2197,4 +2196,37 @@
}
}
+ImageWriter::ImageWriter(
+ const CompilerDriver& compiler_driver,
+ uintptr_t image_begin,
+ bool compile_pic,
+ bool compile_app_image,
+ ImageHeader::StorageMode image_storage_mode,
+ const std::vector<const char*> oat_filenames,
+ const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map)
+ : compiler_driver_(compiler_driver),
+ global_image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
+ image_objects_offset_begin_(0),
+ oat_file_(nullptr),
+ compile_pic_(compile_pic),
+ compile_app_image_(compile_app_image),
+ boot_image_space_(nullptr),
+ target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
+ image_method_array_(ImageHeader::kImageMethodsCount),
+ dirty_methods_(0u),
+ clean_methods_(0u),
+ class_table_bytes_(0u),
+ image_storage_mode_(image_storage_mode),
+ dex_file_oat_filename_map_(dex_file_oat_filename_map),
+ oat_filenames_(oat_filenames),
+ default_oat_filename_(oat_filenames[0]) {
+ CHECK_NE(image_begin, 0U);
+ for (const char* oat_filename : oat_filenames) {
+ image_info_map_.emplace(oat_filename, ImageInfo());
+ }
+ std::fill_n(image_methods_, arraysize(image_methods_), nullptr);
+}
+
+ImageWriter::ImageInfo::ImageInfo() : intern_table_(new InternTable) {}
+
} // namespace art
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 78297ae..e5f7dc7 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -58,33 +58,7 @@
bool compile_app_image,
ImageHeader::StorageMode image_storage_mode,
const std::vector<const char*> oat_filenames,
- const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map)
- : compiler_driver_(compiler_driver),
- global_image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
- image_objects_offset_begin_(0),
- oat_file_(nullptr),
- compile_pic_(compile_pic),
- compile_app_image_(compile_app_image),
- boot_image_space_(nullptr),
- target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
- intern_table_bytes_(0u),
- image_method_array_(ImageHeader::kImageMethodsCount),
- dirty_methods_(0u),
- clean_methods_(0u),
- class_table_bytes_(0u),
- image_storage_mode_(image_storage_mode),
- dex_file_oat_filename_map_(dex_file_oat_filename_map),
- oat_filenames_(oat_filenames),
- default_oat_filename_(oat_filenames[0]) {
- CHECK_NE(image_begin, 0U);
- for (const char* oat_filename : oat_filenames) {
- image_info_map_.emplace(oat_filename, ImageInfo());
- }
- std::fill_n(image_methods_, arraysize(image_methods_), nullptr);
- }
-
- ~ImageWriter() {
- }
+ const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map);
bool PrepareImageAddressSpace();
@@ -237,41 +211,36 @@
};
struct ImageInfo {
- explicit ImageInfo()
- : image_begin_(nullptr),
- image_end_(RoundUp(sizeof(ImageHeader), kObjectAlignment)),
- image_roots_address_(0),
- image_offset_(0),
- image_size_(0),
- oat_offset_(0),
- bin_slot_sizes_(),
- bin_slot_offsets_(),
- bin_slot_count_() {}
+ ImageInfo();
+ ImageInfo(ImageInfo&&) = default;
std::unique_ptr<MemMap> image_; // Memory mapped for generating the image.
// Target begin of this image. Notes: It is not valid to write here, this is the address
// of the target image, not necessarily where image_ is mapped. The address is only valid
// after layouting (otherwise null).
- uint8_t* image_begin_;
+ uint8_t* image_begin_ = nullptr;
- size_t image_end_; // Offset to the free space in image_, initially size of image header.
- uint32_t image_roots_address_; // The image roots address in the image.
- size_t image_offset_; // Offset of this image from the start of the first image.
+ // Offset to the free space in image_, initially size of image header.
+ size_t image_end_ = RoundUp(sizeof(ImageHeader), kObjectAlignment);
+ uint32_t image_roots_address_ = 0; // The image roots address in the image.
+ size_t image_offset_ = 0; // Offset of this image from the start of the first image.
// Image size is the *address space* covered by this image. As the live bitmap is aligned
// to the page size, the live bitmap will cover more address space than necessary. But live
// bitmaps may not overlap, so an image has a "shadow," which is accounted for in the size.
// The next image may only start at image_begin_ + image_size_ (which is guaranteed to be
// page-aligned).
- size_t image_size_;
+ size_t image_size_ = 0;
// Oat data.
- size_t oat_offset_; // Offset of the oat file for this image from start of oat files. This is
- // valid when the previous oat file has been written.
- uint8_t* oat_data_begin_; // Start of oatdata in the corresponding oat file. This is
- // valid when the images have been layed out.
- size_t oat_size_; // Size of the corresponding oat data.
+ // Offset of the oat file for this image from start of oat files. This is
+ // valid when the previous oat file has been written.
+ size_t oat_offset_ = 0;
+ // Start of oatdata in the corresponding oat file. This is
+ // valid when the images have been layed out.
+ uint8_t* oat_data_begin_ = nullptr;
+ size_t oat_size_ = 0; // Size of the corresponding oat data.
// Image bitmap which lets us know where the objects inside of the image reside.
std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;
@@ -280,12 +249,18 @@
SafeMap<const DexFile*, size_t> dex_cache_array_starts_;
// Offset from oat_data_begin_ to the stubs.
- uint32_t oat_address_offsets_[kOatAddressCount];
+ uint32_t oat_address_offsets_[kOatAddressCount] = {};
// Bin slot tracking for dirty object packing.
- size_t bin_slot_sizes_[kBinSize]; // Number of bytes in a bin.
- size_t bin_slot_offsets_[kBinSize]; // Number of bytes in previous bins.
- size_t bin_slot_count_[kBinSize]; // Number of objects in a bin.
+ size_t bin_slot_sizes_[kBinSize] = {}; // Number of bytes in a bin.
+ size_t bin_slot_offsets_[kBinSize] = {}; // Number of bytes in previous bins.
+ size_t bin_slot_count_[kBinSize] = {}; // Number of objects in a bin.
+
+ // Cached size of the intern table for when we allocate memory.
+ size_t intern_table_bytes_ = 0;
+
+ // Intern table associated with this for serialization.
+ std::unique_ptr<InternTable> intern_table_;
};
// We use the lock word to store the offset of the object in the image.
@@ -492,9 +467,6 @@
// Mapping of oat filename to image data.
std::unordered_map<std::string, ImageInfo> image_info_map_;
- // Cached size of the intern table for when we allocate memory.
- size_t intern_table_bytes_;
-
// ArtField, ArtMethod relocating map. These are allocated as array of structs but we want to
// have one entry per art field for convenience. ArtFields are placed right after the end of the
// image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields.