Merge "Add libstdc++ as a target for buildbot builds."
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 81cd6ef..ecc9e76 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -80,7 +80,7 @@
TARGET_CORE_IMG_LOCATION := $(ART_TARGET_TEST_OUT)/core.art
# Jar files for core.art.
-TARGET_CORE_JARS := core-oj core-libart conscrypt okhttp bouncycastle
+TARGET_CORE_JARS := core-oj core-libart conscrypt okhttp bouncycastle apache-xml
HOST_CORE_JARS := $(addsuffix -hostdex,$(TARGET_CORE_JARS))
HOST_CORE_DEX_LOCATIONS := $(foreach jar,$(HOST_CORE_JARS), $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index c9af1c6..ab70367 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -205,7 +205,7 @@
LOCAL_DEX_PREOPT_IMAGE_LOCATION := $(TARGET_CORE_IMG_OUT)
ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex
- LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp
+ LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp -D jack.dex.output.multidex.legacy=true
endif
include $(BUILD_JAVA_LIBRARY)
$(5) := $$(LOCAL_INSTALLED_MODULE)
@@ -221,7 +221,7 @@
LOCAL_DEX_PREOPT_IMAGE := $(HOST_CORE_IMG_LOCATION)
ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex
- LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp
+ LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp -D jack.dex.output.multidex.legacy=true
endif
include $(BUILD_HOST_DALVIK_JAVA_LIBRARY)
$(6) := $$(LOCAL_INSTALLED_MODULE)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 99f7a2a..3d16c49 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -442,8 +442,8 @@
$$(ART_TARGET_NATIVETEST_OUT)/$$(TARGET_$(2)ARCH)/$(1) \
$$($(2)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \
$$($(2)TARGET_OUT_SHARED_LIBRARIES)/libopenjdk.so \
- $$(TARGET_OUT_JAVA_LIBRARIES)/core-libart.jar \
- $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj.jar
+ $$(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar \
+ $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar
.PHONY: $$(gtest_rule)
$$(gtest_rule): test-art-target-sync
diff --git a/compiler/Android.mk b/compiler/Android.mk
index f0bf499..4589736 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -108,7 +108,8 @@
elf_writer_debug.cc \
elf_writer_quick.cc \
image_writer.cc \
- oat_writer.cc
+ oat_writer.cc \
+ profile_assistant.cc
LIBART_COMPILER_SRC_FILES_arm := \
dex/quick/arm/assemble_arm.cc \
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 278c490..b5fd1e0 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -208,8 +208,8 @@
false,
timer_.get(),
-1,
- /* profile_file */ "",
- /* dex_to_oat_map */ nullptr));
+ /* dex_to_oat_map */ nullptr,
+ /* profile_compilation_info */ nullptr));
// We typically don't generate an image in unit tests, disable this optimization by default.
compiler_driver_->SetSupportBootImageFixup(false);
}
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
index bcf20c7..12568a4 100644
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ b/compiler/dex/quick/quick_cfi_test.cc
@@ -92,7 +92,7 @@
false,
0,
-1,
- "",
+ nullptr,
nullptr);
ClassLinker* linker = nullptr;
CompilationUnit cu(&pool, isa, &driver, linker);
diff --git a/compiler/dex/quick/x86/quick_assemble_x86_test.cc b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
index 9deabc0..b39fe4d 100644
--- a/compiler/dex/quick/x86/quick_assemble_x86_test.cc
+++ b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
@@ -73,7 +73,7 @@
false,
0,
-1,
- "",
+ nullptr,
nullptr));
cu_.reset(new CompilationUnit(pool_.get(), isa_, compiler_driver_.get(), nullptr));
DexFile::CodeItem* code_item = static_cast<DexFile::CodeItem*>(
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index 84fb432..f18fa67 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -45,7 +45,7 @@
false,
nullptr,
-1,
- "",
+ nullptr,
nullptr);
CompiledMethodStorage* storage = driver.GetCompiledMethodStorage();
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index afb4b71..043bd93 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -347,8 +347,8 @@
size_t thread_count, bool dump_stats, bool dump_passes,
const std::string& dump_cfg_file_name, bool dump_cfg_append,
CumulativeLogger* timer, int swap_fd,
- const std::string& profile_file,
- const std::unordered_map<const DexFile*, const char*>* dex_to_oat_map)
+ const std::unordered_map<const DexFile*, const char*>* dex_to_oat_map,
+ const ProfileCompilationInfo* profile_compilation_info)
: compiler_options_(compiler_options),
verification_results_(verification_results),
method_inliner_map_(method_inliner_map),
@@ -377,7 +377,8 @@
support_boot_image_fixup_(instruction_set != kMips && instruction_set != kMips64),
dex_files_for_oat_file_(nullptr),
dex_file_oat_filename_map_(dex_to_oat_map),
- compiled_method_storage_(swap_fd) {
+ compiled_method_storage_(swap_fd),
+ profile_compilation_info_(profile_compilation_info) {
DCHECK(compiler_options_ != nullptr);
DCHECK(verification_results_ != nullptr);
DCHECK(method_inliner_map_ != nullptr);
@@ -385,12 +386,6 @@
compiler_->Init();
CHECK_EQ(boot_image_, image_classes_.get() != nullptr);
-
- // Read the profile file if one is provided.
- if (!profile_file.empty()) {
- profile_compilation_info_.reset(new ProfileCompilationInfo(profile_file));
- LOG(INFO) << "Using profile data from file " << profile_file;
- }
}
CompilerDriver::~CompilerDriver() {
@@ -2306,15 +2301,11 @@
void CompilerDriver::Compile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings) {
- if (profile_compilation_info_ != nullptr) {
- if (!profile_compilation_info_->Load(dex_files)) {
- LOG(WARNING) << "Failed to load offline profile info from "
- << profile_compilation_info_->GetFilename()
- << ". No methods will be compiled";
- } else if (kDebugProfileGuidedCompilation) {
- LOG(INFO) << "[ProfileGuidedCompilation] "
- << profile_compilation_info_->DumpInfo();
- }
+ if (kDebugProfileGuidedCompilation) {
+ LOG(INFO) << "[ProfileGuidedCompilation] " <<
+ ((profile_compilation_info_ == nullptr)
+ ? "null"
+ : profile_compilation_info_->DumpInfo(&dex_files));
}
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index fa0cb9a..3847c81 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -97,8 +97,8 @@
size_t thread_count, bool dump_stats, bool dump_passes,
const std::string& dump_cfg_file_name, bool dump_cfg_append,
CumulativeLogger* timer, int swap_fd,
- const std::string& profile_file,
- const std::unordered_map<const DexFile*, const char*>* dex_to_oat_map);
+ const std::unordered_map<const DexFile*, const char*>* dex_to_oat_map,
+ const ProfileCompilationInfo* profile_compilation_info);
~CompilerDriver();
@@ -657,9 +657,6 @@
// This option may be restricted to the boot image, depending on a flag in the implementation.
std::unique_ptr<std::unordered_set<std::string>> methods_to_compile_;
- // Info for profile guided compilation.
- std::unique_ptr<ProfileCompilationInfo> profile_compilation_info_;
-
bool had_hard_verifier_failure_;
size_t thread_count_;
@@ -689,6 +686,9 @@
CompiledMethodStorage compiled_method_storage_;
+ // Info for profile guided compilation.
+ const ProfileCompilationInfo* const profile_compilation_info_;
+
friend class CompileClassVisitor;
DISALLOW_COPY_AND_ASSIGN(CompilerDriver);
};
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 9ad1bee..f8032bb 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -53,7 +53,7 @@
static const bool kDefaultGenerateDebugInfo = kIsDebugBuild;
static const bool kDefaultIncludePatchInformation = false;
static const size_t kDefaultInlineDepthLimit = 3;
- static const size_t kDefaultInlineMaxCodeUnits = 20;
+ static const size_t kDefaultInlineMaxCodeUnits = 32;
static constexpr size_t kUnsetInlineDepthLimit = -1;
static constexpr size_t kUnsetInlineMaxCodeUnits = -1;
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 17d0f61..3134297 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -635,11 +635,11 @@
bool ImageWriter::AllocMemory() {
for (const char* oat_filename : oat_filenames_) {
ImageInfo& image_info = GetImageInfo(oat_filename);
- const size_t length = RoundUp(image_objects_offset_begin_ +
- GetBinSizeSum(image_info) +
- intern_table_bytes_ +
- class_table_bytes_,
- kPageSize);
+ ImageSection unused_sections[ImageHeader::kSectionCount];
+ const size_t length = RoundUp(
+ image_info.CreateImageSections(target_ptr_size_, unused_sections),
+ kPageSize);
+
std::string error_msg;
image_info.image_.reset(MemMap::MapAnonymous("image writer image",
nullptr,
@@ -909,14 +909,17 @@
DCHECK(obj != nullptr);
// if it is a string, we want to intern it if its not interned.
if (obj->GetClass()->IsStringClass()) {
+ const char* oat_filename = GetOatFilename(obj);
+ ImageInfo& image_info = GetImageInfo(oat_filename);
+
// we must be an interned string that was forward referenced and already assigned
if (IsImageBinSlotAssigned(obj)) {
- DCHECK_EQ(obj, obj->AsString()->Intern());
+ DCHECK_EQ(obj, image_info.intern_table_->InternStrongImageString(obj->AsString()));
return;
}
// InternImageString allows us to intern while holding the heap bitmap lock. This is safe since
// we are guaranteed to not have GC during image writing.
- mirror::String* const interned = Runtime::Current()->GetInternTable()->InternStrongImageString(
+ mirror::String* const interned = image_info.intern_table_->InternStrongImageString(
obj->AsString());
if (obj != interned) {
if (!IsImageBinSlotAssigned(interned)) {
@@ -1067,6 +1070,13 @@
};
const char* oat_file = GetOatFilenameForDexCache(dex_cache);
ImageInfo& image_info = GetImageInfo(oat_file);
+ {
+ // Note: This table is only accessed from the image writer, so the lock is technically
+ // unnecessary.
+ WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ // Insert in the class table for this image.
+ image_info.class_table_->Insert(as_klass);
+ }
for (LengthPrefixedArray<ArtField>* cur_fields : fields) {
// Total array length including header.
if (cur_fields != nullptr) {
@@ -1249,6 +1259,18 @@
// Calculate size of the dex cache arrays slot and prepare offsets.
PrepareDexCacheArraySlots();
+ // Calculate the sizes of the intern tables and class tables.
+ for (const char* oat_filename : oat_filenames_) {
+ ImageInfo& image_info = GetImageInfo(oat_filename);
+ // Calculate how big the intern table will be after being serialized.
+ InternTable* const intern_table = image_info.intern_table_.get();
+ CHECK_EQ(intern_table->WeakSize(), 0u) << " should have strong interned all the strings";
+ image_info.intern_table_bytes_ = intern_table->WriteToMemory(nullptr);
+ // Calculate the size of the class table.
+ ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
+ image_info.class_table_bytes_ += image_info.class_table_->WriteToMemory(nullptr);
+ }
+
// Calculate bin slot offsets.
for (const char* oat_filename : oat_filenames_) {
ImageInfo& image_info = GetImageInfo(oat_filename);
@@ -1275,18 +1297,11 @@
ImageInfo& image_info = GetImageInfo(oat_filename);
image_info.image_begin_ = global_image_begin_ + image_offset;
image_info.image_offset_ = image_offset;
- size_t native_sections_size = image_info.bin_slot_sizes_[kBinArtField] +
- image_info.bin_slot_sizes_[kBinArtMethodDirty] +
- image_info.bin_slot_sizes_[kBinArtMethodClean] +
- image_info.bin_slot_sizes_[kBinDexCacheArray] +
- intern_table_bytes_ +
- class_table_bytes_;
- size_t image_objects = RoundUp(image_info.image_end_, kPageSize);
- size_t bitmap_size =
- RoundUp(gc::accounting::ContinuousSpaceBitmap::ComputeBitmapSize(image_objects), kPageSize);
- size_t heap_size = gc::accounting::ContinuousSpaceBitmap::ComputeHeapSize(bitmap_size);
- size_t max = std::max(heap_size, image_info.image_end_ + native_sections_size + bitmap_size);
- image_info.image_size_ = RoundUp(max, kPageSize);
+ ImageSection unused_sections[ImageHeader::kSectionCount];
+ image_info.image_size_ = RoundUp(
+ image_info.CreateImageSections(target_ptr_size_, unused_sections),
+ kPageSize);
+ // There should be no gaps until the next image.
image_offset += image_info.image_size_;
}
@@ -1310,35 +1325,51 @@
relocation.offset += image_info.bin_slot_offsets_[bin_type];
}
- /* TODO: Reenable the intern table and class table. b/26317072
- // Calculate how big the intern table will be after being serialized.
- InternTable* const intern_table = runtime->GetInternTable();
- CHECK_EQ(intern_table->WeakSize(), 0u) << " should have strong interned all the strings";
- intern_table_bytes_ = intern_table->WriteToMemory(nullptr);
-
- // Write out the class table.
- ClassLinker* class_linker = runtime->GetClassLinker();
- if (boot_image_space_ == nullptr) {
- // Compiling the boot image, add null class loader.
- class_loaders_.insert(nullptr);
- }
- // class_loaders_ usually will not be empty, but may be empty if we attempt to create an image
- // with no classes.
- if (class_loaders_.size() == 1u) {
- // Only write the class table if we have exactly one class loader. There may be cases where
- // there are multiple class loaders if a class path is passed to dex2oat.
- ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- for (mirror::ClassLoader* loader : class_loaders_) {
- ClassTable* table = class_linker->ClassTableForClassLoader(loader);
- CHECK(table != nullptr);
- class_table_bytes_ += table->WriteToMemory(nullptr);
- }
- }
- */
-
// Note that image_info.image_end_ is left at end of used mirror object section.
}
+size_t ImageWriter::ImageInfo::CreateImageSections(size_t target_ptr_size,
+ ImageSection* out_sections) const {
+ DCHECK(out_sections != nullptr);
+ // Objects section
+ auto* objects_section = &out_sections[ImageHeader::kSectionObjects];
+ *objects_section = ImageSection(0u, image_end_);
+ size_t cur_pos = objects_section->End();
+ // Add field section.
+ auto* field_section = &out_sections[ImageHeader::kSectionArtFields];
+ *field_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtField]);
+ CHECK_EQ(bin_slot_offsets_[kBinArtField], field_section->Offset());
+ cur_pos = field_section->End();
+ // Round up to the alignment the required by the method section.
+ cur_pos = RoundUp(cur_pos, ArtMethod::Alignment(target_ptr_size));
+ // Add method section.
+ auto* methods_section = &out_sections[ImageHeader::kSectionArtMethods];
+ *methods_section = ImageSection(cur_pos,
+ bin_slot_sizes_[kBinArtMethodClean] +
+ bin_slot_sizes_[kBinArtMethodDirty]);
+ CHECK_EQ(bin_slot_offsets_[kBinArtMethodClean], methods_section->Offset());
+ cur_pos = methods_section->End();
+ // Add dex cache arrays section.
+ auto* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays];
+ *dex_cache_arrays_section = ImageSection(cur_pos, bin_slot_sizes_[kBinDexCacheArray]);
+ CHECK_EQ(bin_slot_offsets_[kBinDexCacheArray], dex_cache_arrays_section->Offset());
+ cur_pos = dex_cache_arrays_section->End();
+ // Round up to the alignment the string table expects. See HashSet::WriteToMemory.
+ cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
+ // Calculate the size of the interned strings.
+ auto* interned_strings_section = &out_sections[ImageHeader::kSectionInternedStrings];
+ *interned_strings_section = ImageSection(cur_pos, intern_table_bytes_);
+ cur_pos = interned_strings_section->End();
+ // Round up to the alignment the class table expects. See HashSet::WriteToMemory.
+ cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
+ // Calculate the size of the class table section.
+ auto* class_table_section = &out_sections[ImageHeader::kSectionClassTable];
+ *class_table_section = ImageSection(cur_pos, class_table_bytes_);
+ cur_pos = class_table_section->End();
+ // Image end goes right before the start of the image bitmap.
+ return cur_pos;
+}
+
void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) {
CHECK_NE(0U, oat_loaded_size);
const char* oat_filename = oat_file_->GetLocation().c_str();
@@ -1351,48 +1382,12 @@
// Create the image sections.
ImageSection sections[ImageHeader::kSectionCount];
- // Objects section
- auto* objects_section = &sections[ImageHeader::kSectionObjects];
- *objects_section = ImageSection(0u, image_info.image_end_);
- size_t cur_pos = objects_section->End();
- // Add field section.
- auto* field_section = &sections[ImageHeader::kSectionArtFields];
- *field_section = ImageSection(cur_pos, image_info.bin_slot_sizes_[kBinArtField]);
- CHECK_EQ(image_info.bin_slot_offsets_[kBinArtField], field_section->Offset());
- cur_pos = field_section->End();
- // Round up to the alignment the required by the method section.
- cur_pos = RoundUp(cur_pos, ArtMethod::Alignment(target_ptr_size_));
- // Add method section.
- auto* methods_section = &sections[ImageHeader::kSectionArtMethods];
- *methods_section = ImageSection(cur_pos,
- image_info.bin_slot_sizes_[kBinArtMethodClean] +
- image_info.bin_slot_sizes_[kBinArtMethodDirty]);
- CHECK_EQ(image_info.bin_slot_offsets_[kBinArtMethodClean], methods_section->Offset());
- cur_pos = methods_section->End();
- // Add dex cache arrays section.
- auto* dex_cache_arrays_section = &sections[ImageHeader::kSectionDexCacheArrays];
- *dex_cache_arrays_section = ImageSection(cur_pos, image_info.bin_slot_sizes_[kBinDexCacheArray]);
- CHECK_EQ(image_info.bin_slot_offsets_[kBinDexCacheArray], dex_cache_arrays_section->Offset());
- cur_pos = dex_cache_arrays_section->End();
- // Round up to the alignment the string table expects. See HashSet::WriteToMemory.
- cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
- // Calculate the size of the interned strings.
- auto* interned_strings_section = &sections[ImageHeader::kSectionInternedStrings];
- *interned_strings_section = ImageSection(cur_pos, intern_table_bytes_);
- cur_pos = interned_strings_section->End();
- // Round up to the alignment the class table expects. See HashSet::WriteToMemory.
- cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
- // Calculate the size of the class table section.
- auto* class_table_section = &sections[ImageHeader::kSectionClassTable];
- *class_table_section = ImageSection(cur_pos, class_table_bytes_);
- cur_pos = class_table_section->End();
- // Image end goes right before the start of the image bitmap.
- const size_t image_end = static_cast<uint32_t>(cur_pos);
+ const size_t image_end = image_info.CreateImageSections(target_ptr_size_, sections);
+
// Finally bitmap section.
const size_t bitmap_bytes = image_info.image_bitmap_->Size();
auto* bitmap_section = &sections[ImageHeader::kSectionImageBitmap];
- *bitmap_section = ImageSection(RoundUp(cur_pos, kPageSize), RoundUp(bitmap_bytes, kPageSize));
- cur_pos = bitmap_section->End();
+ *bitmap_section = ImageSection(RoundUp(image_end, kPageSize), RoundUp(bitmap_bytes, kPageSize));
if (VLOG_IS_ON(compiler)) {
LOG(INFO) << "Creating header for " << oat_filename;
size_t idx = 0;
@@ -1444,7 +1439,7 @@
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
- *roots[i] = ImageAddress(*roots[i]);
+ *roots[i] = image_writer_->GetImageAddress(*roots[i]);
}
}
@@ -1452,19 +1447,12 @@
const RootInfo& info ATTRIBUTE_UNUSED)
OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
- roots[i]->Assign(ImageAddress(roots[i]->AsMirrorPtr()));
+ roots[i]->Assign(image_writer_->GetImageAddress(roots[i]->AsMirrorPtr()));
}
}
private:
ImageWriter* const image_writer_;
-
- mirror::Object* ImageAddress(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
- const size_t offset = image_writer_->GetImageOffset(obj);
- auto* const dest = reinterpret_cast<Object*>(image_writer_->global_image_begin_ + offset);
- VLOG(compiler) << "Update root from " << obj << " to " << dest;
- return dest;
- }
};
void ImageWriter::CopyAndFixupNativeData() {
@@ -1536,54 +1524,48 @@
}
FixupRootVisitor root_visitor(this);
- /* TODO: Reenable the intern table and class table
// Write the intern table into the image.
- const ImageSection& intern_table_section = image_header->GetImageSection(
- ImageHeader::kSectionInternedStrings);
- Runtime* const runtime = Runtime::Current();
- InternTable* const intern_table = runtime->GetInternTable();
- uint8_t* const intern_table_memory_ptr =
- image_info.image_->Begin() + intern_table_section.Offset();
- const size_t intern_table_bytes = intern_table->WriteToMemory(intern_table_memory_ptr);
- CHECK_EQ(intern_table_bytes, intern_table_bytes_);
- // Fixup the pointers in the newly written intern table to contain image addresses.
- InternTable temp_intern_table;
- // Note that we require that ReadFromMemory does not make an internal copy of the elements so that
- // the VisitRoots() will update the memory directly rather than the copies.
- // This also relies on visit roots not doing any verification which could fail after we update
- // the roots to be the image addresses.
- temp_intern_table.ReadFromMemory(intern_table_memory_ptr);
- CHECK_EQ(temp_intern_table.Size(), intern_table->Size());
- temp_intern_table.VisitRoots(&root_visitor, kVisitRootFlagAllRoots);
-
+ if (image_info.intern_table_bytes_ > 0) {
+ const ImageSection& intern_table_section = image_header->GetImageSection(
+ ImageHeader::kSectionInternedStrings);
+ InternTable* const intern_table = image_info.intern_table_.get();
+ uint8_t* const intern_table_memory_ptr =
+ image_info.image_->Begin() + intern_table_section.Offset();
+ const size_t intern_table_bytes = intern_table->WriteToMemory(intern_table_memory_ptr);
+ CHECK_EQ(intern_table_bytes, image_info.intern_table_bytes_);
+ // Fixup the pointers in the newly written intern table to contain image addresses.
+ InternTable temp_intern_table;
+ // Note that we require that ReadFromMemory does not make an internal copy of the elements so that
+ // the VisitRoots() will update the memory directly rather than the copies.
+ // This also relies on visit roots not doing any verification which could fail after we update
+ // the roots to be the image addresses.
+ temp_intern_table.AddTableFromMemory(intern_table_memory_ptr);
+ CHECK_EQ(temp_intern_table.Size(), intern_table->Size());
+ temp_intern_table.VisitRoots(&root_visitor, kVisitRootFlagAllRoots);
+ }
// Write the class table(s) into the image. class_table_bytes_ may be 0 if there are multiple
// class loaders. Writing multiple class tables into the image is currently unsupported.
- if (class_table_bytes_ > 0u) {
- ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ if (image_info.class_table_bytes_ > 0u) {
const ImageSection& class_table_section = image_header->GetImageSection(
ImageHeader::kSectionClassTable);
uint8_t* const class_table_memory_ptr =
image_info.image_->Begin() + class_table_section.Offset();
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- size_t class_table_bytes = 0;
- for (mirror::ClassLoader* loader : class_loaders_) {
- ClassTable* table = class_linker->ClassTableForClassLoader(loader);
- CHECK(table != nullptr);
- uint8_t* memory_ptr = class_table_memory_ptr + class_table_bytes;
- class_table_bytes += table->WriteToMemory(memory_ptr);
- // Fixup the pointers in the newly written class table to contain image addresses. See
- // above comment for intern tables.
- ClassTable temp_class_table;
- temp_class_table.ReadFromMemory(memory_ptr);
- CHECK_EQ(temp_class_table.NumZygoteClasses(), table->NumNonZygoteClasses() +
- table->NumZygoteClasses());
- BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(&root_visitor,
- RootInfo(kRootUnknown));
- temp_class_table.VisitRoots(buffered_visitor);
- }
- CHECK_EQ(class_table_bytes, class_table_bytes_);
+
+ ClassTable* table = image_info.class_table_.get();
+ CHECK(table != nullptr);
+ const size_t class_table_bytes = table->WriteToMemory(class_table_memory_ptr);
+ CHECK_EQ(class_table_bytes, image_info.class_table_bytes_);
+ // Fixup the pointers in the newly written class table to contain image addresses. See
+ // above comment for intern tables.
+ ClassTable temp_class_table;
+ temp_class_table.ReadFromMemory(class_table_memory_ptr);
+ CHECK_EQ(temp_class_table.NumZygoteClasses(), table->NumNonZygoteClasses() +
+ table->NumZygoteClasses());
+ BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(&root_visitor,
+ RootInfo(kRootUnknown));
+ temp_class_table.VisitRoots(buffered_visitor);
}
- */
}
void ImageWriter::CopyAndFixupObjects() {
@@ -2110,7 +2092,6 @@
}
uint8_t* ImageWriter::GetOatFileBegin(const char* oat_filename) const {
- // DCHECK_GT(intern_table_bytes_, 0u); TODO: Reenable intern table and class table.
uintptr_t last_image_end = 0;
for (const char* oat_fn : oat_filenames_) {
const ImageInfo& image_info = GetConstImageInfo(oat_fn);
@@ -2197,4 +2178,38 @@
}
}
+ImageWriter::ImageWriter(
+ const CompilerDriver& compiler_driver,
+ uintptr_t image_begin,
+ bool compile_pic,
+ bool compile_app_image,
+ ImageHeader::StorageMode image_storage_mode,
+ const std::vector<const char*> oat_filenames,
+ const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map)
+ : compiler_driver_(compiler_driver),
+ global_image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
+ image_objects_offset_begin_(0),
+ oat_file_(nullptr),
+ compile_pic_(compile_pic),
+ compile_app_image_(compile_app_image),
+ boot_image_space_(nullptr),
+ target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
+ image_method_array_(ImageHeader::kImageMethodsCount),
+ dirty_methods_(0u),
+ clean_methods_(0u),
+ image_storage_mode_(image_storage_mode),
+ dex_file_oat_filename_map_(dex_file_oat_filename_map),
+ oat_filenames_(oat_filenames),
+ default_oat_filename_(oat_filenames[0]) {
+ CHECK_NE(image_begin, 0U);
+ for (const char* oat_filename : oat_filenames) {
+ image_info_map_.emplace(oat_filename, ImageInfo());
+ }
+ std::fill_n(image_methods_, arraysize(image_methods_), nullptr);
+}
+
+ImageWriter::ImageInfo::ImageInfo()
+ : intern_table_(new InternTable),
+ class_table_(new ClassTable) {}
+
} // namespace art
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 78297ae..f491a5a 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -47,6 +47,8 @@
} // namespace space
} // namespace gc
+class ClassTable;
+
static constexpr int kInvalidImageFd = -1;
// Write a Space built during compilation for use during execution.
@@ -58,33 +60,7 @@
bool compile_app_image,
ImageHeader::StorageMode image_storage_mode,
const std::vector<const char*> oat_filenames,
- const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map)
- : compiler_driver_(compiler_driver),
- global_image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
- image_objects_offset_begin_(0),
- oat_file_(nullptr),
- compile_pic_(compile_pic),
- compile_app_image_(compile_app_image),
- boot_image_space_(nullptr),
- target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
- intern_table_bytes_(0u),
- image_method_array_(ImageHeader::kImageMethodsCount),
- dirty_methods_(0u),
- clean_methods_(0u),
- class_table_bytes_(0u),
- image_storage_mode_(image_storage_mode),
- dex_file_oat_filename_map_(dex_file_oat_filename_map),
- oat_filenames_(oat_filenames),
- default_oat_filename_(oat_filenames[0]) {
- CHECK_NE(image_begin, 0U);
- for (const char* oat_filename : oat_filenames) {
- image_info_map_.emplace(oat_filename, ImageInfo());
- }
- std::fill_n(image_methods_, arraysize(image_methods_), nullptr);
- }
-
- ~ImageWriter() {
- }
+ const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map);
bool PrepareImageAddressSpace();
@@ -237,41 +213,40 @@
};
struct ImageInfo {
- explicit ImageInfo()
- : image_begin_(nullptr),
- image_end_(RoundUp(sizeof(ImageHeader), kObjectAlignment)),
- image_roots_address_(0),
- image_offset_(0),
- image_size_(0),
- oat_offset_(0),
- bin_slot_sizes_(),
- bin_slot_offsets_(),
- bin_slot_count_() {}
+ ImageInfo();
+ ImageInfo(ImageInfo&&) = default;
+
+ // Create the image sections into the out sections variable, returns the size of the image
+ // excluding the bitmap.
+ size_t CreateImageSections(size_t target_ptr_size, ImageSection* out_sections) const;
std::unique_ptr<MemMap> image_; // Memory mapped for generating the image.
// Target begin of this image. Notes: It is not valid to write here, this is the address
// of the target image, not necessarily where image_ is mapped. The address is only valid
// after layouting (otherwise null).
- uint8_t* image_begin_;
+ uint8_t* image_begin_ = nullptr;
- size_t image_end_; // Offset to the free space in image_, initially size of image header.
- uint32_t image_roots_address_; // The image roots address in the image.
- size_t image_offset_; // Offset of this image from the start of the first image.
+ // Offset to the free space in image_, initially size of image header.
+ size_t image_end_ = RoundUp(sizeof(ImageHeader), kObjectAlignment);
+ uint32_t image_roots_address_ = 0; // The image roots address in the image.
+ size_t image_offset_ = 0; // Offset of this image from the start of the first image.
// Image size is the *address space* covered by this image. As the live bitmap is aligned
// to the page size, the live bitmap will cover more address space than necessary. But live
// bitmaps may not overlap, so an image has a "shadow," which is accounted for in the size.
// The next image may only start at image_begin_ + image_size_ (which is guaranteed to be
// page-aligned).
- size_t image_size_;
+ size_t image_size_ = 0;
// Oat data.
- size_t oat_offset_; // Offset of the oat file for this image from start of oat files. This is
- // valid when the previous oat file has been written.
- uint8_t* oat_data_begin_; // Start of oatdata in the corresponding oat file. This is
- // valid when the images have been layed out.
- size_t oat_size_; // Size of the corresponding oat data.
+ // Offset of the oat file for this image from start of oat files. This is
+ // valid when the previous oat file has been written.
+ size_t oat_offset_ = 0;
+ // Start of oatdata in the corresponding oat file. This is
+ // valid when the images have been layed out.
+ uint8_t* oat_data_begin_ = nullptr;
+ size_t oat_size_ = 0; // Size of the corresponding oat data.
// Image bitmap which lets us know where the objects inside of the image reside.
std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;
@@ -280,12 +255,24 @@
SafeMap<const DexFile*, size_t> dex_cache_array_starts_;
// Offset from oat_data_begin_ to the stubs.
- uint32_t oat_address_offsets_[kOatAddressCount];
+ uint32_t oat_address_offsets_[kOatAddressCount] = {};
// Bin slot tracking for dirty object packing.
- size_t bin_slot_sizes_[kBinSize]; // Number of bytes in a bin.
- size_t bin_slot_offsets_[kBinSize]; // Number of bytes in previous bins.
- size_t bin_slot_count_[kBinSize]; // Number of objects in a bin.
+ size_t bin_slot_sizes_[kBinSize] = {}; // Number of bytes in a bin.
+ size_t bin_slot_offsets_[kBinSize] = {}; // Number of bytes in previous bins.
+ size_t bin_slot_count_[kBinSize] = {}; // Number of objects in a bin.
+
+ // Cached size of the intern table for when we allocate memory.
+ size_t intern_table_bytes_ = 0;
+
+ // Number of image class table bytes.
+ size_t class_table_bytes_ = 0;
+
+ // Intern table associated with this image for serialization.
+ std::unique_ptr<InternTable> intern_table_;
+
+ // Class table associated with this image for serialization.
+ std::unique_ptr<ClassTable> class_table_;
};
// We use the lock word to store the offset of the object in the image.
@@ -492,9 +479,6 @@
// Mapping of oat filename to image data.
std::unordered_map<std::string, ImageInfo> image_info_map_;
- // Cached size of the intern table for when we allocate memory.
- size_t intern_table_bytes_;
-
// ArtField, ArtMethod relocating map. These are allocated as array of structs but we want to
// have one entry per art field for convenience. ArtFields are placed right after the end of the
// image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields.
@@ -528,9 +512,6 @@
// null is a valid entry.
std::unordered_set<mirror::ClassLoader*> class_loaders_;
- // Number of image class table bytes.
- size_t class_table_bytes_;
-
// Which mode the image is stored as, see image.h
const ImageHeader::StorageMode image_storage_mode_;
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index b323d24..85216b7 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -155,8 +155,8 @@
/* dump_cfg_append */ false,
cumulative_logger_.get(),
/* swap_fd */ -1,
- /* profile_file */ "",
- /* dex to oat map */ nullptr));
+ /* dex to oat map */ nullptr,
+ /* profile_compilation_info */ nullptr));
// Disable dedupe so we can remove compiled methods.
compiler_driver_->SetDedupeEnabled(false);
compiler_driver_->SetSupportBootImageFixup(false);
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index 877a674..b10cc35 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -47,7 +47,7 @@
driver_(&compiler_options_, &verification_results_, &inliner_map_,
Compiler::kQuick, instruction_set, nullptr,
false, nullptr, nullptr, nullptr, 1u,
- false, false, "", false, nullptr, -1, "", nullptr),
+ false, false, "", false, nullptr, -1, nullptr, nullptr),
error_msg_(),
instruction_set_(instruction_set),
features_(InstructionSetFeatures::FromVariant(instruction_set, variant, &error_msg_)),
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 58f46d6..9f7ffa5 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -121,7 +121,7 @@
false,
timer_.get(),
-1,
- "",
+ nullptr,
nullptr));
}
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 4dd0d26..1af6846 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -1817,7 +1817,12 @@
UpdateLocal(destination, current_block_->GetLastInstruction(), dex_pc);
} else {
DCHECK_EQ(instruction.Opcode(), Instruction::CHECK_CAST);
+ // We emit a CheckCast followed by a BoundType. CheckCast is a statement
+ // which may throw. If it succeeds BoundType sets the new type of `object`
+ // for all subsequent uses.
current_block_->AddInstruction(new (arena_) HCheckCast(object, cls, check_kind, dex_pc));
+ current_block_->AddInstruction(new (arena_) HBoundType(object, dex_pc));
+ UpdateLocal(reference, current_block_->GetLastInstruction(), dex_pc);
}
}
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 07efdee..4648606 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1191,17 +1191,16 @@
uint32_t dex_pc,
SlowPathCode* slow_path,
bool is_direct_entrypoint) {
+ __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
+ __ Jalr(T9);
if (is_direct_entrypoint) {
// Reserve argument space on stack (for $a0-$a3) for
// entrypoints that directly reference native implementations.
// Called function may use this space to store $a0-$a3 regs.
- __ IncreaseFrameSize(kMipsDirectEntrypointRuntimeOffset);
- }
- __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
- __ Jalr(T9);
- __ Nop();
- if (is_direct_entrypoint) {
+ __ IncreaseFrameSize(kMipsDirectEntrypointRuntimeOffset); // Single instruction in delay slot.
__ DecreaseFrameSize(kMipsDirectEntrypointRuntimeOffset);
+ } else {
+ __ Nop(); // In delay slot.
}
RecordPcInfo(instruction, dex_pc, slow_path);
}
@@ -1275,15 +1274,9 @@
}
case Primitive::kPrimLong: {
- // TODO: can 2nd param be const?
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- if (instruction->IsAdd() || instruction->IsSub()) {
- locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
- } else {
- DCHECK(instruction->IsAnd() || instruction->IsOr() || instruction->IsXor());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- }
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
}
@@ -1350,34 +1343,142 @@
}
case Primitive::kPrimLong: {
- // TODO: can 2nd param be const?
Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
Register dst_low = locations->Out().AsRegisterPairLow<Register>();
Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
- Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>();
- Register rhs_low = locations->InAt(1).AsRegisterPairLow<Register>();
-
- if (instruction->IsAnd()) {
- __ And(dst_low, lhs_low, rhs_low);
- __ And(dst_high, lhs_high, rhs_high);
- } else if (instruction->IsOr()) {
- __ Or(dst_low, lhs_low, rhs_low);
- __ Or(dst_high, lhs_high, rhs_high);
- } else if (instruction->IsXor()) {
- __ Xor(dst_low, lhs_low, rhs_low);
- __ Xor(dst_high, lhs_high, rhs_high);
- } else if (instruction->IsAdd()) {
- __ Addu(dst_low, lhs_low, rhs_low);
- __ Sltu(TMP, dst_low, lhs_low);
- __ Addu(dst_high, lhs_high, rhs_high);
- __ Addu(dst_high, dst_high, TMP);
+ Location rhs_location = locations->InAt(1);
+ bool use_imm = rhs_location.IsConstant();
+ if (!use_imm) {
+ Register rhs_high = rhs_location.AsRegisterPairHigh<Register>();
+ Register rhs_low = rhs_location.AsRegisterPairLow<Register>();
+ if (instruction->IsAnd()) {
+ __ And(dst_low, lhs_low, rhs_low);
+ __ And(dst_high, lhs_high, rhs_high);
+ } else if (instruction->IsOr()) {
+ __ Or(dst_low, lhs_low, rhs_low);
+ __ Or(dst_high, lhs_high, rhs_high);
+ } else if (instruction->IsXor()) {
+ __ Xor(dst_low, lhs_low, rhs_low);
+ __ Xor(dst_high, lhs_high, rhs_high);
+ } else if (instruction->IsAdd()) {
+ if (lhs_low == rhs_low) {
+ // Special case for lhs = rhs and the sum potentially overwriting both lhs and rhs.
+ __ Slt(TMP, lhs_low, ZERO);
+ __ Addu(dst_low, lhs_low, rhs_low);
+ } else {
+ __ Addu(dst_low, lhs_low, rhs_low);
+ // If the sum overwrites rhs, lhs remains unchanged, otherwise rhs remains unchanged.
+ __ Sltu(TMP, dst_low, (dst_low == rhs_low) ? lhs_low : rhs_low);
+ }
+ __ Addu(dst_high, lhs_high, rhs_high);
+ __ Addu(dst_high, dst_high, TMP);
+ } else {
+ DCHECK(instruction->IsSub());
+ __ Sltu(TMP, lhs_low, rhs_low);
+ __ Subu(dst_low, lhs_low, rhs_low);
+ __ Subu(dst_high, lhs_high, rhs_high);
+ __ Subu(dst_high, dst_high, TMP);
+ }
} else {
- DCHECK(instruction->IsSub());
- __ Subu(dst_low, lhs_low, rhs_low);
- __ Sltu(TMP, lhs_low, dst_low);
- __ Subu(dst_high, lhs_high, rhs_high);
- __ Subu(dst_high, dst_high, TMP);
+ int64_t value = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()->AsConstant());
+ if (instruction->IsOr()) {
+ uint32_t low = Low32Bits(value);
+ uint32_t high = High32Bits(value);
+ if (IsUint<16>(low)) {
+ if (dst_low != lhs_low || low != 0) {
+ __ Ori(dst_low, lhs_low, low);
+ }
+ } else {
+ __ LoadConst32(TMP, low);
+ __ Or(dst_low, lhs_low, TMP);
+ }
+ if (IsUint<16>(high)) {
+ if (dst_high != lhs_high || high != 0) {
+ __ Ori(dst_high, lhs_high, high);
+ }
+ } else {
+ if (high != low) {
+ __ LoadConst32(TMP, high);
+ }
+ __ Or(dst_high, lhs_high, TMP);
+ }
+ } else if (instruction->IsXor()) {
+ uint32_t low = Low32Bits(value);
+ uint32_t high = High32Bits(value);
+ if (IsUint<16>(low)) {
+ if (dst_low != lhs_low || low != 0) {
+ __ Xori(dst_low, lhs_low, low);
+ }
+ } else {
+ __ LoadConst32(TMP, low);
+ __ Xor(dst_low, lhs_low, TMP);
+ }
+ if (IsUint<16>(high)) {
+ if (dst_high != lhs_high || high != 0) {
+ __ Xori(dst_high, lhs_high, high);
+ }
+ } else {
+ if (high != low) {
+ __ LoadConst32(TMP, high);
+ }
+ __ Xor(dst_high, lhs_high, TMP);
+ }
+ } else if (instruction->IsAnd()) {
+ uint32_t low = Low32Bits(value);
+ uint32_t high = High32Bits(value);
+ if (IsUint<16>(low)) {
+ __ Andi(dst_low, lhs_low, low);
+ } else if (low != 0xFFFFFFFF) {
+ __ LoadConst32(TMP, low);
+ __ And(dst_low, lhs_low, TMP);
+ } else if (dst_low != lhs_low) {
+ __ Move(dst_low, lhs_low);
+ }
+ if (IsUint<16>(high)) {
+ __ Andi(dst_high, lhs_high, high);
+ } else if (high != 0xFFFFFFFF) {
+ if (high != low) {
+ __ LoadConst32(TMP, high);
+ }
+ __ And(dst_high, lhs_high, TMP);
+ } else if (dst_high != lhs_high) {
+ __ Move(dst_high, lhs_high);
+ }
+ } else {
+ if (instruction->IsSub()) {
+ value = -value;
+ } else {
+ DCHECK(instruction->IsAdd());
+ }
+ int32_t low = Low32Bits(value);
+ int32_t high = High32Bits(value);
+ if (IsInt<16>(low)) {
+ if (dst_low != lhs_low || low != 0) {
+ __ Addiu(dst_low, lhs_low, low);
+ }
+ if (low != 0) {
+ __ Sltiu(AT, dst_low, low);
+ }
+ } else {
+ __ LoadConst32(TMP, low);
+ __ Addu(dst_low, lhs_low, TMP);
+ __ Sltu(AT, dst_low, TMP);
+ }
+ if (IsInt<16>(high)) {
+ if (dst_high != lhs_high || high != 0) {
+ __ Addiu(dst_high, lhs_high, high);
+ }
+ } else {
+ if (high != low) {
+ __ LoadConst32(TMP, high);
+ }
+ __ Addu(dst_high, lhs_high, TMP);
+ }
+ if (low != 0) {
+ __ Addu(dst_high, dst_high, AT);
+ }
+ }
}
break;
}
@@ -1416,12 +1517,15 @@
Primitive::Type type = instr->GetResultType();
switch (type) {
case Primitive::kPrimInt:
- case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ case Primitive::kPrimLong:
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
locations->SetOut(Location::RequiresRegister());
break;
- }
default:
LOG(FATAL) << "Unexpected shift type " << type;
}
@@ -1440,6 +1544,8 @@
int64_t rhs_imm = use_imm ? CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()) : 0;
uint32_t shift_mask = (type == Primitive::kPrimInt) ? kMaxIntShiftValue : kMaxLongShiftValue;
uint32_t shift_value = rhs_imm & shift_mask;
+ // Is the INS (Insert Bit Field) instruction supported?
+ bool has_ins = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
switch (type) {
case Primitive::kPrimInt: {
@@ -1474,21 +1580,37 @@
if (shift_value == 0) {
codegen_->Move64(locations->Out(), locations->InAt(0));
} else if (shift_value < kMipsBitsPerWord) {
- if (instr->IsShl()) {
- __ Sll(dst_low, lhs_low, shift_value);
- __ Srl(TMP, lhs_low, kMipsBitsPerWord - shift_value);
- __ Sll(dst_high, lhs_high, shift_value);
- __ Or(dst_high, dst_high, TMP);
- } else if (instr->IsShr()) {
- __ Sra(dst_high, lhs_high, shift_value);
- __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
- __ Srl(dst_low, lhs_low, shift_value);
- __ Or(dst_low, dst_low, TMP);
+ if (has_ins) {
+ if (instr->IsShl()) {
+ __ Srl(dst_high, lhs_low, kMipsBitsPerWord - shift_value);
+ __ Ins(dst_high, lhs_high, shift_value, kMipsBitsPerWord - shift_value);
+ __ Sll(dst_low, lhs_low, shift_value);
+ } else if (instr->IsShr()) {
+ __ Srl(dst_low, lhs_low, shift_value);
+ __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
+ __ Sra(dst_high, lhs_high, shift_value);
+ } else {
+ __ Srl(dst_low, lhs_low, shift_value);
+ __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
+ __ Srl(dst_high, lhs_high, shift_value);
+ }
} else {
- __ Srl(dst_high, lhs_high, shift_value);
- __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
- __ Srl(dst_low, lhs_low, shift_value);
- __ Or(dst_low, dst_low, TMP);
+ if (instr->IsShl()) {
+ __ Sll(dst_low, lhs_low, shift_value);
+ __ Srl(TMP, lhs_low, kMipsBitsPerWord - shift_value);
+ __ Sll(dst_high, lhs_high, shift_value);
+ __ Or(dst_high, dst_high, TMP);
+ } else if (instr->IsShr()) {
+ __ Sra(dst_high, lhs_high, shift_value);
+ __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
+ __ Srl(dst_low, lhs_low, shift_value);
+ __ Or(dst_low, dst_low, TMP);
+ } else {
+ __ Srl(dst_high, lhs_high, shift_value);
+ __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
+ __ Srl(dst_low, lhs_low, shift_value);
+ __ Or(dst_low, dst_low, TMP);
+ }
}
} else {
shift_value -= kMipsBitsPerWord;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index fd18917..a808c27 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1335,9 +1335,10 @@
void InstructionCodeGeneratorX86::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
+template<class LabelType>
void InstructionCodeGeneratorX86::GenerateFPJumps(HCondition* cond,
- Label* true_label,
- Label* false_label) {
+ LabelType* true_label,
+ LabelType* false_label) {
if (cond->IsFPConditionTrueIfNaN()) {
__ j(kUnordered, true_label);
} else if (cond->IsFPConditionFalseIfNaN()) {
@@ -1346,9 +1347,10 @@
__ j(X86UnsignedOrFPCondition(cond->GetCondition()), true_label);
}
+template<class LabelType>
void InstructionCodeGeneratorX86::GenerateLongComparesAndJumps(HCondition* cond,
- Label* true_label,
- Label* false_label) {
+ LabelType* true_label,
+ LabelType* false_label) {
LocationSummary* locations = cond->GetLocations();
Location left = locations->InAt(0);
Location right = locations->InAt(1);
@@ -1437,14 +1439,15 @@
__ j(final_condition, true_label);
}
+template<class LabelType>
void InstructionCodeGeneratorX86::GenerateCompareTestAndBranch(HCondition* condition,
- Label* true_target_in,
- Label* false_target_in) {
+ LabelType* true_target_in,
+ LabelType* false_target_in) {
// Generated branching requires both targets to be explicit. If either of the
// targets is nullptr (fallthrough) use and bind `fallthrough_target` instead.
- Label fallthrough_target;
- Label* true_target = true_target_in == nullptr ? &fallthrough_target : true_target_in;
- Label* false_target = false_target_in == nullptr ? &fallthrough_target : false_target_in;
+ LabelType fallthrough_target;
+ LabelType* true_target = true_target_in == nullptr ? &fallthrough_target : true_target_in;
+ LabelType* false_target = false_target_in == nullptr ? &fallthrough_target : false_target_in;
LocationSummary* locations = condition->GetLocations();
Location left = locations->InAt(0);
@@ -1486,10 +1489,11 @@
!Primitive::IsFloatingPointType(cond->InputAt(0)->GetType());
}
+template<class LabelType>
void InstructionCodeGeneratorX86::GenerateTestAndBranch(HInstruction* instruction,
size_t condition_input_index,
- Label* true_target,
- Label* false_target) {
+ LabelType* true_target,
+ LabelType* false_target) {
HInstruction* cond = instruction->InputAt(condition_input_index);
if (true_target == nullptr && false_target == nullptr) {
@@ -1613,7 +1617,7 @@
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
- /* false_target */ nullptr);
+ /* false_target */ static_cast<Label*>(nullptr));
}
void LocationsBuilderX86::VisitNativeDebugInfo(HNativeDebugInfo* info) {
@@ -1709,7 +1713,7 @@
Location lhs = locations->InAt(0);
Location rhs = locations->InAt(1);
Register reg = locations->Out().AsRegister<Register>();
- Label true_label, false_label;
+ NearLabel true_label, false_label;
switch (cond->InputAt(0)->GetType()) {
default: {
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 3d34317..df73476 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -267,15 +267,22 @@
void GenerateImplicitNullCheck(HNullCheck* instruction);
void GenerateExplicitNullCheck(HNullCheck* instruction);
+ template<class LabelType>
void GenerateTestAndBranch(HInstruction* instruction,
size_t condition_input_index,
- Label* true_target,
- Label* false_target);
+ LabelType* true_target,
+ LabelType* false_target);
+ template<class LabelType>
void GenerateCompareTestAndBranch(HCondition* condition,
- Label* true_target,
- Label* false_target);
- void GenerateFPJumps(HCondition* cond, Label* true_label, Label* false_label);
- void GenerateLongComparesAndJumps(HCondition* cond, Label* true_label, Label* false_label);
+ LabelType* true_target,
+ LabelType* false_target);
+ template<class LabelType>
+ void GenerateFPJumps(HCondition* cond, LabelType* true_label, LabelType* false_label);
+ template<class LabelType>
+ void GenerateLongComparesAndJumps(HCondition* cond,
+ LabelType* true_label,
+ LabelType* false_label);
+
void HandleGoto(HInstruction* got, HBasicBlock* successor);
void GenPackedSwitchWithCompares(Register value_reg,
int32_t lower_bound,
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 7c94a8c..76a4ce2 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1370,9 +1370,10 @@
void InstructionCodeGeneratorX86_64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
+template<class LabelType>
void InstructionCodeGeneratorX86_64::GenerateFPJumps(HCondition* cond,
- Label* true_label,
- Label* false_label) {
+ LabelType* true_label,
+ LabelType* false_label) {
if (cond->IsFPConditionTrueIfNaN()) {
__ j(kUnordered, true_label);
} else if (cond->IsFPConditionFalseIfNaN()) {
@@ -1381,14 +1382,15 @@
__ j(X86_64FPCondition(cond->GetCondition()), true_label);
}
+template<class LabelType>
void InstructionCodeGeneratorX86_64::GenerateCompareTestAndBranch(HCondition* condition,
- Label* true_target_in,
- Label* false_target_in) {
+ LabelType* true_target_in,
+ LabelType* false_target_in) {
// Generated branching requires both targets to be explicit. If either of the
// targets is nullptr (fallthrough) use and bind `fallthrough_target` instead.
- Label fallthrough_target;
- Label* true_target = true_target_in == nullptr ? &fallthrough_target : true_target_in;
- Label* false_target = false_target_in == nullptr ? &fallthrough_target : false_target_in;
+ LabelType fallthrough_target;
+ LabelType* true_target = true_target_in == nullptr ? &fallthrough_target : true_target_in;
+ LabelType* false_target = false_target_in == nullptr ? &fallthrough_target : false_target_in;
LocationSummary* locations = condition->GetLocations();
Location left = locations->InAt(0);
@@ -1470,10 +1472,11 @@
!Primitive::IsFloatingPointType(cond->InputAt(0)->GetType());
}
+template<class LabelType>
void InstructionCodeGeneratorX86_64::GenerateTestAndBranch(HInstruction* instruction,
size_t condition_input_index,
- Label* true_target,
- Label* false_target) {
+ LabelType* true_target,
+ LabelType* false_target) {
HInstruction* cond = instruction->InputAt(condition_input_index);
if (true_target == nullptr && false_target == nullptr) {
@@ -1597,7 +1600,7 @@
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
- /* false_target */ nullptr);
+ /* false_target */ static_cast<Label*>(nullptr));
}
void LocationsBuilderX86_64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
@@ -1684,7 +1687,7 @@
Location lhs = locations->InAt(0);
Location rhs = locations->InAt(1);
CpuRegister reg = locations->Out().AsRegister<CpuRegister>();
- Label true_label, false_label;
+ NearLabel true_label, false_label;
switch (cond->InputAt(0)->GetType()) {
default:
@@ -5747,7 +5750,7 @@
is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
- Label done;
+ NearLabel done;
// Avoid null check if we know obj is not null.
if (instruction->MustDoNullCheck()) {
__ testl(obj, obj);
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 9995416..c5e8a04 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -258,14 +258,18 @@
void GenerateExplicitNullCheck(HNullCheck* instruction);
void PushOntoFPStack(Location source, uint32_t temp_offset,
uint32_t stack_adjustment, bool is_float);
+ template<class LabelType>
void GenerateTestAndBranch(HInstruction* instruction,
size_t condition_input_index,
- Label* true_target,
- Label* false_target);
+ LabelType* true_target,
+ LabelType* false_target);
+ template<class LabelType>
void GenerateCompareTestAndBranch(HCondition* condition,
- Label* true_target,
- Label* false_target);
- void GenerateFPJumps(HCondition* cond, Label* true_label, Label* false_label);
+ LabelType* true_target,
+ LabelType* false_target);
+ template<class LabelType>
+ void GenerateFPJumps(HCondition* cond, LabelType* true_label, LabelType* false_label);
+
void HandleGoto(HInstruction* got, HBasicBlock* successor);
X86_64Assembler* const assembler_;
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index f3c1dbe..6d0bdbe 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -763,6 +763,14 @@
phi->GetId(),
phi->GetRegNumber(),
type_str.str().c_str()));
+ } else if (phi->GetType() == Primitive::kPrimNot) {
+ std::stringstream type_str;
+ type_str << other_phi->GetType();
+ AddError(StringPrintf(
+ "Equivalent non-reference phi (%d) found for VReg %d with type: %s.",
+ phi->GetId(),
+ phi->GetRegNumber(),
+ type_str.str().c_str()));
} else {
ArenaBitVector visited(GetGraph()->GetArena(), 0, /* expandable */ true);
if (!IsConstantEquivalent(phi, other_phi, &visited)) {
@@ -913,4 +921,16 @@
}
}
+void SSAChecker::VisitBoundType(HBoundType* instruction) {
+ VisitInstruction(instruction);
+
+ ScopedObjectAccess soa(Thread::Current());
+ if (!instruction->GetUpperBound().IsValid()) {
+ AddError(StringPrintf(
+ "%s %d does not have a valid upper bound RTI.",
+ instruction->DebugName(),
+ instruction->GetId()));
+ }
+}
+
} // namespace art
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index d5ddbab..2e16bfe 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -128,6 +128,7 @@
void VisitPackedSwitch(HPackedSwitch* instruction) OVERRIDE;
void VisitBooleanNot(HBooleanNot* instruction) OVERRIDE;
void VisitConstant(HConstant* instruction) OVERRIDE;
+ void VisitBoundType(HBoundType* instruction) OVERRIDE;
void HandleBooleanInput(HInstruction* instruction, size_t input_index);
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index 776c115..29a1845 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -85,6 +85,7 @@
constant0_ = graph_->GetIntConstant(0);
constant1_ = graph_->GetIntConstant(1);
constant100_ = graph_->GetIntConstant(100);
+ float_constant0_ = graph_->GetFloatConstant(0.0f);
induc_ = new (&allocator_) HLocal(n);
entry_->AddInstruction(induc_);
entry_->AddInstruction(new (&allocator_) HStoreLocal(induc_, constant0_));
@@ -156,8 +157,10 @@
HInstruction* InsertArrayStore(HLocal* subscript, int d) {
HInstruction* load = InsertInstruction(
new (&allocator_) HLoadLocal(subscript, Primitive::kPrimInt), d);
+ // ArraySet is given a float value in order to avoid SsaBuilder typing
+ // it from the array's non-existent reference type info.
return InsertInstruction(new (&allocator_) HArraySet(
- parameter_, load, constant0_, Primitive::kPrimInt, 0), d);
+ parameter_, load, float_constant0_, Primitive::kPrimFloat, 0), d);
}
// Returns induction information of instruction in loop at depth d.
@@ -187,6 +190,7 @@
HInstruction* constant0_;
HInstruction* constant1_;
HInstruction* constant100_;
+ HInstruction* float_constant0_;
HLocal* induc_; // "vreg_n", the "k"
HLocal* tmp_; // "vreg_n+1"
HLocal* dum_; // "vreg_n+2"
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 0e50416..48d3299 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -42,7 +42,14 @@
namespace art {
-static constexpr size_t kMaximumNumberOfHInstructions = 12;
+static constexpr size_t kMaximumNumberOfHInstructions = 32;
+
+// Limit the number of dex registers that we accumulate while inlining
+// to avoid creating large amount of nested environments.
+static constexpr size_t kMaximumNumberOfCumulatedDexRegisters = 64;
+
+// Avoid inlining within a huge method due to memory pressure.
+static constexpr size_t kMaximumCodeUnitSize = 4096;
void HInliner::Run() {
const CompilerOptions& compiler_options = compiler_driver_->GetCompilerOptions();
@@ -50,6 +57,9 @@
|| (compiler_options.GetInlineMaxCodeUnits() == 0)) {
return;
}
+ if (caller_compilation_unit_.GetCodeItem()->insns_size_in_code_units_ > kMaximumCodeUnitSize) {
+ return;
+ }
if (graph_->IsDebuggable()) {
// For simplicity, we currently never inline when the graph is debuggable. This avoids
// doing some logic in the runtime to discover if a method could have been inlined.
@@ -216,6 +226,7 @@
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
// We can query the dex cache directly. The verifier has populated it already.
ArtMethod* resolved_method;
+ ArtMethod* actual_method = nullptr;
if (invoke_instruction->IsInvokeStaticOrDirect()) {
if (invoke_instruction->AsInvokeStaticOrDirect()->IsStringInit()) {
VLOG(compiler) << "Not inlining a String.<init> method";
@@ -227,9 +238,15 @@
: class_linker->FindDexCache(soa.Self(), *ref.dex_file);
resolved_method = dex_cache->GetResolvedMethod(
ref.dex_method_index, class_linker->GetImagePointerSize());
+ // actual_method == resolved_method for direct or static calls.
+ actual_method = resolved_method;
} else {
resolved_method = caller_compilation_unit_.GetDexCache().Get()->GetResolvedMethod(
method_index, class_linker->GetImagePointerSize());
+ if (resolved_method != nullptr) {
+ // Check if we can statically find the method.
+ actual_method = FindVirtualOrInterfaceTarget(invoke_instruction, resolved_method);
+ }
}
if (resolved_method == nullptr) {
@@ -239,15 +256,10 @@
return false;
}
- if (invoke_instruction->IsInvokeStaticOrDirect()) {
- return TryInline(invoke_instruction, resolved_method);
- }
-
- // Check if we can statically find the method.
- ArtMethod* actual_method = FindVirtualOrInterfaceTarget(invoke_instruction, resolved_method);
if (actual_method != nullptr) {
return TryInline(invoke_instruction, actual_method);
}
+ DCHECK(!invoke_instruction->IsInvokeStaticOrDirect());
// Check if we can use an inline cache.
ArtMethod* caller = graph_->GetArtMethod();
@@ -589,6 +601,7 @@
compiler_driver_,
handles_,
stats_,
+ total_number_of_dex_registers_ + code_item->registers_size_,
depth_ + 1);
inliner.Run();
number_of_instructions_budget += inliner.number_of_inlined_instructions_;
@@ -620,6 +633,10 @@
HReversePostOrderIterator it(*callee_graph);
it.Advance(); // Past the entry block, it does not contain instructions that prevent inlining.
size_t number_of_instructions = 0;
+
+ bool can_inline_environment =
+ total_number_of_dex_registers_ < kMaximumNumberOfCumulatedDexRegisters;
+
for (; !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
if (block->IsLoopHeader()) {
@@ -633,10 +650,17 @@
instr_it.Advance()) {
if (number_of_instructions++ == number_of_instructions_budget) {
VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
- << " could not be inlined because it is too big.";
+ << " is not inlined because its caller has reached"
+ << " its instruction budget limit.";
return false;
}
HInstruction* current = instr_it.Current();
+ if (!can_inline_environment && current->NeedsEnvironment()) {
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
+ << " is not inlined because its caller has reached"
+ << " its environment budget limit.";
+ return false;
+ }
if (current->IsInvokeInterface()) {
// Disable inlining of interface calls. The cost in case of entering the
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 7b9fb73..8de510e 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -40,13 +40,15 @@
CompilerDriver* compiler_driver,
StackHandleScopeCollection* handles,
OptimizingCompilerStats* stats,
- size_t depth = 0)
+ size_t total_number_of_dex_registers,
+ size_t depth)
: HOptimization(outer_graph, kInlinerPassName, stats),
outermost_graph_(outermost_graph),
outer_compilation_unit_(outer_compilation_unit),
caller_compilation_unit_(caller_compilation_unit),
codegen_(codegen),
compiler_driver_(compiler_driver),
+ total_number_of_dex_registers_(total_number_of_dex_registers),
depth_(depth),
number_of_inlined_instructions_(0),
handles_(handles) {}
@@ -88,6 +90,7 @@
const DexCompilationUnit& caller_compilation_unit_;
CodeGenerator* const codegen_;
CompilerDriver* const compiler_driver_;
+ const size_t total_number_of_dex_registers_;
const size_t depth_;
size_t number_of_inlined_instructions_;
StackHandleScopeCollection* const handles_;
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
index aa60fd6..2b63ec8 100644
--- a/compiler/optimizing/licm_test.cc
+++ b/compiler/optimizing/licm_test.cc
@@ -65,7 +65,8 @@
// Provide boiler-plate instructions.
parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry_->AddInstruction(parameter_);
- constant_ = graph_->GetIntConstant(42);
+ int_constant_ = graph_->GetIntConstant(42);
+ float_constant_ = graph_->GetFloatConstant(42.0f);
loop_preheader_->AddInstruction(new (&allocator_) HGoto());
loop_header_->AddInstruction(new (&allocator_) HIf(parameter_));
loop_body_->AddInstruction(new (&allocator_) HGoto());
@@ -95,7 +96,8 @@
HBasicBlock* exit_;
HInstruction* parameter_; // "this"
- HInstruction* constant_;
+ HInstruction* int_constant_;
+ HInstruction* float_constant_;
};
//
@@ -118,7 +120,7 @@
0);
loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
- parameter_, constant_, Primitive::kPrimInt, MemberOffset(20),
+ parameter_, int_constant_, Primitive::kPrimInt, MemberOffset(20),
false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), dex_cache, 0);
loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
@@ -167,11 +169,13 @@
BuildLoop();
// Populate the loop with instructions: set/get array with different types.
+ // ArrayGet is typed as kPrimByte and ArraySet given a float value in order to
+ // avoid SsaBuilder's typing of ambiguous array operations from reference type info.
HInstruction* get_array = new (&allocator_) HArrayGet(
- parameter_, constant_, Primitive::kPrimByte, 0);
+ parameter_, int_constant_, Primitive::kPrimByte, 0);
loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction());
HInstruction* set_array = new (&allocator_) HArraySet(
- parameter_, constant_, constant_, Primitive::kPrimShort, 0);
+ parameter_, int_constant_, float_constant_, Primitive::kPrimShort, 0);
loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction());
EXPECT_EQ(get_array->GetBlock(), loop_body_);
@@ -185,11 +189,13 @@
BuildLoop();
// Populate the loop with instructions: set/get array with same types.
+ // ArrayGet is typed as kPrimByte and ArraySet is given a float value in order to
+ // avoid SsaBuilder's typing of ambiguous array operations from reference type info.
HInstruction* get_array = new (&allocator_) HArrayGet(
- parameter_, constant_, Primitive::kPrimByte, 0);
+ parameter_, int_constant_, Primitive::kPrimByte, 0);
loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction());
HInstruction* set_array = new (&allocator_) HArraySet(
- parameter_, get_array, constant_, Primitive::kPrimByte, 0);
+ parameter_, get_array, float_constant_, Primitive::kPrimByte, 0);
loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction());
EXPECT_EQ(get_array->GetBlock(), loop_body_);
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 727f2bb..2b313f6 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -678,16 +678,6 @@
}
}
- static bool IsIntFloatAlias(Primitive::Type type1, Primitive::Type type2) {
- return (type1 == Primitive::kPrimFloat && type2 == Primitive::kPrimInt) ||
- (type2 == Primitive::kPrimFloat && type1 == Primitive::kPrimInt);
- }
-
- static bool IsLongDoubleAlias(Primitive::Type type1, Primitive::Type type2) {
- return (type1 == Primitive::kPrimDouble && type2 == Primitive::kPrimLong) ||
- (type2 == Primitive::kPrimDouble && type1 == Primitive::kPrimLong);
- }
-
void VisitGetLocation(HInstruction* instruction,
HInstruction* ref,
size_t offset,
@@ -716,22 +706,14 @@
// Get the real heap value of the store.
heap_value = store->InputAt(1);
}
- if ((heap_value != kUnknownHeapValue) &&
- // Keep the load due to possible I/F, J/D array aliasing.
- // See b/22538329 for details.
- !IsIntFloatAlias(heap_value->GetType(), instruction->GetType()) &&
- !IsLongDoubleAlias(heap_value->GetType(), instruction->GetType())) {
+ if (heap_value == kUnknownHeapValue) {
+ // Load isn't eliminated. Put the load as the value into the HeapLocation.
+ // This acts like GVN but with better aliasing analysis.
+ heap_values[idx] = instruction;
+ } else {
removed_loads_.push_back(instruction);
substitute_instructions_for_loads_.push_back(heap_value);
TryRemovingNullCheck(instruction);
- return;
- }
-
- // Load isn't eliminated.
- if (heap_value == kUnknownHeapValue) {
- // Put the load as the value into the HeapLocation.
- // This acts like GVN but with better aliasing analysis.
- heap_values[idx] = instruction;
}
}
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index fc12224..c85e573 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2060,6 +2060,16 @@
new_pre_header->SetTryCatchInformation(try_catch_info);
}
+static void CheckAgainstUpperBound(ReferenceTypeInfo rti, ReferenceTypeInfo upper_bound_rti)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (rti.IsValid()) {
+ DCHECK(upper_bound_rti.IsSupertypeOf(rti))
+ << " upper_bound_rti: " << upper_bound_rti
+ << " rti: " << rti;
+ DCHECK(!upper_bound_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes() || rti.IsExact());
+ }
+}
+
void HInstruction::SetReferenceTypeInfo(ReferenceTypeInfo rti) {
if (kIsDebugBuild) {
DCHECK_EQ(GetType(), Primitive::kPrimNot);
@@ -2068,16 +2078,23 @@
if (IsBoundType()) {
// Having the test here spares us from making the method virtual just for
// the sake of a DCHECK.
- ReferenceTypeInfo upper_bound_rti = AsBoundType()->GetUpperBound();
- DCHECK(upper_bound_rti.IsSupertypeOf(rti))
- << " upper_bound_rti: " << upper_bound_rti
- << " rti: " << rti;
- DCHECK(!upper_bound_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes() || rti.IsExact());
+ CheckAgainstUpperBound(rti, AsBoundType()->GetUpperBound());
}
}
reference_type_info_ = rti;
}
+void HBoundType::SetUpperBound(const ReferenceTypeInfo& upper_bound, bool can_be_null) {
+ if (kIsDebugBuild) {
+ ScopedObjectAccess soa(Thread::Current());
+ DCHECK(upper_bound.IsValid());
+ DCHECK(!upper_bound_.IsValid()) << "Upper bound should only be set once.";
+ CheckAgainstUpperBound(GetReferenceTypeInfo(), upper_bound);
+ }
+ upper_bound_ = upper_bound;
+ upper_can_be_null_ = can_be_null;
+}
+
ReferenceTypeInfo::ReferenceTypeInfo() : type_handle_(TypeHandle()), is_exact_(false) {}
ReferenceTypeInfo::ReferenceTypeInfo(TypeHandle type_handle, bool is_exact)
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 5b072cf..1a7cbde 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -101,7 +101,7 @@
enum BuildSsaResult {
kBuildSsaFailNonNaturalLoop,
kBuildSsaFailThrowCatchLoop,
- kBuildSsaFailAmbiguousArrayGet,
+ kBuildSsaFailAmbiguousArrayOp,
kBuildSsaSuccess,
};
@@ -240,7 +240,7 @@
// Returns true if the type information provide the same amount of details.
// Note that it does not mean that the instructions have the same actual type
// (because the type can be the result of a merge).
- bool IsEqual(ReferenceTypeInfo rti) SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsEqual(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
if (!IsValid() && !rti.IsValid()) {
// Invalid types are equal.
return true;
@@ -5431,24 +5431,19 @@
class HBoundType : public HExpression<1> {
public:
- // Constructs an HBoundType with the given upper_bound.
- // Ensures that the upper_bound is valid.
- HBoundType(HInstruction* input,
- ReferenceTypeInfo upper_bound,
- bool upper_can_be_null,
- uint32_t dex_pc = kNoDexPc)
+ HBoundType(HInstruction* input, uint32_t dex_pc = kNoDexPc)
: HExpression(Primitive::kPrimNot, SideEffects::None(), dex_pc),
- upper_bound_(upper_bound),
- upper_can_be_null_(upper_can_be_null),
- can_be_null_(upper_can_be_null) {
+ upper_bound_(ReferenceTypeInfo::CreateInvalid()),
+ upper_can_be_null_(true),
+ can_be_null_(true) {
DCHECK_EQ(input->GetType(), Primitive::kPrimNot);
SetRawInputAt(0, input);
- SetReferenceTypeInfo(upper_bound_);
}
- // GetUpper* should only be used in reference type propagation.
+ // {Get,Set}Upper* should only be used in reference type propagation.
const ReferenceTypeInfo& GetUpperBound() const { return upper_bound_; }
bool GetUpperCanBeNull() const { return upper_can_be_null_; }
+ void SetUpperBound(const ReferenceTypeInfo& upper_bound, bool can_be_null);
void SetCanBeNull(bool can_be_null) {
DCHECK(upper_can_be_null_ || !can_be_null);
@@ -5466,10 +5461,10 @@
// if (x instanceof ClassX) {
// // uper_bound_ will be ClassX
// }
- const ReferenceTypeInfo upper_bound_;
+ ReferenceTypeInfo upper_bound_;
// Represents the top constraint that can_be_null_ cannot exceed (i.e. if this
// is false then can_be_null_ cannot be true).
- const bool upper_can_be_null_;
+ bool upper_can_be_null_;
bool can_be_null_;
DISALLOW_COPY_AND_ASSIGN(HBoundType);
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 3de870e..3eb7274 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -426,8 +426,18 @@
if (!should_inline) {
return;
}
+ size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
HInliner* inliner = new (graph->GetArena()) HInliner(
- graph, graph, codegen, dex_compilation_unit, dex_compilation_unit, driver, handles, stats);
+ graph,
+ graph,
+ codegen,
+ dex_compilation_unit,
+ dex_compilation_unit,
+ driver,
+ handles,
+ stats,
+ number_of_dex_registers,
+ /* depth */ 0);
HOptimization* optimizations[] = { inliner };
RunOptimizations(optimizations, arraysize(optimizations), pass_observer);
@@ -776,8 +786,8 @@
case kBuildSsaFailThrowCatchLoop:
MaybeRecordStat(MethodCompilationStat::kNotCompiledThrowCatchLoop);
break;
- case kBuildSsaFailAmbiguousArrayGet:
- MaybeRecordStat(MethodCompilationStat::kNotCompiledAmbiguousArrayGet);
+ case kBuildSsaFailAmbiguousArrayOp:
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
break;
case kBuildSsaSuccess:
UNREACHABLE();
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 4713514..bca1632 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -40,7 +40,7 @@
kNotCompiledBranchOutsideMethodCode,
kNotCompiledNonNaturalLoop,
kNotCompiledThrowCatchLoop,
- kNotCompiledAmbiguousArrayGet,
+ kNotCompiledAmbiguousArrayOp,
kNotCompiledHugeMethod,
kNotCompiledLargeMethodNoBranches,
kNotCompiledMalformedOpcode,
@@ -108,7 +108,7 @@
case kNotCompiledBranchOutsideMethodCode: name = "NotCompiledBranchOutsideMethodCode"; break;
case kNotCompiledNonNaturalLoop : name = "NotCompiledNonNaturalLoop"; break;
case kNotCompiledThrowCatchLoop : name = "NotCompiledThrowCatchLoop"; break;
- case kNotCompiledAmbiguousArrayGet : name = "NotCompiledAmbiguousArrayGet"; break;
+ case kNotCompiledAmbiguousArrayOp : name = "NotCompiledAmbiguousArrayOp"; break;
case kNotCompiledHugeMethod : name = "NotCompiledHugeMethod"; break;
case kNotCompiledLargeMethodNoBranches : name = "NotCompiledLargeMethodNoBranches"; break;
case kNotCompiledMalformedOpcode : name = "NotCompiledMalformedOpcode"; break;
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index d1770b7..63ef600 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -96,7 +96,7 @@
if (can_merge_with_load_class && !load_class->HasUses()) {
load_class->GetBlock()->RemoveInstruction(load_class);
}
- } else if (can_merge_with_load_class) {
+ } else if (can_merge_with_load_class && !load_class->NeedsAccessCheck()) {
// Pass the initialization duty to the `HLoadClass` instruction,
// and remove the instruction from the graph.
load_class->SetMustGenerateClinitCheck(true);
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 94a297c..1c25e48 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -56,6 +56,7 @@
void VisitInvoke(HInvoke* instr) OVERRIDE;
void VisitArrayGet(HArrayGet* instr) OVERRIDE;
void VisitCheckCast(HCheckCast* instr) OVERRIDE;
+ void VisitBoundType(HBoundType* instr) OVERRIDE;
void VisitNullCheck(HNullCheck* instr) OVERRIDE;
void VisitFakeString(HFakeString* instr) OVERRIDE;
void UpdateReferenceTypeInfo(HInstruction* instr,
@@ -124,91 +125,6 @@
}
}
-static void CheckHasNoTypedInputs(HInstruction* root_instr) {
- ArenaAllocatorAdapter<void> adapter =
- root_instr->GetBlock()->GetGraph()->GetArena()->Adapter(kArenaAllocReferenceTypePropagation);
-
- ArenaVector<HPhi*> visited_phis(adapter);
- ArenaVector<HInstruction*> worklist(adapter);
- worklist.push_back(root_instr);
-
- while (!worklist.empty()) {
- HInstruction* instr = worklist.back();
- worklist.pop_back();
-
- if (instr->IsPhi() || instr->IsBoundType() || instr->IsNullCheck()) {
- // Expect that both `root_instr` and its inputs have invalid RTI.
- ScopedObjectAccess soa(Thread::Current());
- DCHECK(!instr->GetReferenceTypeInfo().IsValid()) << "Instruction should not have valid RTI.";
-
- // Insert all unvisited inputs to the worklist.
- for (HInputIterator it(instr); !it.Done(); it.Advance()) {
- HInstruction* input = it.Current();
- if (input->IsPhi()) {
- if (ContainsElement(visited_phis, input->AsPhi())) {
- continue;
- } else {
- visited_phis.push_back(input->AsPhi());
- }
- }
- worklist.push_back(input);
- }
- } else if (instr->IsNullConstant()) {
- // The only input of `root_instr` allowed to have valid RTI because it is ignored.
- } else {
- LOG(FATAL) << "Unexpected input " << instr->DebugName() << instr->GetId() << " with RTI "
- << instr->GetReferenceTypeInfo();
- UNREACHABLE();
- }
- }
-}
-
-template<typename Functor>
-static void ForEachUntypedInstruction(HGraph* graph, Functor fn) {
- ScopedObjectAccess soa(Thread::Current());
- for (HReversePostOrderIterator block_it(*graph); !block_it.Done(); block_it.Advance()) {
- for (HInstructionIterator it(block_it.Current()->GetPhis()); !it.Done(); it.Advance()) {
- HPhi* phi = it.Current()->AsPhi();
- // Note that the graph may contain dead phis when run from the SsaBuilder.
- // Skip those as they might have a type conflict and will be removed anyway.
- if (phi->IsLive() &&
- phi->GetType() == Primitive::kPrimNot &&
- !phi->GetReferenceTypeInfo().IsValid()) {
- fn(phi);
- }
- }
- for (HInstructionIterator it(block_it.Current()->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- if (instr->GetType() == Primitive::kPrimNot && !instr->GetReferenceTypeInfo().IsValid()) {
- fn(instr);
- }
- }
- }
-}
-
-void ReferenceTypePropagation::SetUntypedInstructionsToObject() {
- // In some cases, the fix-point iteration will leave kPrimNot instructions with
- // invalid RTI because bytecode does not provide enough typing information.
- // Set the RTI of such instructions to Object.
- // Example:
- // MyClass a = null, b = null;
- // while (a == null) {
- // if (cond) { a = b; } else { b = a; }
- // }
-
- if (kIsDebugBuild) {
- // Test that if we are going to set RTI from invalid to Object, that
- // instruction did not have any typed instructions in its def-use chain
- // and therefore its type could not be inferred.
- ForEachUntypedInstruction(graph_, [](HInstruction* instr) { CheckHasNoTypedInputs(instr); });
- }
-
- ReferenceTypeInfo obj_rti = ReferenceTypeInfo::Create(object_class_handle_, /* is_exact */ false);
- ForEachUntypedInstruction(graph_, [obj_rti](HInstruction* instr) {
- instr->SetReferenceTypeInfo(obj_rti);
- });
-}
-
void ReferenceTypePropagation::Run() {
// To properly propagate type info we need to visit in the dominator-based order.
// Reverse post order guarantees a node's dominators are visited first.
@@ -218,7 +134,6 @@
}
ProcessWorklist();
- SetUntypedInstructionsToObject();
ValidateTypes();
}
@@ -246,34 +161,6 @@
BoundTypeForIfInstanceOf(block);
}
-// Create a bound type for the given object narrowing the type as much as possible.
-// The BoundType upper values for the super type and can_be_null will be taken from
-// load_class.GetLoadedClassRTI() and upper_can_be_null.
-static HBoundType* CreateBoundType(ArenaAllocator* arena,
- HInstruction* obj,
- HLoadClass* load_class,
- bool upper_can_be_null)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- ReferenceTypeInfo obj_rti = obj->GetReferenceTypeInfo();
- ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
- DCHECK(class_rti.IsValid());
- HBoundType* bound_type = new (arena) HBoundType(obj, class_rti, upper_can_be_null);
- // Narrow the type as much as possible.
- if (class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes()) {
- bound_type->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ true));
- } else if (obj_rti.IsValid() && class_rti.IsSupertypeOf(obj_rti)) {
- bound_type->SetReferenceTypeInfo(obj_rti);
- } else {
- bound_type->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false));
- }
- if (upper_can_be_null) {
- bound_type->SetCanBeNull(obj->CanBeNull());
- }
- return bound_type;
-}
-
// Check if we should create a bound type for the given object at the specified
// position. Because of inlining and the fact we run RTP more than once and we
// might have a HBoundType already. If we do, we should not create a new one.
@@ -359,8 +246,8 @@
ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create(
object_class_handle_, /* is_exact */ true);
if (ShouldCreateBoundType(insert_point, obj, object_rti, nullptr, notNullBlock)) {
- bound_type = new (graph_->GetArena()) HBoundType(
- obj, object_rti, /* bound_can_be_null */ false);
+ bound_type = new (graph_->GetArena()) HBoundType(obj);
+ bound_type->SetUpperBound(object_rti, /* bound_can_be_null */ false);
if (obj->GetReferenceTypeInfo().IsValid()) {
bound_type->SetReferenceTypeInfo(obj->GetReferenceTypeInfo());
}
@@ -494,11 +381,8 @@
ScopedObjectAccess soa(Thread::Current());
HInstruction* insert_point = instanceOfTrueBlock->GetFirstInstruction();
if (ShouldCreateBoundType(insert_point, obj, class_rti, nullptr, instanceOfTrueBlock)) {
- bound_type = CreateBoundType(
- graph_->GetArena(),
- obj,
- load_class,
- false /* InstanceOf ensures the object is not null. */);
+ bound_type = new (graph_->GetArena()) HBoundType(obj);
+ bound_type->SetUpperBound(class_rti, /* InstanceOf fails for null. */ false);
instanceOfTrueBlock->InsertInstructionBefore(bound_type, insert_point);
} else {
// We already have a bound type on the position we would need to insert
@@ -688,43 +572,61 @@
instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(string_class_handle_, /* is_exact */ true));
}
+void RTPVisitor::VisitBoundType(HBoundType* instr) {
+ ScopedObjectAccess soa(Thread::Current());
+
+ ReferenceTypeInfo class_rti = instr->GetUpperBound();
+ if (class_rti.IsValid()) {
+ // Narrow the type as much as possible.
+ HInstruction* obj = instr->InputAt(0);
+ ReferenceTypeInfo obj_rti = obj->GetReferenceTypeInfo();
+ if (class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes()) {
+ instr->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ true));
+ } else if (obj_rti.IsValid()) {
+ if (class_rti.IsSupertypeOf(obj_rti)) {
+ // Object type is more specific.
+ instr->SetReferenceTypeInfo(obj_rti);
+ } else {
+ // Upper bound is more specific.
+ instr->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false));
+ }
+ } else {
+ // Object not typed yet. Leave BoundType untyped for now rather than
+ // assign the type conservatively.
+ }
+ instr->SetCanBeNull(obj->CanBeNull() && instr->GetUpperCanBeNull());
+ } else {
+ // The owner of the BoundType was already visited. If the class is unresolved,
+ // the BoundType should have been removed from the data flow and this method
+ // should remove it from the graph.
+ DCHECK(!instr->HasUses());
+ instr->GetBlock()->RemoveInstruction(instr);
+ }
+}
+
void RTPVisitor::VisitCheckCast(HCheckCast* check_cast) {
+ ScopedObjectAccess soa(Thread::Current());
+
HLoadClass* load_class = check_cast->InputAt(1)->AsLoadClass();
ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
- {
- ScopedObjectAccess soa(Thread::Current());
- if (!class_rti.IsValid()) {
- // He have loaded an unresolved class. Don't bother bounding the type.
- return;
- }
+ HBoundType* bound_type = check_cast->GetNext()->AsBoundType();
+ if (bound_type == nullptr || bound_type->GetUpperBound().IsValid()) {
+ // The next instruction is not an uninitialized BoundType. This must be
+ // an RTP pass after SsaBuilder and we do not need to do anything.
+ return;
}
- HInstruction* obj = check_cast->InputAt(0);
- HBoundType* bound_type = nullptr;
- for (HUseIterator<HInstruction*> it(obj->GetUses()); !it.Done(); it.Advance()) {
- HInstruction* user = it.Current()->GetUser();
- if (check_cast->StrictlyDominates(user)) {
- if (bound_type == nullptr) {
- ScopedObjectAccess soa(Thread::Current());
- if (ShouldCreateBoundType(check_cast->GetNext(), obj, class_rti, check_cast, nullptr)) {
- bound_type = CreateBoundType(
- GetGraph()->GetArena(),
- obj,
- load_class,
- true /* CheckCast succeeds for nulls. */);
- check_cast->GetBlock()->InsertInstructionAfter(bound_type, check_cast);
- } else {
- // Update nullability of the existing bound type, which may not have known
- // that its input was not null when it was being created.
- bound_type = check_cast->GetNext()->AsBoundType();
- bound_type->SetCanBeNull(obj->CanBeNull());
- // We already have a bound type on the position we would need to insert
- // the new one. The existing bound type should dominate all the users
- // (dchecked) so there's no need to continue.
- break;
- }
- }
- user->ReplaceInput(bound_type, it.Current()->GetIndex());
- }
+ DCHECK_EQ(bound_type->InputAt(0), check_cast->InputAt(0));
+
+ if (class_rti.IsValid()) {
+ // This is the first run of RTP and class is resolved.
+ bound_type->SetUpperBound(class_rti, /* CheckCast succeeds for nulls. */ true);
+ } else {
+ // This is the first run of RTP and class is unresolved. Remove the binding.
+ // The instruction itself is removed in VisitBoundType so as to not
+ // invalidate HInstructionIterator.
+ bound_type->ReplaceWith(bound_type->InputAt(0));
}
}
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index 21789e1..5c05592 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -57,7 +57,6 @@
SHARED_REQUIRES(Locks::mutator_lock_);
void ValidateTypes();
- void SetUntypedInstructionsToObject();
StackHandleScopeCollection* handles_;
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 9e869e1..f6bab8e 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -154,7 +154,7 @@
Primitive::Type input_type = HPhi::ToPhiType(input->GetType());
if (common_type == input_type) {
// No change in type.
- } else if (Primitive::ComponentSize(common_type) != Primitive::ComponentSize(input_type)) {
+ } else if (Primitive::Is64BitType(common_type) != Primitive::Is64BitType(input_type)) {
// Types are of different sizes, e.g. int vs. long. Must be a conflict.
return false;
} else if (Primitive::IsIntegralType(common_type)) {
@@ -317,27 +317,15 @@
return equivalent;
}
-// Returns true if the array input of `aget` is either of type int[] or long[].
-// Should only be called on ArrayGets with ambiguous type (int/float, long/double)
-// on arrays which were typed to an array class by RTP.
-static bool IsArrayGetOnIntegralArray(HArrayGet* aget) SHARED_REQUIRES(Locks::mutator_lock_) {
- ReferenceTypeInfo array_type = aget->GetArray()->GetReferenceTypeInfo();
+static Primitive::Type GetPrimitiveArrayComponentType(HInstruction* array)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ReferenceTypeInfo array_type = array->GetReferenceTypeInfo();
DCHECK(array_type.IsPrimitiveArrayClass());
- ReferenceTypeInfo::TypeHandle array_type_handle = array_type.GetTypeHandle();
-
- bool is_integral_type;
- if (Primitive::Is64BitType(aget->GetType())) {
- is_integral_type = array_type_handle->GetComponentType()->IsPrimitiveLong();
- DCHECK(is_integral_type || array_type_handle->GetComponentType()->IsPrimitiveDouble());
- } else {
- is_integral_type = array_type_handle->GetComponentType()->IsPrimitiveInt();
- DCHECK(is_integral_type || array_type_handle->GetComponentType()->IsPrimitiveFloat());
- }
- return is_integral_type;
+ return array_type.GetTypeHandle()->GetComponentType()->GetPrimitiveType();
}
-bool SsaBuilder::FixAmbiguousArrayGets() {
- if (ambiguous_agets_.empty()) {
+bool SsaBuilder::FixAmbiguousArrayOps() {
+ if (ambiguous_agets_.empty() && ambiguous_asets_.empty()) {
return true;
}
@@ -351,13 +339,17 @@
ScopedObjectAccess soa(Thread::Current());
for (HArrayGet* aget_int : ambiguous_agets_) {
- if (!aget_int->GetArray()->GetReferenceTypeInfo().IsPrimitiveArrayClass()) {
+ HInstruction* array = aget_int->GetArray();
+ if (!array->GetReferenceTypeInfo().IsPrimitiveArrayClass()) {
// RTP did not type the input array. Bail.
return false;
}
HArrayGet* aget_float = FindFloatOrDoubleEquivalentOfArrayGet(aget_int);
- if (IsArrayGetOnIntegralArray(aget_int)) {
+ Primitive::Type array_type = GetPrimitiveArrayComponentType(array);
+ DCHECK_EQ(Primitive::Is64BitType(aget_int->GetType()), Primitive::Is64BitType(array_type));
+
+ if (Primitive::IsIntOrLongType(array_type)) {
if (aget_float != nullptr) {
// There is a float/double equivalent. We must replace it and re-run
// primitive type propagation on all dependent instructions.
@@ -366,6 +358,7 @@
AddDependentInstructionsToWorklist(aget_int, &worklist);
}
} else {
+ DCHECK(Primitive::IsFloatingPointType(array_type));
if (aget_float == nullptr) {
// This is a float/double ArrayGet but there were no typed uses which
// would create the typed equivalent. Create it now.
@@ -379,11 +372,47 @@
AddDependentInstructionsToWorklist(aget_float, &worklist);
}
}
- }
- // Set a flag stating that types of ArrayGets have been resolved. This is used
- // by GetFloatOrDoubleEquivalentOfArrayGet to report conflict.
- agets_fixed_ = true;
+ // Set a flag stating that types of ArrayGets have been resolved. Requesting
+ // equivalent of the wrong type with GetFloatOrDoubleEquivalentOfArrayGet
+ // will fail from now on.
+ agets_fixed_ = true;
+
+ for (HArraySet* aset : ambiguous_asets_) {
+ HInstruction* array = aset->GetArray();
+ if (!array->GetReferenceTypeInfo().IsPrimitiveArrayClass()) {
+ // RTP did not type the input array. Bail.
+ return false;
+ }
+
+ HInstruction* value = aset->GetValue();
+ Primitive::Type value_type = value->GetType();
+ Primitive::Type array_type = GetPrimitiveArrayComponentType(array);
+ DCHECK_EQ(Primitive::Is64BitType(value_type), Primitive::Is64BitType(array_type));
+
+ if (Primitive::IsFloatingPointType(array_type)) {
+ if (!Primitive::IsFloatingPointType(value_type)) {
+ DCHECK(Primitive::IsIntegralType(value_type));
+ // Array elements are floating-point but the value has not been replaced
+ // with its floating-point equivalent. The replacement must always
+ // succeed in code validated by the verifier.
+ HInstruction* equivalent = GetFloatOrDoubleEquivalent(value, array_type);
+ DCHECK(equivalent != nullptr);
+ aset->ReplaceInput(equivalent, /* input_index */ 2);
+ if (equivalent->IsPhi()) {
+ // Returned equivalent is a phi which may not have had its inputs
+ // replaced yet. We need to run primitive type propagation on it.
+ worklist.push_back(equivalent->AsPhi());
+ }
+ }
+ } else {
+ // Array elements are integral and the value assigned to it initially
+ // was integral too. Nothing to do.
+ DCHECK(Primitive::IsIntegralType(array_type));
+ DCHECK(Primitive::IsIntegralType(value_type));
+ }
+ }
+ }
if (!worklist.empty()) {
ProcessPrimitiveTypePropagationWorklist(&worklist);
@@ -429,10 +458,11 @@
ReferenceTypePropagation(GetGraph(), handles_).Run();
// 7) Step 1) duplicated ArrayGet instructions with ambiguous type (int/float
- // or long/double). Now that RTP computed the type of the array input, the
- // ambiguity can be resolved and the correct equivalent kept.
- if (!FixAmbiguousArrayGets()) {
- return kBuildSsaFailAmbiguousArrayGet;
+ // or long/double) and marked ArraySets with ambiguous input type. Now that RTP
+ // computed the type of the array input, the ambiguity can be resolved and the
+ // correct equivalents kept.
+ if (!FixAmbiguousArrayOps()) {
+ return kBuildSsaFailAmbiguousArrayOp;
}
// 8) Mark dead phis. This will mark phis which are not used by instructions
@@ -702,7 +732,7 @@
// int/long. Requesting a float/double equivalent should lead to a conflict.
if (kIsDebugBuild) {
ScopedObjectAccess soa(Thread::Current());
- DCHECK(IsArrayGetOnIntegralArray(aget));
+ DCHECK(Primitive::IsIntOrLongType(GetPrimitiveArrayComponentType(aget->GetArray())));
}
return nullptr;
} else {
@@ -847,4 +877,12 @@
VisitInstruction(aget);
}
+void SsaBuilder::VisitArraySet(HArraySet* aset) {
+ Primitive::Type type = aset->GetValue()->GetType();
+ if (Primitive::IsIntOrLongType(type)) {
+ ambiguous_asets_.push_back(aset);
+ }
+ VisitInstruction(aset);
+}
+
} // namespace art
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index ed6f5ca..0fcc3a1 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -56,6 +56,7 @@
current_locals_(nullptr),
loop_headers_(graph->GetArena()->Adapter(kArenaAllocSsaBuilder)),
ambiguous_agets_(graph->GetArena()->Adapter(kArenaAllocSsaBuilder)),
+ ambiguous_asets_(graph->GetArena()->Adapter(kArenaAllocSsaBuilder)),
locals_for_(graph->GetBlocks().size(),
ArenaVector<HInstruction*>(graph->GetArena()->Adapter(kArenaAllocSsaBuilder)),
graph->GetArena()->Adapter(kArenaAllocSsaBuilder)) {
@@ -75,6 +76,7 @@
void VisitInstruction(HInstruction* instruction);
void VisitTemporary(HTemporary* instruction);
void VisitArrayGet(HArrayGet* aget);
+ void VisitArraySet(HArraySet* aset);
static constexpr const char* kSsaBuilderPassName = "ssa_builder";
@@ -85,10 +87,10 @@
void EquivalentPhisCleanup();
void RunPrimitiveTypePropagation();
- // Attempts to resolve types of aget and aget-wide instructions from reference
- // type information on the input array. Returns false if the type of the array
- // is unknown.
- bool FixAmbiguousArrayGets();
+ // Attempts to resolve types of aget(-wide) instructions and type values passed
+ // to aput(-wide) instructions from reference type information on the array
+ // input. Returns false if the type of an array is unknown.
+ bool FixAmbiguousArrayOps();
bool TypeInputsOfPhi(HPhi* phi, ArenaVector<HPhi*>* worklist);
bool UpdatePrimitiveType(HPhi* phi, ArenaVector<HPhi*>* worklist);
@@ -115,6 +117,7 @@
ArenaVector<HBasicBlock*> loop_headers_;
ArenaVector<HArrayGet*> ambiguous_agets_;
+ ArenaVector<HArraySet*> ambiguous_asets_;
// HEnvironment for each block.
ArenaVector<ArenaVector<HInstruction*>> locals_for_;
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index 63aba88..2eef307 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -17,6 +17,7 @@
#include "ssa_phi_elimination.h"
#include "base/arena_containers.h"
+#include "base/bit_vector-inl.h"
namespace art {
@@ -129,6 +130,9 @@
}
}
+ ArenaSet<uint32_t> visited_phis_in_cycle(graph_->GetArena()->Adapter());
+ ArenaVector<HPhi*> cycle_worklist(graph_->GetArena()->Adapter());
+
while (!worklist_.empty()) {
HPhi* phi = worklist_.back();
worklist_.pop_back();
@@ -143,46 +147,92 @@
continue;
}
- // Find if the inputs of the phi are the same instruction.
- HInstruction* candidate = phi->InputAt(0);
- // A loop phi cannot have itself as the first phi. Note that this
- // check relies on our simplification pass ensuring the pre-header
- // block is first in the list of predecessors of the loop header.
- DCHECK(!phi->IsLoopHeaderPhi() || phi->GetBlock()->IsLoopPreHeaderFirstPredecessor());
- DCHECK_NE(phi, candidate);
+ HInstruction* candidate = nullptr;
+ visited_phis_in_cycle.clear();
+ cycle_worklist.clear();
- for (size_t i = 1; i < phi->InputCount(); ++i) {
- HInstruction* input = phi->InputAt(i);
- // For a loop phi, if the input is the phi, the phi is still candidate for
- // elimination.
- if (input != candidate && input != phi) {
+ cycle_worklist.push_back(phi);
+ visited_phis_in_cycle.insert(phi->GetId());
+ bool catch_phi_in_cycle = phi->IsCatchPhi();
+
+ // First do a simple loop over inputs and check if they are all the same.
+ for (size_t j = 0; j < phi->InputCount(); ++j) {
+ HInstruction* input = phi->InputAt(j);
+ if (input == phi) {
+ continue;
+ } else if (candidate == nullptr) {
+ candidate = input;
+ } else if (candidate != input) {
candidate = nullptr;
break;
}
}
- // If the inputs are not the same, continue.
+ // If we haven't found a candidate, check for a phi cycle. Note that we need to detect
+ // such cycles to avoid having reference and non-reference equivalents. We check this
+ // invariant in the graph checker.
+ if (candidate == nullptr) {
+ // We iterate over the array as long as it grows.
+ for (size_t i = 0; i < cycle_worklist.size(); ++i) {
+ HPhi* current = cycle_worklist[i];
+ DCHECK(!current->IsLoopHeaderPhi() ||
+ current->GetBlock()->IsLoopPreHeaderFirstPredecessor());
+
+ for (size_t j = 0; j < current->InputCount(); ++j) {
+ HInstruction* input = current->InputAt(j);
+ if (input == current) {
+ continue;
+ } else if (input->IsPhi()) {
+ if (!ContainsElement(visited_phis_in_cycle, input->GetId())) {
+ cycle_worklist.push_back(input->AsPhi());
+ visited_phis_in_cycle.insert(input->GetId());
+ catch_phi_in_cycle |= input->AsPhi()->IsCatchPhi();
+ } else {
+ // Already visited, nothing to do.
+ }
+ } else if (candidate == nullptr) {
+ candidate = input;
+ } else if (candidate != input) {
+ candidate = nullptr;
+ // Clear the cycle worklist to break out of the outer loop.
+ cycle_worklist.clear();
+ break;
+ }
+ }
+ }
+ }
+
if (candidate == nullptr) {
continue;
}
- // The candidate may not dominate a phi in a catch block.
- if (phi->IsCatchPhi() && !candidate->StrictlyDominates(phi)) {
- continue;
- }
-
- // Because we're updating the users of this phi, we may have new candidates
- // for elimination. Add phis that use this phi to the worklist.
- for (HUseIterator<HInstruction*> it(phi->GetUses()); !it.Done(); it.Advance()) {
- HUseListNode<HInstruction*>* current = it.Current();
- HInstruction* user = current->GetUser();
- if (user->IsPhi()) {
- worklist_.push_back(user->AsPhi());
+ for (HPhi* current : cycle_worklist) {
+ // The candidate may not dominate a phi in a catch block: there may be non-throwing
+ // instructions at the beginning of a try range, that may be the first input of
+ // catch phis.
+ // TODO(dbrazdil): Remove this situation by moving those non-throwing instructions
+ // before the try entry.
+ if (catch_phi_in_cycle) {
+ if (!candidate->StrictlyDominates(current)) {
+ continue;
+ }
+ } else {
+ DCHECK(candidate->StrictlyDominates(current));
}
- }
- phi->ReplaceWith(candidate);
- phi->GetBlock()->RemovePhi(phi);
+ // Because we're updating the users of this phi, we may have new candidates
+ // for elimination. Add phis that use this phi to the worklist.
+ for (HUseIterator<HInstruction*> it(current->GetUses()); !it.Done(); it.Advance()) {
+ HUseListNode<HInstruction*>* use = it.Current();
+ HInstruction* user = use->GetUser();
+ if (user->IsPhi() && !ContainsElement(visited_phis_in_cycle, user->GetId())) {
+ worklist_.push_back(user->AsPhi());
+ }
+ }
+ DCHECK(candidate->StrictlyDominates(current));
+ current->ReplaceWith(candidate);
+ current->GetBlock()->RemovePhi(current);
+ }
}
}
diff --git a/compiler/profile_assistant.cc b/compiler/profile_assistant.cc
new file mode 100644
index 0000000..81f2a56
--- /dev/null
+++ b/compiler/profile_assistant.cc
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "profile_assistant.h"
+
+namespace art {
+
+// Minimum number of new methods that profiles must contain to enable recompilation.
+static constexpr const uint32_t kMinNewMethodsForCompilation = 10;
+
+bool ProfileAssistant::ProcessProfiles(
+ const std::vector<std::string>& profile_files,
+ const std::vector<std::string>& reference_profile_files,
+ /*out*/ ProfileCompilationInfo** profile_compilation_info) {
+ DCHECK(!profile_files.empty());
+ DCHECK(reference_profile_files.empty() ||
+ (profile_files.size() == reference_profile_files.size()));
+
+ std::vector<ProfileCompilationInfo> new_info(profile_files.size());
+ bool should_compile = false;
+ // Read the main profile files.
+ for (size_t i = 0; i < profile_files.size(); i++) {
+ if (!new_info[i].Load(profile_files[i])) {
+ LOG(WARNING) << "Could not load profile file: " << profile_files[i];
+ return false;
+ }
+ // Do we have enough new profiled methods that will make the compilation worthwhile?
+ should_compile |= (new_info[i].GetNumberOfMethods() > kMinNewMethodsForCompilation);
+ }
+ if (!should_compile) {
+ *profile_compilation_info = nullptr;
+ return true;
+ }
+
+ std::unique_ptr<ProfileCompilationInfo> result(new ProfileCompilationInfo());
+ for (size_t i = 0; i < new_info.size(); i++) {
+ // Merge all data into a single object.
+ result->Load(new_info[i]);
+ // If we have any reference profile information merge their information with
+ // the current profiles and save them back to disk.
+ if (!reference_profile_files.empty()) {
+ if (!new_info[i].Load(reference_profile_files[i])) {
+ LOG(WARNING) << "Could not load reference profile file: " << reference_profile_files[i];
+ return false;
+ }
+ if (!new_info[i].Save(reference_profile_files[i])) {
+ LOG(WARNING) << "Could not save reference profile file: " << reference_profile_files[i];
+ return false;
+ }
+ }
+ }
+ *profile_compilation_info = result.release();
+ return true;
+}
+
+} // namespace art
diff --git a/compiler/profile_assistant.h b/compiler/profile_assistant.h
new file mode 100644
index 0000000..088c8bd
--- /dev/null
+++ b/compiler/profile_assistant.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_PROFILE_ASSISTANT_H_
+#define ART_COMPILER_PROFILE_ASSISTANT_H_
+
+#include <string>
+#include <vector>
+
+#include "jit/offline_profiling_info.h"
+
+namespace art {
+
+class ProfileAssistant {
+ public:
+ // Process the profile information present in the given files. Returns true
+ // if the analysis ended up successfully (i.e. no errors during reading,
+ // merging or writing of profile files).
+ //
+ // If the returned value is true and there is a significant difference between
+ // profile_files and reference_profile_files:
+ // - profile_compilation_info is set to a not null object that
+ // can be used to drive compilation. It will be the merge of all the data
+ // found in profile_files and reference_profile_files.
+ // - the data from profile_files[i] is merged into
+ // reference_profile_files[i] and the corresponding backing file is
+ // updated.
+ //
+ // If the returned value is false or the difference is insignificant,
+ // profile_compilation_info will be set to null.
+ //
+ // Additional notes:
+ // - as mentioned above, this function may update the content of the files
+ // passed with the reference_profile_files.
+ // - if reference_profile_files is not empty it must be the same size as
+ // profile_files.
+ static bool ProcessProfiles(
+ const std::vector<std::string>& profile_files,
+ const std::vector<std::string>& reference_profile_files,
+ /*out*/ ProfileCompilationInfo** profile_compilation_info);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ProfileAssistant);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_PROFILE_ASSISTANT_H_
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index afca8ad..0dc307c 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -400,6 +400,20 @@
EmitR(0, rs, rt, rd, 0, 0x07);
}
+void MipsAssembler::Ext(Register rd, Register rt, int pos, int size) {
+ CHECK(IsUint<5>(pos)) << pos;
+ CHECK(0 < size && size <= 32) << size;
+ CHECK(0 < pos + size && pos + size <= 32) << pos << " + " << size;
+ EmitR(0x1f, rt, rd, static_cast<Register>(size - 1), pos, 0x00);
+}
+
+void MipsAssembler::Ins(Register rd, Register rt, int pos, int size) {
+ CHECK(IsUint<5>(pos)) << pos;
+ CHECK(0 < size && size <= 32) << size;
+ CHECK(0 < pos + size && pos + size <= 32) << pos << " + " << size;
+ EmitR(0x1f, rt, rd, static_cast<Register>(pos + size - 1), pos, 0x04);
+}
+
void MipsAssembler::Lb(Register rt, Register rs, uint16_t imm16) {
EmitI(0x20, rs, rt, imm16);
}
@@ -1121,8 +1135,14 @@
}
void MipsAssembler::LoadConst64(Register reg_hi, Register reg_lo, int64_t value) {
- LoadConst32(reg_lo, Low32Bits(value));
- LoadConst32(reg_hi, High32Bits(value));
+ uint32_t low = Low32Bits(value);
+ uint32_t high = High32Bits(value);
+ LoadConst32(reg_lo, low);
+ if (high != low) {
+ LoadConst32(reg_hi, high);
+ } else {
+ Move(reg_hi, reg_lo);
+ }
}
void MipsAssembler::StoreConst32ToOffset(int32_t value,
@@ -1136,7 +1156,11 @@
base = AT;
offset = 0;
}
- LoadConst32(temp, value);
+ if (value == 0) {
+ temp = ZERO;
+ } else {
+ LoadConst32(temp, value);
+ }
Sw(temp, base, offset);
}
@@ -1152,22 +1176,48 @@
base = AT;
offset = 0;
}
- LoadConst32(temp, Low32Bits(value));
- Sw(temp, base, offset);
- LoadConst32(temp, High32Bits(value));
- Sw(temp, base, offset + kMipsWordSize);
+ uint32_t low = Low32Bits(value);
+ uint32_t high = High32Bits(value);
+ if (low == 0) {
+ Sw(ZERO, base, offset);
+ } else {
+ LoadConst32(temp, low);
+ Sw(temp, base, offset);
+ }
+ if (high == 0) {
+ Sw(ZERO, base, offset + kMipsWordSize);
+ } else {
+ if (high != low) {
+ LoadConst32(temp, high);
+ }
+ Sw(temp, base, offset + kMipsWordSize);
+ }
}
void MipsAssembler::LoadSConst32(FRegister r, int32_t value, Register temp) {
- LoadConst32(temp, value);
+ if (value == 0) {
+ temp = ZERO;
+ } else {
+ LoadConst32(temp, value);
+ }
Mtc1(temp, r);
}
void MipsAssembler::LoadDConst64(FRegister rd, int64_t value, Register temp) {
- LoadConst32(temp, Low32Bits(value));
- Mtc1(temp, rd);
- LoadConst32(temp, High32Bits(value));
- Mthc1(temp, rd);
+ uint32_t low = Low32Bits(value);
+ uint32_t high = High32Bits(value);
+ if (low == 0) {
+ Mtc1(ZERO, rd);
+ } else {
+ LoadConst32(temp, low);
+ Mtc1(temp, rd);
+ }
+ if (high == 0) {
+ Mthc1(ZERO, rd);
+ } else {
+ LoadConst32(temp, high);
+ Mthc1(temp, rd);
+ }
}
void MipsAssembler::Addiu32(Register rt, Register rs, int32_t value, Register temp) {
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index f569aa8..066e7b0 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -156,6 +156,8 @@
void Srlv(Register rd, Register rt, Register rs);
void Rotrv(Register rd, Register rt, Register rs); // R2+
void Srav(Register rd, Register rt, Register rs);
+ void Ext(Register rd, Register rt, int pos, int size); // R2+
+ void Ins(Register rd, Register rt, int pos, int size); // R2+
void Lb(Register rt, Register rs, uint16_t imm16);
void Lh(Register rt, Register rs, uint16_t imm16);
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index 6f8b3e8..4361843 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -367,6 +367,44 @@
DriverStr(RepeatRRR(&mips::MipsAssembler::Srav, "srav ${reg1}, ${reg2}, ${reg3}"), "Srav");
}
+TEST_F(AssemblerMIPSTest, Ins) {
+ std::vector<mips::Register*> regs = GetRegisters();
+ WarnOnCombinations(regs.size() * regs.size() * 33 * 16);
+ std::string expected;
+ for (mips::Register* reg1 : regs) {
+ for (mips::Register* reg2 : regs) {
+ for (int32_t pos = 0; pos < 32; pos++) {
+ for (int32_t size = 1; pos + size <= 32; size++) {
+ __ Ins(*reg1, *reg2, pos, size);
+ std::ostringstream instr;
+ instr << "ins $" << *reg1 << ", $" << *reg2 << ", " << pos << ", " << size << "\n";
+ expected += instr.str();
+ }
+ }
+ }
+ }
+ DriverStr(expected, "Ins");
+}
+
+TEST_F(AssemblerMIPSTest, Ext) {
+ std::vector<mips::Register*> regs = GetRegisters();
+ WarnOnCombinations(regs.size() * regs.size() * 33 * 16);
+ std::string expected;
+ for (mips::Register* reg1 : regs) {
+ for (mips::Register* reg2 : regs) {
+ for (int32_t pos = 0; pos < 32; pos++) {
+ for (int32_t size = 1; pos + size <= 32; size++) {
+ __ Ext(*reg1, *reg2, pos, size);
+ std::ostringstream instr;
+ instr << "ext $" << *reg1 << ", $" << *reg2 << ", " << pos << ", " << size << "\n";
+ expected += instr.str();
+ }
+ }
+ }
+ }
+ DriverStr(expected, "Ext");
+}
+
TEST_F(AssemblerMIPSTest, Lb) {
DriverStr(RepeatRRIb(&mips::MipsAssembler::Lb, -16, "lb ${reg1}, {imm}(${reg2})"), "Lb");
}
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 50480d9..c4f68ea 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -63,6 +63,7 @@
#include "gc/space/space-inl.h"
#include "image_writer.h"
#include "interpreter/unstarted_runtime.h"
+#include "jit/offline_profiling_info.h"
#include "leb128.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
@@ -70,6 +71,7 @@
#include "mirror/object_array-inl.h"
#include "oat_writer.h"
#include "os.h"
+#include "profile_assistant.h"
#include "runtime.h"
#include "runtime_options.h"
#include "ScopedLocalRef.h"
@@ -328,6 +330,16 @@
UsageError(" Example: --runtime-arg -Xms256m");
UsageError("");
UsageError(" --profile-file=<filename>: specify profiler output file to use for compilation.");
UsageError("      Can be specified multiple times, in which case the data from the different");
+ UsageError(" profiles will be aggregated.");
+ UsageError("");
+ UsageError(" --reference-profile-file=<filename>: specify a reference profile file to use when");
+ UsageError(" compiling. The data in this file will be compared with the data in the");
+ UsageError(" associated --profile-file and the compilation will proceed only if there is");
+ UsageError(" a significant difference (--reference-profile-file is paired with");
+ UsageError(" --profile-file in the natural order). If the compilation was attempted then");
+ UsageError(" --profile-file will be merged into --reference-profile-file. Valid only when");
+ UsageError(" specified together with --profile-file.");
UsageError("");
UsageError(" --print-pass-names: print a list of pass names");
UsageError("");
@@ -767,6 +779,13 @@
}
}
+ if (!profile_files_.empty()) {
+ if (!reference_profile_files_.empty() &&
+ (reference_profile_files_.size() != profile_files_.size())) {
+ Usage("If specified, --reference-profile-file should match the number of --profile-file.");
+ }
+ }
+
if (!parser_options->oat_symbols.empty()) {
oat_unstripped_ = std::move(parser_options->oat_symbols);
}
@@ -1057,8 +1076,10 @@
} else if (option.starts_with("--compiler-backend=")) {
ParseCompilerBackend(option, parser_options.get());
} else if (option.starts_with("--profile-file=")) {
- profile_file_ = option.substr(strlen("--profile-file=")).data();
- VLOG(compiler) << "dex2oat: profile file is " << profile_file_;
+ profile_files_.push_back(option.substr(strlen("--profile-file=")).ToString());
+ } else if (option.starts_with("--reference-profile-file=")) {
+ reference_profile_files_.push_back(
+ option.substr(strlen("--reference-profile-file=")).ToString());
} else if (option == "--no-profile-file") {
// No profile
} else if (option == "--host") {
@@ -1479,9 +1500,8 @@
dump_cfg_append_,
compiler_phases_timings_.get(),
swap_fd_,
- profile_file_,
- &dex_file_oat_filename_map_));
-
+ &dex_file_oat_filename_map_,
+ profile_compilation_info_.get()));
driver_->SetDexFilesForOatFile(dex_files_);
driver_->CompileAll(class_loader, dex_files_, timings_);
}
@@ -1790,6 +1810,26 @@
return is_host_;
}
+ bool UseProfileGuidedCompilation() const {
+ return !profile_files_.empty();
+ }
+
+ bool ProcessProfiles() {
+ DCHECK(UseProfileGuidedCompilation());
+ ProfileCompilationInfo* info = nullptr;
+ if (ProfileAssistant::ProcessProfiles(profile_files_, reference_profile_files_, &info)) {
+ profile_compilation_info_.reset(info);
+ return true;
+ }
+ return false;
+ }
+
+ bool ShouldCompileBasedOnProfiles() const {
+ DCHECK(UseProfileGuidedCompilation());
+ // If we are given profiles, compile only if we have new information.
+ return profile_compilation_info_ != nullptr;
+ }
+
private:
template <typename T>
static std::vector<T*> MakeNonOwningPointerVector(const std::vector<std::unique_ptr<T>>& src) {
@@ -2263,7 +2303,9 @@
int swap_fd_;
std::string app_image_file_name_;
int app_image_fd_;
- std::string profile_file_; // Profile file to use
+ std::vector<std::string> profile_files_;
+ std::vector<std::string> reference_profile_files_;
+ std::unique_ptr<ProfileCompilationInfo> profile_compilation_info_;
TimingLogger* timings_;
std::unique_ptr<CumulativeLogger> compiler_phases_timings_;
std::vector<std::vector<const DexFile*>> dex_files_per_oat_file_;
@@ -2380,6 +2422,20 @@
// Parse arguments. Argument mistakes will lead to exit(EXIT_FAILURE) in UsageError.
dex2oat.ParseArgs(argc, argv);
+ // Process profile information and assess if we need to do a profile guided compilation.
+ // This operation involves I/O.
+ if (dex2oat.UseProfileGuidedCompilation()) {
+ if (dex2oat.ProcessProfiles()) {
+ if (!dex2oat.ShouldCompileBasedOnProfiles()) {
+ LOG(INFO) << "Skipped compilation because of insignificant profile delta";
+ return EXIT_SUCCESS;
+ }
+ } else {
+ LOG(WARNING) << "Failed to process profile files";
+ return EXIT_FAILURE;
+ }
+ }
+
// Check early that the result of compilation can be written
if (!dex2oat.OpenFile()) {
return EXIT_FAILURE;
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index cd64a4f..ee7b21c 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -150,7 +150,9 @@
{ kSpecial2Mask | 0x3f, (28 << kOpcodeShift) | 0x3f, "sdbbp", "" }, // TODO: code
// SPECIAL3
+ { kSpecial3Mask | 0x3f, (31 << kOpcodeShift), "ext", "TSAZ", },
{ kSpecial3Mask | 0x3f, (31 << kOpcodeShift) | 3, "dext", "TSAZ", },
+ { kSpecial3Mask | 0x3f, (31 << kOpcodeShift) | 4, "ins", "TSAz", },
{ kSpecial3Mask | (0x1f << 21) | (0x1f << 6) | 0x3f,
(31 << kOpcodeShift) | (16 << 6) | 32,
"seb",
@@ -421,7 +423,7 @@
opcode = gMipsInstructions[i].name;
for (const char* args_fmt = gMipsInstructions[i].args_fmt; *args_fmt; ++args_fmt) {
switch (*args_fmt) {
- case 'A': // sa (shift amount or [d]ext position).
+ case 'A': // sa (shift amount or [d]ins/[d]ext position).
args << sa;
break;
case 'B': // Branch offset.
@@ -519,7 +521,8 @@
case 's': args << 'f' << rs; break;
case 'T': args << 'r' << rt; break;
case 't': args << 'f' << rt; break;
- case 'Z': args << rd; break; // sz ([d]ext size).
+ case 'Z': args << (rd + 1); break; // sz ([d]ext size).
+ case 'z': args << (rd - sa + 1); break; // sz ([d]ins size).
}
if (*(args_fmt + 1)) {
args << ", ";
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index b403abd..d836532 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -150,100 +150,6 @@
}
}
-bool PatchOat::Patch(const std::string& image_location, off_t delta,
- File* output_image, InstructionSet isa,
- TimingLogger* timings) {
- CHECK(Runtime::Current() == nullptr);
- CHECK(output_image != nullptr);
- CHECK_GE(output_image->Fd(), 0);
- CHECK(!image_location.empty()) << "image file must have a filename.";
- CHECK_NE(isa, kNone);
-
- TimingLogger::ScopedTiming t("Runtime Setup", timings);
- const char *isa_name = GetInstructionSetString(isa);
- std::string image_filename;
- if (!LocationToFilename(image_location, isa, &image_filename)) {
- LOG(ERROR) << "Unable to find image at location " << image_location;
- return false;
- }
- std::unique_ptr<File> input_image(OS::OpenFileForReading(image_filename.c_str()));
- if (input_image.get() == nullptr) {
- LOG(ERROR) << "unable to open input image file at " << image_filename
- << " for location " << image_location;
- return false;
- }
-
- int64_t image_len = input_image->GetLength();
- if (image_len < 0) {
- LOG(ERROR) << "Error while getting image length";
- return false;
- }
- ImageHeader image_header;
- if (sizeof(image_header) != input_image->Read(reinterpret_cast<char*>(&image_header),
- sizeof(image_header), 0)) {
- LOG(ERROR) << "Unable to read image header from image file " << input_image->GetPath();
- return false;
- }
-
- if (image_header.GetStorageMode() != ImageHeader::kStorageModeUncompressed) {
- LOG(ERROR) << "Patchoat is not supported with compressed image files "
- << input_image->GetPath();
- return false;
- }
-
- /*bool is_image_pic = */IsImagePic(image_header, input_image->GetPath());
- // Nothing special to do right now since the image always needs to get patched.
- // Perhaps in some far-off future we may have images with relative addresses that are true-PIC.
-
- // Set up the runtime
- RuntimeOptions options;
- NoopCompilerCallbacks callbacks;
- options.push_back(std::make_pair("compilercallbacks", &callbacks));
- std::string img = "-Ximage:" + image_location;
- options.push_back(std::make_pair(img.c_str(), nullptr));
- options.push_back(std::make_pair("imageinstructionset", reinterpret_cast<const void*>(isa_name)));
- options.push_back(std::make_pair("-Xno-sig-chain", nullptr));
- if (!Runtime::Create(options, false)) {
- LOG(ERROR) << "Unable to initialize runtime";
- return false;
- }
- // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start,
- // give it away now and then switch to a more manageable ScopedObjectAccess.
- Thread::Current()->TransitionFromRunnableToSuspended(kNative);
- ScopedObjectAccess soa(Thread::Current());
-
- t.NewTiming("Image and oat Patching setup");
- // Create the map where we will write the image patches to.
- std::string error_msg;
- std::unique_ptr<MemMap> image(MemMap::MapFile(image_len,
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE,
- input_image->Fd(),
- 0,
- /*low_4gb*/false,
- input_image->GetPath().c_str(),
- &error_msg));
- if (image.get() == nullptr) {
- LOG(ERROR) << "unable to map image file " << input_image->GetPath() << " : " << error_msg;
- return false;
- }
- // TODO: Support multi-image when patchoat is only patching images. Ever used? b/26317072
- gc::space::ImageSpace* ispc = Runtime::Current()->GetHeap()->GetBootImageSpaces()[0];
-
- PatchOat p(isa, image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(), delta, timings);
- t.NewTiming("Patching files");
- if (!p.PatchImage(true)) {
- LOG(ERROR) << "Failed to patch image file " << input_image->GetPath();
- return false;
- }
-
- t.NewTiming("Writing files");
- if (!p.WriteImage(output_image)) {
- return false;
- }
- return true;
-}
-
bool PatchOat::Patch(File* input_oat, const std::string& image_location, off_t delta,
File* output_oat, File* output_image, InstructionSet isa,
TimingLogger* timings,
@@ -634,7 +540,7 @@
// Note that we require that ReadFromMemory does not make an internal copy of the elements.
// This also relies on visit roots not doing any verification which could fail after we update
// the roots to be the image addresses.
- temp_table.ReadFromMemory(image_->Begin() + section.Offset());
+ temp_table.AddTableFromMemory(image_->Begin() + section.Offset());
FixupRootVisitor visitor(this);
temp_table.VisitRoots(&visitor, kVisitRootFlagAllRoots);
}
@@ -765,8 +671,6 @@
void PatchOat::PatchVisitor::operator() (mirror::Object* obj, MemberOffset off,
bool is_static_unused ATTRIBUTE_UNUSED) const {
mirror::Object* referent = obj->GetFieldObject<mirror::Object, kVerifyNone>(off);
- // TODO: Modify check for multi-image support? b/26317072
- // DCHECK(patcher_->InHeap(referent)) << "Referent is not in the heap.";
mirror::Object* moved_object = patcher_->RelocatedAddressOfPointer(referent);
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
}
@@ -775,8 +679,7 @@
mirror::Reference* ref) const {
MemberOffset off = mirror::Reference::ReferentOffset();
mirror::Object* referent = ref->GetReferent();
- // TODO: Modify check for multi-image support? b/26317072
- // DCHECK(patcher_->InHeap(referent)) << "Referent is not in the heap.";
+ DCHECK(patcher_->InHeap(referent)) << "Referent is not in the heap.";
mirror::Object* moved_object = patcher_->RelocatedAddressOfPointer(referent);
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
}
@@ -1271,8 +1174,12 @@
bool have_image_files = have_output_image;
bool have_oat_files = have_output_oat;
- if (!have_oat_files && !have_image_files) {
- Usage("Must be patching either an oat or an image file or both.");
+ if (!have_oat_files) {
+ if (have_image_files) {
+ Usage("Cannot patch an image file without an oat file");
+ } else {
+ Usage("Must be patching either an oat file or an image file with an oat file.");
+ }
}
if (!have_oat_files && !isa_set) {
@@ -1507,10 +1414,6 @@
output_oat_fd >= 0, // was it opened from FD?
new_oat_out);
ret = FinishFile(output_oat.get(), ret);
- } else if (have_image_files) {
- TimingLogger::ScopedTiming pt("patch image", &timings);
- ret = PatchOat::Patch(input_image_location, base_delta, output_image.get(), isa, &timings);
- ret = FinishFile(output_image.get(), ret);
} else {
CHECK(false);
ret = true;
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index cb0d14b..ceddc34 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -133,12 +133,11 @@
if (obj == nullptr) {
return nullptr;
}
- // TODO: Fix these checks for multi-image. Some may still be valid. b/26317072
- // DCHECK_GT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->Begin()));
- // DCHECK_LT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->End()));
+ DCHECK_GT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->Begin()));
+ DCHECK_LT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->End()));
uintptr_t heap_off =
reinterpret_cast<uintptr_t>(obj) - reinterpret_cast<uintptr_t>(heap_->Begin());
- // DCHECK_LT(heap_off, image_->Size());
+ DCHECK_LT(heap_off, image_->Size());
return reinterpret_cast<T*>(image_->Begin() + heap_off);
}
diff --git a/runtime/Android.mk b/runtime/Android.mk
index de4314c..14e5ec9 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -371,12 +371,6 @@
LIBART_TARGET_CFLAGS :=
LIBART_HOST_CFLAGS :=
-ifeq ($(MALLOC_IMPL),dlmalloc)
- LIBART_TARGET_CFLAGS += -DUSE_DLMALLOC
-else
- LIBART_TARGET_CFLAGS += -DUSE_JEMALLOC
-endif
-
# Default dex2oat instruction set features.
LIBART_HOST_DEFAULT_INSTRUCTION_SET_FEATURES := default
LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := default
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index effa1c5..6f36016 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -24,7 +24,6 @@
#include "debugger.h"
#include "dex_file-inl.h"
#include "dex_instruction.h"
-#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "interpreter/interpreter.h"
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 0518911..67458cc 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -736,7 +736,7 @@
static void SanityCheckArtMethod(ArtMethod* m,
mirror::Class* expected_class,
- std::vector<gc::space::ImageSpace*> spaces)
+ std::vector<gc::space::ImageSpace*>& spaces)
SHARED_REQUIRES(Locks::mutator_lock_) {
if (m->IsRuntimeMethod()) {
CHECK(m->GetDeclaringClass() == nullptr) << PrettyMethod(m);
@@ -760,7 +760,7 @@
static void SanityCheckArtMethodPointerArray(mirror::PointerArray* arr,
mirror::Class* expected_class,
size_t pointer_size,
- std::vector<gc::space::ImageSpace*> spaces)
+ std::vector<gc::space::ImageSpace*>& spaces)
SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(arr != nullptr);
for (int32_t j = 0; j < arr->GetLength(); ++j) {
@@ -775,27 +775,32 @@
}
}
-/* TODO: Modify check to support multiple image spaces and reenable. b/26317072
-static void SanityCheckArtMethodPointerArray(
- ArtMethod** arr,
- size_t size,
- size_t pointer_size,
- gc::space::ImageSpace* space) SHARED_REQUIRES(Locks::mutator_lock_) {
+static void SanityCheckArtMethodPointerArray(ArtMethod** arr,
+ size_t size,
+ size_t pointer_size,
+ std::vector<gc::space::ImageSpace*>& spaces)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK_EQ(arr != nullptr, size != 0u);
if (arr != nullptr) {
- auto offset = reinterpret_cast<uint8_t*>(arr) - space->Begin();
- CHECK(space->GetImageHeader().GetImageSection(
- ImageHeader::kSectionDexCacheArrays).Contains(offset));
+ bool contains = false;
+ for (auto space : spaces) {
+ auto offset = reinterpret_cast<uint8_t*>(arr) - space->Begin();
+ if (space->GetImageHeader().GetImageSection(
+ ImageHeader::kSectionDexCacheArrays).Contains(offset)) {
+ contains = true;
+ break;
+ }
+ }
+ CHECK(contains);
}
for (size_t j = 0; j < size; ++j) {
ArtMethod* method = mirror::DexCache::GetElementPtrSize(arr, j, pointer_size);
// expected_class == null means we are a dex cache.
if (method != nullptr) {
- SanityCheckArtMethod(method, nullptr, space);
+ SanityCheckArtMethod(method, nullptr, spaces);
}
}
}
-*/
static void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -1018,13 +1023,12 @@
return false;
}
- // TODO: Modify check to support multiple image spaces and reenable.
-// if (kSanityCheckObjects) {
-// SanityCheckArtMethodPointerArray(dex_cache->GetResolvedMethods(),
-// dex_cache->NumResolvedMethods(),
-// image_pointer_size_,
-// spaces);
-// }
+ if (kSanityCheckObjects) {
+ SanityCheckArtMethodPointerArray(dex_cache->GetResolvedMethods(),
+ dex_cache->NumResolvedMethods(),
+ image_pointer_size_,
+ spaces);
+ }
if (dex_file->GetLocationChecksum() != oat_dex_file->GetDexFileLocationChecksum()) {
*error_msg = StringPrintf("Checksums do not match for %s: %x vs %x",
@@ -1109,6 +1113,7 @@
mirror::Throwable::SetClass(GetClassRoot(kJavaLangThrowable));
mirror::StackTraceElement::SetClass(GetClassRoot(kJavaLangStackTraceElement));
+ size_t class_tables_added = 0;
for (gc::space::ImageSpace* space : spaces) {
const ImageHeader& header = space->GetImageHeader();
const ImageSection& section = header.GetImageSection(ImageHeader::kSectionClassTable);
@@ -1116,9 +1121,17 @@
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
ClassTable* const class_table = InsertClassTableForClassLoader(nullptr);
class_table->ReadFromMemory(space->Begin() + section.Offset());
- dex_cache_boot_image_class_lookup_required_ = false;
+ ++class_tables_added;
}
}
+ if (class_tables_added != 0) {
+ // Either all of the image spaces have an empty class section or none do. In the case where
+ // an image space has no classes, it will still have a non-empty class section that contains
+ // metadata.
+ CHECK_EQ(spaces.size(), class_tables_added)
+ << "Expected non-empty class section for each image space.";
+ dex_cache_boot_image_class_lookup_required_ = false;
+ }
FinishInit(self);
diff --git a/runtime/class_table.h b/runtime/class_table.h
index c911365..911f3c2 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -106,8 +106,7 @@
// Combines all of the tables into one class set.
size_t WriteToMemory(uint8_t* ptr) const
- REQUIRES(Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
size_t ReadFromMemory(uint8_t* ptr)
REQUIRES(Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 403dd4c..624abb9 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -436,17 +436,26 @@
}
}
-std::vector<std::string> CommonRuntimeTest::GetLibCoreDexFileNames() {
- return std::vector<std::string>({GetDexFileName("core-oj"), GetDexFileName("core-libart")});
-}
-
-std::string CommonRuntimeTest::GetDexFileName(const std::string& jar_prefix) {
- if (IsHost()) {
+static std::string GetDexFileName(const std::string& jar_prefix, bool host) {
+ std::string path;
+ if (host) {
const char* host_dir = getenv("ANDROID_HOST_OUT");
CHECK(host_dir != nullptr);
- return StringPrintf("%s/framework/%s-hostdex.jar", host_dir, jar_prefix.c_str());
+ path = host_dir;
+ } else {
+ path = GetAndroidRoot();
}
- return StringPrintf("%s/framework/%s.jar", GetAndroidRoot(), jar_prefix.c_str());
+
+ std::string suffix = host
+ ? "-hostdex" // The host version.
+ : "-testdex"; // The unstripped target version.
+
+ return StringPrintf("%s/framework/%s%s.jar", path.c_str(), jar_prefix.c_str(), suffix.c_str());
+}
+
+std::vector<std::string> CommonRuntimeTest::GetLibCoreDexFileNames() {
+ return std::vector<std::string>({GetDexFileName("core-oj", IsHost()),
+ GetDexFileName("core-libart", IsHost())});
}
std::string CommonRuntimeTest::GetTestAndroidRoot() {
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 8d9e628..7223b6e 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -118,9 +118,6 @@
// initializers, initialize well-known classes, and creates the heap thread pool.
virtual void FinalizeSetup();
- // Gets the path of the specified dex file for host or target.
- static std::string GetDexFileName(const std::string& jar_prefix);
-
std::string GetTestAndroidRoot();
std::string GetTestDexFileName(const char* name);
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index d68b463..40e2b15 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -18,6 +18,8 @@
#include <sstream>
+#include "ScopedLocalRef.h"
+
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/logging.h"
@@ -522,6 +524,104 @@
va_end(args);
}
+// Stack overflow.
+
+void ThrowStackOverflowError(Thread* self) {
+ if (self->IsHandlingStackOverflow()) {
+ LOG(ERROR) << "Recursive stack overflow.";
+ // We don't fail here because SetStackEndForStackOverflow will print better diagnostics.
+ }
+
+ self->SetStackEndForStackOverflow(); // Allow space on the stack for constructor to execute.
+ JNIEnvExt* env = self->GetJniEnv();
+ std::string msg("stack size ");
+ msg += PrettySize(self->GetStackSize());
+
+ // Avoid running Java code for exception initialization.
+ // TODO: Checks to make this a bit less brittle.
+
+ std::string error_msg;
+
+ // Allocate an uninitialized object.
+ ScopedLocalRef<jobject> exc(env,
+ env->AllocObject(WellKnownClasses::java_lang_StackOverflowError));
+ if (exc.get() != nullptr) {
+ // "Initialize".
+ // StackOverflowError -> VirtualMachineError -> Error -> Throwable -> Object.
+ // Only Throwable has "custom" fields:
+ // String detailMessage.
+ // Throwable cause (= this).
+ // List<Throwable> suppressedExceptions (= Collections.emptyList()).
+ // Object stackState;
+ // StackTraceElement[] stackTrace;
+ // Only Throwable has a non-empty constructor:
+ // this.stackTrace = EmptyArray.STACK_TRACE_ELEMENT;
+ // fillInStackTrace();
+
+ // detailMessage.
+ // TODO: Use String::FromModifiedUTF...?
+ ScopedLocalRef<jstring> s(env, env->NewStringUTF(msg.c_str()));
+ if (s.get() != nullptr) {
+ env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_detailMessage, s.get());
+
+ // cause.
+ env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_cause, exc.get());
+
+ // suppressedExceptions.
+ ScopedLocalRef<jobject> emptylist(env, env->GetStaticObjectField(
+ WellKnownClasses::java_util_Collections,
+ WellKnownClasses::java_util_Collections_EMPTY_LIST));
+ CHECK(emptylist.get() != nullptr);
+ env->SetObjectField(exc.get(),
+ WellKnownClasses::java_lang_Throwable_suppressedExceptions,
+ emptylist.get());
+
+ // stackState is set as result of fillInStackTrace. fillInStackTrace calls
+ // nativeFillInStackTrace.
+ ScopedLocalRef<jobject> stack_state_val(env, nullptr);
+ {
+ ScopedObjectAccessUnchecked soa(env);
+ stack_state_val.reset(soa.Self()->CreateInternalStackTrace<false>(soa));
+ }
+ if (stack_state_val.get() != nullptr) {
+ env->SetObjectField(exc.get(),
+ WellKnownClasses::java_lang_Throwable_stackState,
+ stack_state_val.get());
+
+ // stackTrace.
+ ScopedLocalRef<jobject> stack_trace_elem(env, env->GetStaticObjectField(
+ WellKnownClasses::libcore_util_EmptyArray,
+ WellKnownClasses::libcore_util_EmptyArray_STACK_TRACE_ELEMENT));
+ env->SetObjectField(exc.get(),
+ WellKnownClasses::java_lang_Throwable_stackTrace,
+ stack_trace_elem.get());
+ } else {
+ error_msg = "Could not create stack trace.";
+ }
+ // Throw the exception.
+ self->SetException(reinterpret_cast<mirror::Throwable*>(self->DecodeJObject(exc.get())));
+ } else {
+ // Could not allocate a string object.
+ error_msg = "Couldn't throw new StackOverflowError because JNI NewStringUTF failed.";
+ }
+ } else {
+ error_msg = "Could not allocate StackOverflowError object.";
+ }
+
+ if (!error_msg.empty()) {
+ LOG(WARNING) << error_msg;
+ CHECK(self->IsExceptionPending());
+ }
+
+ bool explicit_overflow_check = Runtime::Current()->ExplicitStackOverflowChecks();
+ self->ResetDefaultStackEnd(); // Return to default stack size.
+
+ // And restore protection if implicit checks are on.
+ if (!explicit_overflow_check) {
+ self->ProtectStack();
+ }
+}
+
// VerifyError
void ThrowVerifyError(mirror::Class* referrer, const char* fmt, ...) {
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 2a0934f..85fe2b3 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -154,10 +154,10 @@
void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c,
const StringPiece& type, const StringPiece& name)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
void ThrowNoSuchFieldException(mirror::Class* c, const StringPiece& name)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
// NoSuchMethodError
@@ -194,6 +194,10 @@
__attribute__((__format__(__printf__, 1, 2)))
SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+// Stack overflow.
+
+void ThrowStackOverflowError(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+
// VerifyError
void ThrowVerifyError(mirror::Class* referrer, const char* fmt, ...)
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index f009fe6..6e11cf8 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -726,12 +726,11 @@
JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
JDWP::JdwpError error;
- mirror::Object* o = gRegistry->Get<mirror::Object*>(id, &error);
- if (o == nullptr) {
- return JDWP::ERR_INVALID_OBJECT;
+ mirror::Class* c = DecodeClass(id, &error);
+ if (c == nullptr) {
+ return error;
}
- DCHECK(o->IsClass());
- expandBufAddObjectId(pReply, gRegistry->Add(o->AsClass()->GetClassLoader()));
+ expandBufAddObjectId(pReply, gRegistry->Add(c->GetClassLoader()));
return JDWP::ERR_NONE;
}
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 915d9ab..b5a55bf 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -33,7 +33,6 @@
#include "oat_quick_method_header.h"
#include "reflection.h"
#include "scoped_thread_state_change.h"
-#include "ScopedLocalRef.h"
#include "well_known_classes.h"
namespace art {
@@ -120,102 +119,6 @@
heap->GetCurrentAllocator());
}
-void ThrowStackOverflowError(Thread* self) {
- if (self->IsHandlingStackOverflow()) {
- LOG(ERROR) << "Recursive stack overflow.";
- // We don't fail here because SetStackEndForStackOverflow will print better diagnostics.
- }
-
- self->SetStackEndForStackOverflow(); // Allow space on the stack for constructor to execute.
- JNIEnvExt* env = self->GetJniEnv();
- std::string msg("stack size ");
- msg += PrettySize(self->GetStackSize());
-
- // Avoid running Java code for exception initialization.
- // TODO: Checks to make this a bit less brittle.
-
- std::string error_msg;
-
- // Allocate an uninitialized object.
- ScopedLocalRef<jobject> exc(env,
- env->AllocObject(WellKnownClasses::java_lang_StackOverflowError));
- if (exc.get() != nullptr) {
- // "Initialize".
- // StackOverflowError -> VirtualMachineError -> Error -> Throwable -> Object.
- // Only Throwable has "custom" fields:
- // String detailMessage.
- // Throwable cause (= this).
- // List<Throwable> suppressedExceptions (= Collections.emptyList()).
- // Object stackState;
- // StackTraceElement[] stackTrace;
- // Only Throwable has a non-empty constructor:
- // this.stackTrace = EmptyArray.STACK_TRACE_ELEMENT;
- // fillInStackTrace();
-
- // detailMessage.
- // TODO: Use String::FromModifiedUTF...?
- ScopedLocalRef<jstring> s(env, env->NewStringUTF(msg.c_str()));
- if (s.get() != nullptr) {
- env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_detailMessage, s.get());
-
- // cause.
- env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_cause, exc.get());
-
- // suppressedExceptions.
- ScopedLocalRef<jobject> emptylist(env, env->GetStaticObjectField(
- WellKnownClasses::java_util_Collections,
- WellKnownClasses::java_util_Collections_EMPTY_LIST));
- CHECK(emptylist.get() != nullptr);
- env->SetObjectField(exc.get(),
- WellKnownClasses::java_lang_Throwable_suppressedExceptions,
- emptylist.get());
-
- // stackState is set as result of fillInStackTrace. fillInStackTrace calls
- // nativeFillInStackTrace.
- ScopedLocalRef<jobject> stack_state_val(env, nullptr);
- {
- ScopedObjectAccessUnchecked soa(env);
- stack_state_val.reset(soa.Self()->CreateInternalStackTrace<false>(soa));
- }
- if (stack_state_val.get() != nullptr) {
- env->SetObjectField(exc.get(),
- WellKnownClasses::java_lang_Throwable_stackState,
- stack_state_val.get());
-
- // stackTrace.
- ScopedLocalRef<jobject> stack_trace_elem(env, env->GetStaticObjectField(
- WellKnownClasses::libcore_util_EmptyArray,
- WellKnownClasses::libcore_util_EmptyArray_STACK_TRACE_ELEMENT));
- env->SetObjectField(exc.get(),
- WellKnownClasses::java_lang_Throwable_stackTrace,
- stack_trace_elem.get());
- } else {
- error_msg = "Could not create stack trace.";
- }
- // Throw the exception.
- self->SetException(reinterpret_cast<mirror::Throwable*>(self->DecodeJObject(exc.get())));
- } else {
- // Could not allocate a string object.
- error_msg = "Couldn't throw new StackOverflowError because JNI NewStringUTF failed.";
- }
- } else {
- error_msg = "Could not allocate StackOverflowError object.";
- }
-
- if (!error_msg.empty()) {
- LOG(WARNING) << error_msg;
- CHECK(self->IsExceptionPending());
- }
-
- bool explicit_overflow_check = Runtime::Current()->ExplicitStackOverflowChecks();
- self->ResetDefaultStackEnd(); // Return to default stack size.
-
- // And restore protection if implicit checks are on.
- if (!explicit_overflow_check) {
- self->ProtectStack();
- }
-}
-
void CheckReferenceResult(mirror::Object* o, Thread* self) {
if (o == nullptr) {
return;
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 0469ee6..a28376f 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -158,8 +158,6 @@
uint32_t type_idx, ArtMethod* referrer, Thread* self, bool can_run_clinit, bool verify_access)
SHARED_REQUIRES(Locks::mutator_lock_);
-extern void ThrowStackOverflowError(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
-
inline mirror::String* ResolveStringFromCode(ArtMethod* referrer, uint32_t string_idx)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 5a82b3a..5256fea 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -16,7 +16,6 @@
#include "callee_save_frame.h"
#include "common_throws.h"
-#include "entrypoints/entrypoint_utils-inl.h"
#include "mirror/object-inl.h"
#include "thread.h"
#include "well_known_classes.h"
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 7f67ae4..d6c1817 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -571,15 +571,19 @@
// Check that there's no gap between the image space and the non moving space so that the
// immune region won't break (eg. due to a large object allocated in the gap). This is only
// required when we're the zygote or using GSS.
- /* TODO: Modify this check to support multi-images. b/26317072
- bool no_gap = MemMap::CheckNoGaps(GetBootImageSpace()->GetMemMap(),
- non_moving_space_->GetMemMap());
+ // Space with smallest Begin().
+ space::ImageSpace* first_space = nullptr;
+ for (space::ImageSpace* space : boot_image_spaces_) {
+ if (first_space == nullptr || space->Begin() < first_space->Begin()) {
+ first_space = space;
+ }
+ }
+ bool no_gap = MemMap::CheckNoGaps(first_space->GetMemMap(), non_moving_space_->GetMemMap());
if (!no_gap) {
PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
MemMap::DumpMaps(LOG(ERROR), true);
LOG(FATAL) << "There's a gap between the image space and the non-moving space";
}
- */
}
instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
if (gc_stress_mode_) {
@@ -2333,7 +2337,7 @@
if (HasZygoteSpace()) {
return;
}
- Runtime::Current()->GetInternTable()->SwapPostZygoteWithPreZygote();
+ Runtime::Current()->GetInternTable()->AddNewTable();
Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote();
VLOG(heap) << "Starting PreZygoteFork";
// Trim the pages at the end of the non moving space.
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index dfdbd04..5f6bb8e 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -47,13 +47,15 @@
const char* image_location,
MemMap* mem_map,
accounting::ContinuousSpaceBitmap* live_bitmap,
- uint8_t* end,
- MemMap* shadow_map)
- : MemMapSpace(image_filename, mem_map, mem_map->Begin(), end, end,
+ uint8_t* end)
+ : MemMapSpace(image_filename,
+ mem_map,
+ mem_map->Begin(),
+ end,
+ end,
kGcRetentionPolicyNeverCollect),
oat_file_non_owned_(nullptr),
- image_location_(image_location),
- shadow_map_(shadow_map) {
+ image_location_(image_location) {
DCHECK(live_bitmap != nullptr);
live_bitmap_.reset(live_bitmap);
}
@@ -800,54 +802,19 @@
uint32_t bitmap_index = bitmap_index_.FetchAndAddSequentiallyConsistent(1);
std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u", image_filename,
bitmap_index));
+ // Bitmap only needs to cover until the end of the mirror objects section.
+ const ImageSection& image_objects = image_header.GetImageSection(ImageHeader::kSectionObjects);
std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
accounting::ContinuousSpaceBitmap::CreateFromMemMap(
bitmap_name,
image_bitmap_map.release(),
reinterpret_cast<uint8_t*>(map->Begin()),
- accounting::ContinuousSpaceBitmap::ComputeHeapSize(bitmap_section.Size())));
+ image_objects.End()));
if (bitmap == nullptr) {
*error_msg = StringPrintf("Could not create bitmap '%s'", bitmap_name.c_str());
return nullptr;
}
- // In case of multi-images, the images are spaced apart so that the bitmaps don't overlap. We
- // need to reserve the slack, as otherwise the large object space might allocate in there.
- // TODO: Reconsider the multi-image layout. b/26317072
- std::unique_ptr<MemMap> shadow_map;
- {
- uintptr_t image_begin = reinterpret_cast<uintptr_t>(image_header.GetImageBegin());
- uintptr_t image_end = RoundUp(image_begin + image_header.GetImageSize(), kPageSize);
- uintptr_t oat_begin = reinterpret_cast<uintptr_t>(image_header.GetOatFileBegin());
- if (image_end < oat_begin) {
- // There's a gap. Could be multi-image, could be the oat file spaced apart. Go ahead and
- // dummy-reserve the space covered by the bitmap (which will be a shadow that introduces
- // a gap to the next image).
- uintptr_t heap_size = bitmap->HeapSize();
- uintptr_t bitmap_coverage_end = RoundUp(image_begin + heap_size, kPageSize);
- if (bitmap_coverage_end > image_end) {
- VLOG(startup) << "Reserving bitmap shadow ["
- << std::hex << image_end << ";"
- << std::hex << bitmap_coverage_end << ";] (oat file begins at "
- << std::hex << oat_begin;
- // Note: we cannot use MemMap::Dummy here, as that won't reserve the space in 32-bit mode.
- shadow_map.reset(MemMap::MapAnonymous("Image bitmap shadow",
- reinterpret_cast<uint8_t*>(image_end),
- bitmap_coverage_end - image_end,
- PROT_NONE,
- false,
- false,
- error_msg));
- if (shadow_map == nullptr) {
- return nullptr;
- }
- // madvise it away, we don't really want it, just reserve the address space.
- // TODO: Should we use MadviseDontNeedAndZero? b/26317072
- madvise(shadow_map->BaseBegin(), shadow_map->BaseSize(), MADV_DONTNEED);
- }
- }
- }
-
// We only want the mirror object, not the ArtFields and ArtMethods.
uint8_t* const image_end =
map->Begin() + image_header.GetImageSection(ImageHeader::kSectionObjects).End();
@@ -855,8 +822,7 @@
image_location,
map.release(),
bitmap.release(),
- image_end,
- shadow_map.release()));
+ image_end));
// VerifyImageAllocations() will be called later in Runtime::Init()
// as some class roots like ArtMethod::java_lang_reflect_ArtMethod_
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index b8ae4a0..9c8e8b2 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -171,8 +171,7 @@
const char* image_location,
MemMap* mem_map,
accounting::ContinuousSpaceBitmap* live_bitmap,
- uint8_t* end,
- MemMap* shadow_map = nullptr);
+ uint8_t* end);
// The OatFile associated with the image during early startup to
// reserve space contiguous to the image. It is later released to
@@ -185,10 +184,6 @@
const std::string image_location_;
- // A MemMap reserving the space of the bitmap "shadow," so that we don't allocate into it. Only
- // used in the multi-image case.
- std::unique_ptr<MemMap> shadow_map_;
-
private:
DISALLOW_COPY_AND_ASSIGN(ImageSpace);
};
diff --git a/runtime/image.cc b/runtime/image.cc
index 3856787..3cb6642 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '4', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '5', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index d035f5d..015bf98 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -32,7 +32,8 @@
namespace art {
InternTable::InternTable()
- : image_added_to_intern_table_(false), log_new_roots_(false),
+ : images_added_to_intern_table_(false),
+ log_new_roots_(false),
weak_intern_condition_("New intern condition", *Locks::intern_table_lock_),
weak_root_state_(gc::kWeakRootStateNormal) {
}
@@ -93,10 +94,10 @@
return weak_interns_.Find(s);
}
-void InternTable::SwapPostZygoteWithPreZygote() {
+void InternTable::AddNewTable() {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- weak_interns_.SwapPostZygoteWithPreZygote();
- strong_interns_.SwapPostZygoteWithPreZygote();
+ weak_interns_.AddNewTable();
+ strong_interns_.AddNewTable();
}
mirror::String* InternTable::InsertStrong(mirror::String* s) {
@@ -150,15 +151,14 @@
RemoveWeak(s);
}
-void InternTable::AddImageStringsToTable(gc::space::ImageSpace* image_space) {
- CHECK(image_space != nullptr);
+void InternTable::AddImagesStringsToTable(const std::vector<gc::space::ImageSpace*>& image_spaces) {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- if (!image_added_to_intern_table_) {
+ for (gc::space::ImageSpace* image_space : image_spaces) {
const ImageHeader* const header = &image_space->GetImageHeader();
// Check if we have the interned strings section.
const ImageSection& section = header->GetImageSection(ImageHeader::kSectionInternedStrings);
if (section.Size() > 0) {
- ReadFromMemoryLocked(image_space->Begin() + section.Offset());
+ AddTableFromMemoryLocked(image_space->Begin() + section.Offset());
} else {
// TODO: Delete this logic?
mirror::Object* root = header->GetImageRoot(ImageHeader::kDexCaches);
@@ -179,15 +179,13 @@
}
}
}
- image_added_to_intern_table_ = true;
}
+ images_added_to_intern_table_ = true;
}
mirror::String* InternTable::LookupStringFromImage(mirror::String* s) {
- if (image_added_to_intern_table_) {
- return nullptr;
- }
- std::vector<gc::space::ImageSpace*> image_spaces =
+ DCHECK(!images_added_to_intern_table_);
+ const std::vector<gc::space::ImageSpace*>& image_spaces =
Runtime::Current()->GetHeap()->GetBootImageSpaces();
if (image_spaces.empty()) {
return nullptr; // No image present.
@@ -284,9 +282,11 @@
return weak;
}
// Check the image for a match.
- mirror::String* image = LookupStringFromImage(s);
- if (image != nullptr) {
- return is_strong ? InsertStrong(image) : InsertWeak(image);
+ if (!images_added_to_intern_table_) {
+ mirror::String* const image_string = LookupStringFromImage(s);
+ if (image_string != nullptr) {
+ return is_strong ? InsertStrong(image_string) : InsertWeak(image_string);
+ }
}
// No match in the strong table or the weak table. Insert into the strong / weak table.
return is_strong ? InsertStrong(s) : InsertWeak(s);
@@ -326,27 +326,18 @@
weak_interns_.SweepWeaks(visitor);
}
-void InternTable::AddImageInternTable(gc::space::ImageSpace* image_space) {
- const ImageSection& intern_section = image_space->GetImageHeader().GetImageSection(
- ImageHeader::kSectionInternedStrings);
- // Read the string tables from the image.
- const uint8_t* ptr = image_space->Begin() + intern_section.Offset();
- const size_t offset = ReadFromMemory(ptr);
- CHECK_LE(offset, intern_section.Size());
-}
-
-size_t InternTable::ReadFromMemory(const uint8_t* ptr) {
+size_t InternTable::AddTableFromMemory(const uint8_t* ptr) {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- return ReadFromMemoryLocked(ptr);
+ return AddTableFromMemoryLocked(ptr);
}
-size_t InternTable::ReadFromMemoryLocked(const uint8_t* ptr) {
- return strong_interns_.ReadIntoPreZygoteTable(ptr);
+size_t InternTable::AddTableFromMemoryLocked(const uint8_t* ptr) {
+ return strong_interns_.AddTableFromMemory(ptr);
}
size_t InternTable::WriteToMemory(uint8_t* ptr) {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- return strong_interns_.WriteFromPostZygoteTable(ptr);
+ return strong_interns_.WriteToMemory(ptr);
}
std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) const {
@@ -364,71 +355,87 @@
return a.Read()->Equals(b.Read());
}
-size_t InternTable::Table::ReadIntoPreZygoteTable(const uint8_t* ptr) {
- CHECK_EQ(pre_zygote_table_.Size(), 0u);
+size_t InternTable::Table::AddTableFromMemory(const uint8_t* ptr) {
size_t read_count = 0;
- pre_zygote_table_ = UnorderedSet(ptr, false /* make copy */, &read_count);
+ UnorderedSet set(ptr, /*make copy*/false, &read_count);
+ // TODO: Disable this for app images if app images have intern tables.
+ static constexpr bool kCheckDuplicates = true;
+ if (kCheckDuplicates) {
+ for (GcRoot<mirror::String>& string : set) {
+ CHECK(Find(string.Read()) == nullptr) << "Already found " << string.Read()->ToModifiedUtf8();
+ }
+ }
+ // Insert at the front since we insert into the back.
+ tables_.insert(tables_.begin(), std::move(set));
return read_count;
}
-size_t InternTable::Table::WriteFromPostZygoteTable(uint8_t* ptr) {
- return post_zygote_table_.WriteToMemory(ptr);
+size_t InternTable::Table::WriteToMemory(uint8_t* ptr) {
+ if (tables_.empty()) {
+ return 0;
+ }
+ UnorderedSet* table_to_write;
+ UnorderedSet combined;
+ if (tables_.size() > 1) {
+ table_to_write = &combined;
+ for (UnorderedSet& table : tables_) {
+ for (GcRoot<mirror::String>& string : table) {
+ combined.Insert(string);
+ }
+ }
+ } else {
+ table_to_write = &tables_.back();
+ }
+ return table_to_write->WriteToMemory(ptr);
}
void InternTable::Table::Remove(mirror::String* s) {
- auto it = post_zygote_table_.Find(GcRoot<mirror::String>(s));
- if (it != post_zygote_table_.end()) {
- post_zygote_table_.Erase(it);
- } else {
- it = pre_zygote_table_.Find(GcRoot<mirror::String>(s));
- DCHECK(it != pre_zygote_table_.end());
- pre_zygote_table_.Erase(it);
+ for (UnorderedSet& table : tables_) {
+ auto it = table.Find(GcRoot<mirror::String>(s));
+ if (it != table.end()) {
+ table.Erase(it);
+ return;
+ }
}
+ LOG(FATAL) << "Attempting to remove non-interned string " << s->ToModifiedUtf8();
}
mirror::String* InternTable::Table::Find(mirror::String* s) {
Locks::intern_table_lock_->AssertHeld(Thread::Current());
- auto it = pre_zygote_table_.Find(GcRoot<mirror::String>(s));
- if (it != pre_zygote_table_.end()) {
- return it->Read();
- }
- it = post_zygote_table_.Find(GcRoot<mirror::String>(s));
- if (it != post_zygote_table_.end()) {
- return it->Read();
+ for (UnorderedSet& table : tables_) {
+ auto it = table.Find(GcRoot<mirror::String>(s));
+ if (it != table.end()) {
+ return it->Read();
+ }
}
return nullptr;
}
-void InternTable::Table::SwapPostZygoteWithPreZygote() {
- if (pre_zygote_table_.Empty()) {
- std::swap(pre_zygote_table_, post_zygote_table_);
- VLOG(heap) << "Swapping " << pre_zygote_table_.Size() << " interns to the pre zygote table";
- } else {
- // This case happens if read the intern table from the image.
- VLOG(heap) << "Not swapping due to non-empty pre_zygote_table_";
- }
+void InternTable::Table::AddNewTable() {
+ tables_.push_back(UnorderedSet());
}
void InternTable::Table::Insert(mirror::String* s) {
- // Always insert the post zygote table, this gets swapped when we create the zygote to be the
- // pre zygote table.
- post_zygote_table_.Insert(GcRoot<mirror::String>(s));
+ // Always insert the last table, the image tables are before and we avoid inserting into these
+ // to prevent dirty pages.
+ DCHECK(!tables_.empty());
+ tables_.back().Insert(GcRoot<mirror::String>(s));
}
void InternTable::Table::VisitRoots(RootVisitor* visitor) {
BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
visitor, RootInfo(kRootInternedString));
- for (auto& intern : pre_zygote_table_) {
- buffered_visitor.VisitRoot(intern);
- }
- for (auto& intern : post_zygote_table_) {
- buffered_visitor.VisitRoot(intern);
+ for (UnorderedSet& table : tables_) {
+ for (auto& intern : table) {
+ buffered_visitor.VisitRoot(intern);
+ }
}
}
void InternTable::Table::SweepWeaks(IsMarkedVisitor* visitor) {
- SweepWeaks(&pre_zygote_table_, visitor);
- SweepWeaks(&post_zygote_table_, visitor);
+ for (UnorderedSet& table : tables_) {
+ SweepWeaks(&table, visitor);
+ }
}
void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor) {
@@ -446,7 +453,12 @@
}
size_t InternTable::Table::Size() const {
- return pre_zygote_table_.Size() + post_zygote_table_.Size();
+ return std::accumulate(tables_.begin(),
+ tables_.end(),
+ 0U,
+ [](size_t sum, const UnorderedSet& set) {
+ return sum + set.Size();
+ });
}
void InternTable::ChangeWeakRootState(gc::WeakRootState new_state) {
@@ -464,10 +476,10 @@
InternTable::Table::Table() {
Runtime* const runtime = Runtime::Current();
- pre_zygote_table_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
- runtime->GetHashTableMaxLoadFactor());
- post_zygote_table_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
- runtime->GetHashTableMaxLoadFactor());
+ // Initial table.
+ tables_.push_back(UnorderedSet());
+ tables_.back().SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
+ runtime->GetHashTableMaxLoadFactor());
}
} // namespace art
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 3a4e8d8..8f715a3 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -98,22 +98,20 @@
void BroadcastForNewInterns() SHARED_REQUIRES(Locks::mutator_lock_);
- // Adds all of the resolved image strings from the image space into the intern table. The
- // advantage of doing this is preventing expensive DexFile::FindStringId calls.
- void AddImageStringsToTable(gc::space::ImageSpace* image_space)
+ // Adds all of the resolved image strings from the image spaces into the intern table. The
+ // advantage of doing this is preventing expensive DexFile::FindStringId calls. Sets
+ // images_added_to_intern_table_ to true.
+ void AddImagesStringsToTable(const std::vector<gc::space::ImageSpace*>& image_spaces)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
- // Copy the post zygote tables to pre zygote to save memory by preventing dirty pages.
- void SwapPostZygoteWithPreZygote()
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
-
- // Add an intern table which was serialized to the image.
- void AddImageInternTable(gc::space::ImageSpace* image_space)
+ // Add a new intern table for inserting to, previous intern tables are still there but no
+ // longer inserted into and ideally unmodified. This is done to prevent dirty pages.
+ void AddNewTable()
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
// Read the intern table from memory. The elements aren't copied, the intern hash set data will
// point to somewhere within ptr. Only reads the strong interns.
- size_t ReadFromMemory(const uint8_t* ptr) REQUIRES(!Locks::intern_table_lock_)
+ size_t AddTableFromMemory(const uint8_t* ptr) REQUIRES(!Locks::intern_table_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
// Write the post zygote intern table to a pointer. Only writes the strong interns since it is
@@ -157,15 +155,17 @@
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
void SweepWeaks(IsMarkedVisitor* visitor)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
- void SwapPostZygoteWithPreZygote() REQUIRES(Locks::intern_table_lock_);
+ // Add a new intern table that will only be inserted into from now on.
+ void AddNewTable() REQUIRES(Locks::intern_table_lock_);
size_t Size() const REQUIRES(Locks::intern_table_lock_);
- // Read pre zygote table is called from ReadFromMemory which happens during runtime creation
- // when we load the image intern table. Returns how many bytes were read.
- size_t ReadIntoPreZygoteTable(const uint8_t* ptr)
+ // Read and add an intern table from ptr.
+ // Tables read are inserted at the front of the table array. Only checks for conflicts in
+ // debug builds. Returns how many bytes were read.
+ size_t AddTableFromMemory(const uint8_t* ptr)
REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
- // The image writer calls WritePostZygoteTable through WriteToMemory, it writes the interns in
- // the post zygote table. Returns how many bytes were written.
- size_t WriteFromPostZygoteTable(uint8_t* ptr)
+ // Write the intern tables to ptr, if there are multiple tables they are combined into a single
+ // one. Returns how many bytes were written.
+ size_t WriteToMemory(uint8_t* ptr)
REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
private:
@@ -175,12 +175,9 @@
void SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
- // We call SwapPostZygoteWithPreZygote when we create the zygote to reduce private dirty pages
- // caused by modifying the zygote intern table hash table. The pre zygote table are the
- // interned strings which were interned before we created the zygote space. Post zygote is self
- // explanatory.
- UnorderedSet pre_zygote_table_;
- UnorderedSet post_zygote_table_;
+ // We call AddNewTable when we create the zygote to reduce private dirty pages caused by
+ // modifying the zygote intern table. The back of table is modified when strings are interned.
+ std::vector<UnorderedSet> tables_;
};
// Insert if non null, otherwise return null. Must be called holding the mutator lock.
@@ -214,7 +211,7 @@
void RemoveWeakFromTransaction(mirror::String* s)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
- size_t ReadFromMemoryLocked(const uint8_t* ptr)
+ size_t AddTableFromMemoryLocked(const uint8_t* ptr)
REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
// Change the weak root state. May broadcast to waiters.
@@ -225,7 +222,7 @@
void WaitUntilAccessible(Thread* self)
REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
- bool image_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
+ bool images_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_);
ConditionVariable weak_intern_condition_ GUARDED_BY(Locks::intern_table_lock_);
// Since this contains (strong) roots, they need a read barrier to
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 871fad7..8d5a61a 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -18,6 +18,7 @@
#include <limits>
+#include "common_throws.h"
#include "interpreter_common.h"
#include "mirror/string-inl.h"
#include "scoped_thread_state_change.h"
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 08eac0e..c260ca4 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -668,6 +668,11 @@
return nullptr;
}
info = new (data) ProfilingInfo(method, entries);
+
+ // Make sure other threads see the data in the profiling info object before the
+ // store in the ArtMethod's ProfilingInfo pointer.
+ QuasiAtomic::ThreadFenceRelease();
+
method->SetProfilingInfo(info);
profiling_infos_.push_back(info);
return info;
diff --git a/runtime/jit/offline_profiling_info.cc b/runtime/jit/offline_profiling_info.cc
index 5dc0e45..a132701 100644
--- a/runtime/jit/offline_profiling_info.cc
+++ b/runtime/jit/offline_profiling_info.cc
@@ -30,38 +30,40 @@
namespace art {
-void OfflineProfilingInfo::SaveProfilingInfo(const std::string& filename,
- const std::vector<ArtMethod*>& methods) {
+bool ProfileCompilationInfo::SaveProfilingInfo(const std::string& filename,
+ const std::vector<ArtMethod*>& methods) {
if (methods.empty()) {
VLOG(profiler) << "No info to save to " << filename;
- return;
+ return true;
}
- DexFileToMethodsMap info;
+ ProfileCompilationInfo info;
+ if (!info.Load(filename)) {
+ LOG(WARNING) << "Could not load previous profile data from file " << filename;
+ return false;
+ }
{
ScopedObjectAccess soa(Thread::Current());
for (auto it = methods.begin(); it != methods.end(); it++) {
- AddMethodInfo(*it, &info);
+ const DexFile* dex_file = (*it)->GetDexFile();
+ if (!info.AddData(dex_file->GetLocation(),
+ dex_file->GetLocationChecksum(),
+ (*it)->GetDexMethodIndex())) {
+ return false;
+ }
}
}
// This doesn't need locking because we are trying to lock the file for exclusive
// access and fail immediately if we can't.
- if (Serialize(filename, info)) {
+ bool result = info.Save(filename);
+ if (result) {
VLOG(profiler) << "Successfully saved profile info to " << filename
<< " Size: " << GetFileSizeBytes(filename);
+ } else {
+ VLOG(profiler) << "Failed to save profile info to " << filename;
}
-}
-
-void OfflineProfilingInfo::AddMethodInfo(ArtMethod* method, DexFileToMethodsMap* info) {
- DCHECK(method != nullptr);
- const DexFile* dex_file = method->GetDexFile();
-
- auto info_it = info->find(dex_file);
- if (info_it == info->end()) {
- info_it = info->Put(dex_file, std::set<uint32_t>());
- }
- info_it->second.insert(method->GetDexMethodIndex());
+ return result;
}
enum OpenMode {
@@ -77,9 +79,7 @@
break;
case READ_WRITE:
// TODO(calin) allow the shared uid of the app to access the file.
- fd = open(filename.c_str(),
- O_CREAT | O_WRONLY | O_TRUNC | O_NOFOLLOW | O_CLOEXEC,
- S_IRUSR | S_IWUSR);
+ fd = open(filename.c_str(), O_WRONLY | O_TRUNC | O_NOFOLLOW | O_CLOEXEC);
break;
}
@@ -137,8 +137,7 @@
* /system/priv-app/app/app.apk,131232145,11,23,454,54
* /system/priv-app/app/app.apk:classes5.dex,218490184,39,13,49,1
**/
-bool OfflineProfilingInfo::Serialize(const std::string& filename,
- const DexFileToMethodsMap& info) const {
+bool ProfileCompilationInfo::Save(const std::string& filename) {
int fd = OpenFile(filename, READ_WRITE);
if (fd == -1) {
return false;
@@ -148,14 +147,12 @@
// TODO(calin): Profile this and see how much memory it takes. If too much,
// write to file directly.
std::ostringstream os;
- for (auto it : info) {
- const DexFile* dex_file = it.first;
- const std::set<uint32_t>& method_dex_ids = it.second;
+ for (const auto& it : info_) {
+ const std::string& dex_location = it.first;
+ const DexFileData& dex_data = it.second;
- os << dex_file->GetLocation()
- << kFieldSeparator
- << dex_file->GetLocationChecksum();
- for (auto method_it : method_dex_ids) {
+ os << dex_location << kFieldSeparator << dex_data.checksum;
+ for (auto method_it : dex_data.method_set) {
os << kFieldSeparator << method_it;
}
os << kLineSeparator;
@@ -190,8 +187,22 @@
}
}
-bool ProfileCompilationInfo::ProcessLine(const std::string& line,
- const std::vector<const DexFile*>& dex_files) {
+bool ProfileCompilationInfo::AddData(const std::string& dex_location,
+ uint32_t checksum,
+ uint16_t method_idx) {
+ auto info_it = info_.find(dex_location);
+ if (info_it == info_.end()) {
+ info_it = info_.Put(dex_location, DexFileData(checksum));
+ }
+ if (info_it->second.checksum != checksum) {
+ LOG(WARNING) << "Checksum mismatch for dex " << dex_location;
+ return false;
+ }
+ info_it->second.method_set.insert(method_idx);
+ return true;
+}
+
+bool ProfileCompilationInfo::ProcessLine(const std::string& line) {
std::vector<std::string> parts;
SplitString(line, kFieldSeparator, &parts);
if (parts.size() < 3) {
@@ -205,39 +216,13 @@
return false;
}
- const DexFile* current_dex_file = nullptr;
- for (auto dex_file : dex_files) {
- if (dex_file->GetLocation() == dex_location) {
- if (checksum != dex_file->GetLocationChecksum()) {
- LOG(WARNING) << "Checksum mismatch for "
- << dex_file->GetLocation() << " when parsing " << filename_;
- return false;
- }
- current_dex_file = dex_file;
- break;
- }
- }
- if (current_dex_file == nullptr) {
- return true;
- }
-
for (size_t i = 2; i < parts.size(); i++) {
uint32_t method_idx;
if (!ParseInt(parts[i].c_str(), &method_idx)) {
LOG(WARNING) << "Cannot parse method_idx " << parts[i];
return false;
}
- uint16_t class_idx = current_dex_file->GetMethodId(method_idx).class_idx_;
- auto info_it = info_.find(current_dex_file);
- if (info_it == info_.end()) {
- info_it = info_.Put(current_dex_file, ClassToMethodsMap());
- }
- ClassToMethodsMap& class_map = info_it->second;
- auto class_it = class_map.find(class_idx);
- if (class_it == class_map.end()) {
- class_it = class_map.Put(class_idx, std::set<uint32_t>());
- }
- class_it->second.insert(method_idx);
+ AddData(dex_location, checksum, method_idx);
}
return true;
}
@@ -264,25 +249,8 @@
return new_line_pos == -1 ? new_line_pos : new_line_pos + 1;
}
-bool ProfileCompilationInfo::Load(const std::vector<const DexFile*>& dex_files) {
- if (dex_files.empty()) {
- return true;
- }
- if (kIsDebugBuild) {
- // In debug builds verify that the locations are unique.
- std::set<std::string> locations;
- for (auto dex_file : dex_files) {
- const std::string& location = dex_file->GetLocation();
- DCHECK(locations.find(location) == locations.end())
- << "DexFiles appear to belong to different apks."
- << " There are multiple dex files with the same location: "
- << location;
- locations.insert(location);
- }
- }
- info_.clear();
-
- int fd = OpenFile(filename_, READ);
+bool ProfileCompilationInfo::Load(const std::string& filename) {
+ int fd = OpenFile(filename, READ);
if (fd == -1) {
return false;
}
@@ -295,7 +263,7 @@
while (success) {
int n = read(fd, buffer, kBufferSize);
if (n < 0) {
- PLOG(WARNING) << "Error when reading profile file " << filename_;
+ PLOG(WARNING) << "Error when reading profile file " << filename;
success = false;
break;
} else if (n == 0) {
@@ -309,7 +277,7 @@
if (current_start_pos == -1) {
break;
}
- if (!ProcessLine(current_line, dex_files)) {
+ if (!ProcessLine(current_line)) {
success = false;
break;
}
@@ -320,25 +288,50 @@
if (!success) {
info_.clear();
}
- return CloseDescriptorForFile(fd, filename_) && success;
+ return CloseDescriptorForFile(fd, filename) && success;
+}
+
+bool ProfileCompilationInfo::Load(const ProfileCompilationInfo& other) {
+ for (const auto& other_it : other.info_) {
+ const std::string& other_dex_location = other_it.first;
+ const DexFileData& other_dex_data = other_it.second;
+
+ auto info_it = info_.find(other_dex_location);
+ if (info_it == info_.end()) {
+ info_it = info_.Put(other_dex_location, DexFileData(other_dex_data.checksum));
+ }
+ if (info_it->second.checksum != other_dex_data.checksum) {
+ LOG(WARNING) << "Checksum mismatch for dex " << other_dex_location;
+ return false;
+ }
+ info_it->second.method_set.insert(other_dex_data.method_set.begin(),
+ other_dex_data.method_set.end());
+ }
+ return true;
}
bool ProfileCompilationInfo::ContainsMethod(const MethodReference& method_ref) const {
- auto info_it = info_.find(method_ref.dex_file);
+ auto info_it = info_.find(method_ref.dex_file->GetLocation());
if (info_it != info_.end()) {
- uint16_t class_idx = method_ref.dex_file->GetMethodId(method_ref.dex_method_index).class_idx_;
- const ClassToMethodsMap& class_map = info_it->second;
- auto class_it = class_map.find(class_idx);
- if (class_it != class_map.end()) {
- const std::set<uint32_t>& methods = class_it->second;
- return methods.find(method_ref.dex_method_index) != methods.end();
+ if (method_ref.dex_file->GetLocationChecksum() != info_it->second.checksum) {
+ return false;
}
- return false;
+ const std::set<uint16_t>& methods = info_it->second.method_set;
+ return methods.find(method_ref.dex_method_index) != methods.end();
}
return false;
}
-std::string ProfileCompilationInfo::DumpInfo(bool print_full_dex_location) const {
+uint32_t ProfileCompilationInfo::GetNumberOfMethods() const {
+ uint32_t total = 0;
+ for (const auto& it : info_) {
+ total += it.second.method_set.size();
+ }
+ return total;
+}
+
+std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>* dex_files,
+ bool print_full_dex_location) const {
std::ostringstream os;
if (info_.empty()) {
return "ProfileInfo: empty";
@@ -346,17 +339,11 @@
os << "ProfileInfo:";
- // Use an additional map to achieve a predefined order based on the dex locations.
- SafeMap<const std::string, const DexFile*> dex_locations_map;
- for (auto info_it : info_) {
- dex_locations_map.Put(info_it.first->GetLocation(), info_it.first);
- }
-
const std::string kFirstDexFileKeySubstitute = ":classes.dex";
- for (auto dex_file_it : dex_locations_map) {
+ for (const auto& it : info_) {
os << "\n";
- const std::string& location = dex_file_it.first;
- const DexFile* dex_file = dex_file_it.second;
+ const std::string& location = it.first;
+ const DexFileData& dex_data = it.second;
if (print_full_dex_location) {
os << location;
} else {
@@ -364,10 +351,19 @@
std::string multidex_suffix = DexFile::GetMultiDexSuffix(location);
os << (multidex_suffix.empty() ? kFirstDexFileKeySubstitute : multidex_suffix);
}
- for (auto class_it : info_.find(dex_file)->second) {
- for (auto method_it : class_it.second) {
- os << "\n " << PrettyMethod(method_it, *dex_file, true);
+ for (const auto method_it : dex_data.method_set) {
+ if (dex_files != nullptr) {
+ const DexFile* dex_file = nullptr;
+ for (size_t i = 0; i < dex_files->size(); i++) {
+ if (location == (*dex_files)[i]->GetLocation()) {
+ dex_file = (*dex_files)[i];
+ }
+ }
+ if (dex_file != nullptr) {
+ os << "\n " << PrettyMethod(method_it, *dex_file, true);
+ }
}
+ os << "\n " << method_it;
}
}
return os.str();
diff --git a/runtime/jit/offline_profiling_info.h b/runtime/jit/offline_profiling_info.h
index 32d4c5b..26e1ac3 100644
--- a/runtime/jit/offline_profiling_info.h
+++ b/runtime/jit/offline_profiling_info.h
@@ -29,60 +29,50 @@
class ArtMethod;
+// TODO: rename file.
/**
- * Profiling information in a format that can be serialized to disk.
- * It is a serialize-friendly format based on information collected
- * by the interpreter (ProfileInfo).
+ * Profile information in a format suitable to be queried by the compiler and
+ * performing profile guided compilation.
+ * It is a serialize-friendly format based on information collected by the
+ * interpreter (ProfileInfo).
* Currently it stores only the hot compiled methods.
*/
-class OfflineProfilingInfo {
- public:
- void SaveProfilingInfo(const std::string& filename, const std::vector<ArtMethod*>& methods);
-
- private:
- // Map identifying the location of the profiled methods.
- // dex_file_ -> [dex_method_index]+
- using DexFileToMethodsMap = SafeMap<const DexFile*, std::set<uint32_t>>;
-
- void AddMethodInfo(ArtMethod* method, DexFileToMethodsMap* info)
- SHARED_REQUIRES(Locks::mutator_lock_);
- bool Serialize(const std::string& filename, const DexFileToMethodsMap& info) const;
-};
-
-/**
- * Profile information in a format suitable to be queried by the compiler and performing
- * profile guided compilation.
- */
class ProfileCompilationInfo {
public:
- // Constructs a ProfileCompilationInfo backed by the provided file.
- explicit ProfileCompilationInfo(const std::string& filename) : filename_(filename) {}
+ static bool SaveProfilingInfo(const std::string& filename,
+ const std::vector<ArtMethod*>& methods);
- // Loads profile information corresponding to the provided dex files.
- // The dex files' multidex suffixes must be unique.
- // This resets the state of the profiling information
- // (i.e. all previously loaded info are cleared).
- bool Load(const std::vector<const DexFile*>& dex_files);
+ // Loads profile information from the given file.
+ bool Load(const std::string& profile_filename);
+ // Loads the data from another ProfileCompilationInfo object.
+ bool Load(const ProfileCompilationInfo& info);
+ // Saves the profile data to the given file.
+ bool Save(const std::string& profile_filename);
+ // Returns the number of methods that were profiled.
+ uint32_t GetNumberOfMethods() const;
// Returns true if the method reference is present in the profiling info.
bool ContainsMethod(const MethodReference& method_ref) const;
- const std::string& GetFilename() const { return filename_; }
-
// Dumps all the loaded profile info into a string and returns it.
+ // If dex_files is not null then the method indices will be resolved to their
+ // names.
// This is intended for testing and debugging.
- std::string DumpInfo(bool print_full_dex_location = true) const;
+ std::string DumpInfo(const std::vector<const DexFile*>* dex_files,
+ bool print_full_dex_location = true) const;
private:
- bool ProcessLine(const std::string& line,
- const std::vector<const DexFile*>& dex_files);
+ bool AddData(const std::string& dex_location, uint32_t checksum, uint16_t method_idx);
+ bool ProcessLine(const std::string& line);
- using ClassToMethodsMap = SafeMap<uint32_t, std::set<uint32_t>>;
- // Map identifying the location of the profiled methods.
- // dex_file -> class_index -> [dex_method_index]+
- using DexFileToProfileInfoMap = SafeMap<const DexFile*, ClassToMethodsMap>;
+ struct DexFileData {
+ explicit DexFileData(uint32_t location_checksum) : checksum(location_checksum) {}
+ uint32_t checksum;
+ std::set<uint16_t> method_set;
+ };
- const std::string filename_;
+ using DexFileToProfileInfoMap = SafeMap<const std::string, DexFileData>;
+
DexFileToProfileInfoMap info_;
};
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 0278138..ec289ea 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -106,10 +106,9 @@
VLOG(profiler) << "Not enough information to save. Nr of methods: " << methods.size();
return false;
}
- offline_profiling_info_.SaveProfilingInfo(output_filename_, methods);
- VLOG(profiler) << "Saved profile time: " << PrettyDuration(NanoTime() - start);
-
+ ProfileCompilationInfo::SaveProfilingInfo(output_filename_, methods);
+ VLOG(profiler) << "Profile process time: " << PrettyDuration(NanoTime() - start);
return true;
}
diff --git a/runtime/jit/profile_saver.h b/runtime/jit/profile_saver.h
index 88efd41..d60142b 100644
--- a/runtime/jit/profile_saver.h
+++ b/runtime/jit/profile_saver.h
@@ -66,7 +66,6 @@
const std::string output_filename_;
jit::JitCodeCache* jit_code_cache_;
const std::set<const std::string> tracked_dex_base_locations_;
- OfflineProfilingInfo offline_profiling_info_;
uint64_t code_cache_last_update_time_ns_;
bool shutting_down_ GUARDED_BY(Locks::profiler_lock_);
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 4b24f82..da4a891 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -565,8 +565,8 @@
*/
static void VMRuntime_registerAppInfo(JNIEnv* env,
jclass clazz ATTRIBUTE_UNUSED,
- jstring pkg_name,
- jstring app_dir,
+ jstring profile_file,
+ jstring app_dir ATTRIBUTE_UNUSED, // TODO: remove argument
jobjectArray code_paths) {
std::vector<std::string> code_paths_vec;
int code_paths_length = env->GetArrayLength(code_paths);
@@ -577,13 +577,11 @@
env->ReleaseStringUTFChars(code_path, raw_code_path);
}
- const char* raw_app_dir = env->GetStringUTFChars(app_dir, nullptr);
- const char* raw_pkg_name = env->GetStringUTFChars(pkg_name, nullptr);
- std::string profile_file = StringPrintf("%s/code_cache/%s.prof", raw_app_dir, raw_pkg_name);
- env->ReleaseStringUTFChars(pkg_name, raw_pkg_name);
- env->ReleaseStringUTFChars(app_dir, raw_app_dir);
+ const char* raw_profile_file = env->GetStringUTFChars(profile_file, nullptr);
+ std::string profile_file_str(raw_profile_file);
+ env->ReleaseStringUTFChars(profile_file, raw_profile_file);
- Runtime::Current()->RegisterAppInfo(code_paths_vec, profile_file);
+ Runtime::Current()->RegisterAppInfo(code_paths_vec, profile_file_str);
}
static jboolean VMRuntime_isBootClassPathOnDisk(JNIEnv* env, jclass, jstring java_instruction_set) {
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
index f42a17d..c177f19 100644
--- a/runtime/native/java_lang_Runtime.cc
+++ b/runtime/native/java_lang_Runtime.cc
@@ -80,7 +80,7 @@
// Starting with N nativeLoad uses classloader local
// linker namespace instead of global LD_LIBRARY_PATH
// (23 is Marshmallow)
- if (target_sdk_version <= INT_MAX) {
+ if (target_sdk_version == 0) {
SetLdLibraryPath(env, javaLibrarySearchPath);
}
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index e3de14b..83e594b 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -983,7 +983,6 @@
LOG(WARNING) << "Failed to find OatDexFile for DexFile " << dex_location
<< " ( canonical path " << dex_canonical_location << ")"
<< " with checksum " << checksum << " in OatFile " << GetLocation();
- /* TODO: Modify for multi-image support and reenable. b/26317072
if (kIsDebugBuild) {
for (const OatDexFile* odf : oat_dex_files_storage_) {
LOG(WARNING) << "OatFile " << GetLocation()
@@ -992,7 +991,6 @@
<< " with checksum 0x" << std::hex << odf->GetDexFileLocationChecksum();
}
}
- */
}
return nullptr;
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 324bd9f..28c27cd 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -21,7 +21,6 @@
#include "class_linker.h"
#include "common_throws.h"
#include "dex_file-inl.h"
-#include "entrypoints/entrypoint_utils.h"
#include "indirect_reference_table-inl.h"
#include "jni_internal.h"
#include "mirror/abstract_method.h"
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 5c72629..6b8f17d 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -214,6 +214,7 @@
}
Runtime::~Runtime() {
+ ATRACE_BEGIN("Runtime shutdown");
if (is_native_bridge_loaded_) {
UnloadNativeBridge();
}
@@ -228,45 +229,55 @@
Thread* self = Thread::Current();
const bool attach_shutdown_thread = self == nullptr;
if (attach_shutdown_thread) {
+ ATRACE_BEGIN("Attach shutdown thread");
CHECK(AttachCurrentThread("Shutdown thread", false, nullptr, false));
+ ATRACE_END();
self = Thread::Current();
} else {
LOG(WARNING) << "Current thread not detached in Runtime shutdown";
}
{
+ ATRACE_BEGIN("Wait for shutdown cond");
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
shutting_down_started_ = true;
while (threads_being_born_ > 0) {
shutdown_cond_->Wait(self);
}
shutting_down_ = true;
+ ATRACE_END();
}
// Shutdown and wait for the daemons.
CHECK(self != nullptr);
if (IsFinishedStarting()) {
+ ATRACE_BEGIN("Waiting for Daemons");
self->ClearException();
self->GetJniEnv()->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
WellKnownClasses::java_lang_Daemons_stop);
+ ATRACE_END();
}
Trace::Shutdown();
if (attach_shutdown_thread) {
+ ATRACE_BEGIN("Detach shutdown thread");
DetachCurrentThread();
+ ATRACE_END();
self = nullptr;
}
// Make sure to let the GC complete if it is running.
heap_->WaitForGcToComplete(gc::kGcCauseBackground, self);
heap_->DeleteThreadPool();
- if (jit_.get() != nullptr) {
+ if (jit_ != nullptr) {
+ ATRACE_BEGIN("Delete jit");
VLOG(jit) << "Deleting jit thread pool";
// Delete thread pool before the thread list since we don't want to wait forever on the
// JIT compiler threads.
jit_->DeleteThreadPool();
// Similarly, stop the profile saver thread before deleting the thread list.
jit_->StopProfileSaver();
+ ATRACE_END();
}
// Make sure our internal threads are dead before we start tearing down things they're using.
@@ -274,11 +285,13 @@
delete signal_catcher_;
// Make sure all other non-daemon threads have terminated, and all daemon threads are suspended.
+ ATRACE_BEGIN("Delete thread list");
delete thread_list_;
+ ATRACE_END();
// Delete the JIT after thread list to ensure that there is no remaining threads which could be
// accessing the instrumentation when we delete it.
- if (jit_.get() != nullptr) {
+ if (jit_ != nullptr) {
VLOG(jit) << "Deleting jit";
jit_.reset(nullptr);
}
@@ -286,6 +299,7 @@
// Shutdown the fault manager if it was initialized.
fault_manager.Shutdown();
+ ATRACE_BEGIN("Delete state");
delete monitor_list_;
delete monitor_pool_;
delete class_linker_;
@@ -302,10 +316,12 @@
low_4gb_arena_pool_.reset();
arena_pool_.reset();
MemMap::Shutdown();
+ ATRACE_END();
// TODO: acquire a static mutex on Runtime to avoid racing.
CHECK(instance_ == nullptr || instance_ == this);
instance_ = nullptr;
+ ATRACE_END();
}
struct AbortState {
@@ -543,12 +559,9 @@
// Use !IsAotCompiler so that we get test coverage, tests are never the zygote.
if (!IsAotCompiler()) {
ScopedObjectAccess soa(self);
- std::vector<gc::space::ImageSpace*> image_spaces = heap_->GetBootImageSpaces();
- for (gc::space::ImageSpace* image_space : image_spaces) {
- ATRACE_BEGIN("AddImageStringsToTable");
- GetInternTable()->AddImageStringsToTable(image_space);
- ATRACE_END();
- }
+ ATRACE_BEGIN("AddImageStringsToTable");
+ GetInternTable()->AddImagesStringsToTable(heap_->GetBootImageSpaces());
+ ATRACE_END();
ATRACE_BEGIN("MoveImageClassesToClassTable");
GetClassLinker()->AddBootImageClassesToClassTable();
ATRACE_END();
@@ -1089,13 +1102,11 @@
LOG(ERROR) << "Could not initialize from image: " << error_msg;
return false;
}
- /* TODO: Modify check to support multiple image spaces and reenable. b/26317072
if (kIsDebugBuild) {
for (auto image_space : GetHeap()->GetBootImageSpaces()) {
image_space->VerifyImageAllocations();
}
}
- */
if (boot_class_path_string_.empty()) {
// The bootclasspath is not explicitly specified: construct it from the loaded dex files.
const std::vector<const DexFile*>& boot_class_path = GetClassLinker()->GetBootClassPath();
@@ -1685,13 +1696,29 @@
void Runtime::RegisterAppInfo(const std::vector<std::string>& code_paths,
const std::string& profile_output_filename) {
- VLOG(profiler) << "Register app with " << profile_output_filename_
- << " " << Join(code_paths, ':');
- DCHECK(!profile_output_filename.empty());
- profile_output_filename_ = profile_output_filename;
- if (jit_.get() != nullptr && !profile_output_filename.empty() && !code_paths.empty()) {
- jit_->StartProfileSaver(profile_output_filename, code_paths);
+ if (jit_.get() == nullptr) {
+ // We are not JITing. Nothing to do.
+ return;
}
+
+ VLOG(profiler) << "Register app with " << profile_output_filename
+ << " " << Join(code_paths, ':');
+
+ if (profile_output_filename.empty()) {
+ LOG(WARNING) << "JIT profile information will not be recorded: profile filename is empty.";
+ return;
+ }
+ if (!FileExists(profile_output_filename)) {
+ LOG(WARNING) << "JIT profile information will not be recorded: profile file does not exits.";
+ return;
+ }
+ if (code_paths.empty()) {
+ LOG(WARNING) << "JIT profile information will not be recorded: code paths is empty.";
+ return;
+ }
+
+ profile_output_filename_ = profile_output_filename;
+ jit_->StartProfileSaver(profile_output_filename, code_paths);
}
// Transaction support.
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index a390908..77f780f 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -69,6 +69,7 @@
}
ThreadList::~ThreadList() {
+ ATRACE_BEGIN(__FUNCTION__);
// Detach the current thread if necessary. If we failed to start, there might not be any threads.
// We need to detach the current thread here in case there's another thread waiting to join with
// us.
@@ -79,19 +80,27 @@
contains = Contains(self);
}
if (contains) {
+ ATRACE_BEGIN("DetachCurrentThread");
Runtime::Current()->DetachCurrentThread();
+ ATRACE_END();
}
+ ATRACE_BEGIN("WaitForOtherNonDaemonThreadsToExit");
WaitForOtherNonDaemonThreadsToExit();
+ ATRACE_END();
// Disable GC and wait for GC to complete in case there are still daemon threads doing
// allocations.
gc::Heap* const heap = Runtime::Current()->GetHeap();
heap->DisableGCForShutdown();
// In case a GC is in progress, wait for it to finish.
+ ATRACE_BEGIN("WaitForGcToComplete");
heap->WaitForGcToComplete(gc::kGcCauseBackground, Thread::Current());
-
+ ATRACE_END();
// TODO: there's an unaddressed race here where a thread may attach during shutdown, see
// Thread::Init.
+ ATRACE_BEGIN("SuspendAllDaemonThreads");
SuspendAllDaemonThreads();
+ ATRACE_END();
+ ATRACE_END();
}
bool ThreadList::Contains(Thread* thread) {
diff --git a/runtime/utils.cc b/runtime/utils.cc
index ff6b4c0..1e1c7e7 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1446,6 +1446,11 @@
return true;
}
+bool FileExists(const std::string& filename) {
+ struct stat buffer;
+ return stat(filename.c_str(), &buffer) == 0;
+}
+
std::string PrettyDescriptor(Primitive::Type type) {
return PrettyDescriptor(Primitive::Descriptor(type));
}
diff --git a/runtime/utils.h b/runtime/utils.h
index a07e74c..5ceb3b5 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -276,6 +276,9 @@
// Wrapper on fork/execv to run a command in a subprocess.
bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg);
+// Returns true if the file exists.
+bool FileExists(const std::string& filename);
+
class VoidFunctor {
public:
template <typename A>
diff --git a/test/004-ThreadStress/src/Main.java b/test/004-ThreadStress/src/Main.java
index 9461c0b..b9a46de 100644
--- a/test/004-ThreadStress/src/Main.java
+++ b/test/004-ThreadStress/src/Main.java
@@ -57,12 +57,14 @@
}
private final static class OOM extends Operation {
+ private final static int ALLOC_SIZE = 1024;
+
@Override
public boolean perform() {
try {
List<byte[]> l = new ArrayList<byte[]>();
while (true) {
- l.add(new byte[1024]);
+ l.add(new byte[ALLOC_SIZE]);
}
} catch (OutOfMemoryError e) {
}
@@ -115,12 +117,33 @@
}
private final static class Alloc extends Operation {
+ private final static int ALLOC_SIZE = 1024; // Needs to be small enough to not be in LOS.
+ private final static int ALLOC_COUNT = 1024;
+
@Override
public boolean perform() {
try {
List<byte[]> l = new ArrayList<byte[]>();
- for (int i = 0; i < 1024; i++) {
- l.add(new byte[1024]);
+ for (int i = 0; i < ALLOC_COUNT; i++) {
+ l.add(new byte[ALLOC_SIZE]);
+ }
+ } catch (OutOfMemoryError e) {
+ }
+ return true;
+ }
+ }
+
+ private final static class LargeAlloc extends Operation {
+ private final static int PAGE_SIZE = 4096;
+ private final static int PAGE_SIZE_MODIFIER = 10; // Needs to be large enough for LOS.
+ private final static int ALLOC_COUNT = 100;
+
+ @Override
+ public boolean perform() {
+ try {
+ List<byte[]> l = new ArrayList<byte[]>();
+ for (int i = 0; i < ALLOC_COUNT; i++) {
+ l.add(new byte[PAGE_SIZE_MODIFIER * PAGE_SIZE]);
}
} catch (OutOfMemoryError e) {
}
@@ -144,10 +167,12 @@
}
private final static class Sleep extends Operation {
+ private final static int SLEEP_TIME = 100;
+
@Override
public boolean perform() {
try {
- Thread.sleep(100);
+ Thread.sleep(SLEEP_TIME);
} catch (InterruptedException ignored) {
}
return true;
@@ -155,6 +180,8 @@
}
private final static class TimedWait extends Operation {
+ private final static int SLEEP_TIME = 100;
+
private final Object lock;
public TimedWait(Object lock) {
@@ -165,7 +192,7 @@
public boolean perform() {
synchronized (lock) {
try {
- lock.wait(100, 0);
+ lock.wait(SLEEP_TIME, 0);
} catch (InterruptedException ignored) {
}
}
@@ -215,7 +242,8 @@
Map<Operation, Double> frequencyMap = new HashMap<Operation, Double>();
frequencyMap.put(new OOM(), 0.005); // 1/200
frequencyMap.put(new SigQuit(), 0.095); // 19/200
- frequencyMap.put(new Alloc(), 0.3); // 60/200
+ frequencyMap.put(new Alloc(), 0.25); // 50/200
+ frequencyMap.put(new LargeAlloc(), 0.05); // 10/200
frequencyMap.put(new StackTrace(), 0.1); // 20/200
frequencyMap.put(new Exit(), 0.25); // 50/200
frequencyMap.put(new Sleep(), 0.125); // 25/200
@@ -261,6 +289,8 @@
op = new SigQuit();
} else if (split[0].equals("-alloc")) {
op = new Alloc();
+ } else if (split[0].equals("-largealloc")) {
+ op = new LargeAlloc();
} else if (split[0].equals("-stacktrace")) {
op = new StackTrace();
} else if (split[0].equals("-exit")) {
diff --git a/test/118-noimage-dex2oat/run b/test/118-noimage-dex2oat/run
index 4b1d0ce..07bdb08 100644
--- a/test/118-noimage-dex2oat/run
+++ b/test/118-noimage-dex2oat/run
@@ -41,7 +41,6 @@
bpath="${framework}/core-libart${bpath_suffix}.jar"
bpath="${bpath}:${framework}/conscrypt${bpath_suffix}.jar"
bpath="${bpath}:${framework}/okhttp${bpath_suffix}.jar"
-bpath="${bpath}:${framework}/core-junit${bpath_suffix}.jar"
bpath="${bpath}:${framework}/bouncycastle${bpath_suffix}.jar"
bpath_arg="--runtime-option -Xbootclasspath:${bpath}"
diff --git a/test/449-checker-bce/src/Main.java b/test/449-checker-bce/src/Main.java
index 6e7ba40..3e6d1f4 100644
--- a/test/449-checker-bce/src/Main.java
+++ b/test/449-checker-bce/src/Main.java
@@ -127,7 +127,7 @@
}
- /// CHECK-START: void Main.constantIndexing2(int[]) BCE (before)
+ /// CHECK-START: void Main.$opt$noinline$constantIndexing2(int[]) BCE (before)
/// CHECK: BoundsCheck
/// CHECK: ArraySet
/// CHECK: BoundsCheck
@@ -137,7 +137,7 @@
/// CHECK: BoundsCheck
/// CHECK: ArraySet
- /// CHECK-START: void Main.constantIndexing2(int[]) BCE (after)
+ /// CHECK-START: void Main.$opt$noinline$constantIndexing2(int[]) BCE (after)
/// CHECK: LessThanOrEqual
/// CHECK: Deoptimize
/// CHECK-NOT: BoundsCheck
@@ -151,12 +151,15 @@
/// CHECK: BoundsCheck
/// CHECK: ArraySet
- static void constantIndexing2(int[] array) {
+ static void $opt$noinline$constantIndexing2(int[] array) {
array[1] = 1;
array[2] = 1;
array[3] = 1;
array[4] = 1;
array[-1] = 1;
+ if (array[1] == 1) {
+ throw new Error("");
+ }
}
@@ -655,10 +658,10 @@
try {
assertIsManaged();
// This will cause AIOOBE.
- constantIndexing2(new int[3]);
+ $opt$noinline$constantIndexing2(new int[3]);
} catch (ArrayIndexOutOfBoundsException e) {
assertIsManaged(); // This is to ensure that single-frame deoptimization works.
- // Will need to be updated if constantIndexing2 is inlined.
+ // Will need to be updated if $opt$noinline$constantIndexing2 is inlined.
try {
// This will cause AIOOBE.
constantIndexingForward6(new int[3]);
diff --git a/test/450-checker-types/src/Main.java b/test/450-checker-types/src/Main.java
index fd4dd5e..92cf807 100644
--- a/test/450-checker-types/src/Main.java
+++ b/test/450-checker-types/src/Main.java
@@ -722,22 +722,6 @@
}
}
- /// CHECK-START: void Main.testLoopPhisWithNullAndCrossUses(boolean) ssa_builder (after)
- /// CHECK-DAG: <<Null:l\d+>> NullConstant
- /// CHECK-DAG: <<PhiA:l\d+>> Phi [<<Null>>,<<PhiB:l\d+>>,<<PhiA>>] klass:java.lang.Object exact:false
- /// CHECK-DAG: <<PhiB>> Phi [<<Null>>,<<PhiB>>,<<PhiA>>] klass:java.lang.Object exact:false
- private void testLoopPhisWithNullAndCrossUses(boolean cond) {
- Main a = null;
- Main b = null;
- while (a == null) {
- if (cond) {
- a = b;
- } else {
- b = a;
- }
- }
- }
-
/// CHECK-START: java.lang.Object[] Main.testInstructionsWithUntypedParent() ssa_builder (after)
/// CHECK-DAG: <<Null:l\d+>> NullConstant
/// CHECK-DAG: <<LoopPhi:l\d+>> Phi [<<Null>>,<<Phi:l\d+>>] klass:java.lang.Object[] exact:true
diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc
index 375a3fc..4f89e91 100644
--- a/test/466-get-live-vreg/get_live_vreg_jni.cc
+++ b/test/466-get-live-vreg/get_live_vreg_jni.cc
@@ -40,15 +40,17 @@
uint32_t value = 0;
CHECK(GetVReg(m, 0, kIntVReg, &value));
CHECK_EQ(value, 42u);
- } else if (m_name.compare("testIntervalHole") == 0) {
+ } else if (m_name.compare("$opt$noinline$testIntervalHole") == 0) {
+ uint32_t number_of_dex_registers = m->GetCodeItem()->registers_size_;
+ uint32_t dex_register_of_first_parameter = number_of_dex_registers - 2;
found_method_ = true;
uint32_t value = 0;
if (GetCurrentQuickFrame() != nullptr &&
GetCurrentOatQuickMethodHeader()->IsOptimized() &&
!Runtime::Current()->IsDebuggable()) {
- CHECK_EQ(GetVReg(m, 0, kIntVReg, &value), false);
+ CHECK_EQ(GetVReg(m, dex_register_of_first_parameter, kIntVReg, &value), false);
} else {
- CHECK(GetVReg(m, 0, kIntVReg, &value));
+ CHECK(GetVReg(m, dex_register_of_first_parameter, kIntVReg, &value));
CHECK_EQ(value, 1u);
}
}
diff --git a/test/466-get-live-vreg/src/Main.java b/test/466-get-live-vreg/src/Main.java
index d036a24..1903260 100644
--- a/test/466-get-live-vreg/src/Main.java
+++ b/test/466-get-live-vreg/src/Main.java
@@ -31,7 +31,7 @@
}
}
- static void testIntervalHole(int arg, boolean test) {
+ static void $opt$noinline$testIntervalHole(int arg, boolean test) {
// Move the argument to callee save to ensure it is in
// a readable register.
moveArgToCalleeSave();
@@ -44,6 +44,9 @@
// The environment use of `arg` should not make it live.
doStaticNativeCallLiveVreg();
}
+ if (staticField1 == 2) {
+ throw new Error("");
+ }
}
static native void doStaticNativeCallLiveVreg();
@@ -67,7 +70,7 @@
static void testWrapperIntervalHole(int arg, boolean test) {
try {
Thread.sleep(0);
- testIntervalHole(arg, test);
+ $opt$noinline$testIntervalHole(arg, test);
} catch (Exception e) {
throw new Error(e);
}
diff --git a/test/476-checker-ctor-memory-barrier/src/Main.java b/test/476-checker-ctor-memory-barrier/src/Main.java
index 41bec05..c2a2a10 100644
--- a/test/476-checker-ctor-memory-barrier/src/Main.java
+++ b/test/476-checker-ctor-memory-barrier/src/Main.java
@@ -25,13 +25,14 @@
class ClassWithFinals {
public final int x;
public ClassWithFinals obj;
+ public static boolean doThrow = false;
/// CHECK-START: void ClassWithFinals.<init>(boolean) register (after)
/// CHECK: MemoryBarrier kind:StoreStore
/// CHECK-NEXT: ReturnVoid
public ClassWithFinals(boolean cond) {
x = 0;
- if (cond) {
+ if (doThrow) {
// avoid inlining
throw new RuntimeException();
}
diff --git a/test/529-checker-unresolved/expected.txt b/test/529-checker-unresolved/expected.txt
index 1e7dbfe..1590a2a 100644
--- a/test/529-checker-unresolved/expected.txt
+++ b/test/529-checker-unresolved/expected.txt
@@ -5,3 +5,6 @@
UnresolvedClass.superMethod()
instanceof ok
checkcast ok
+UnresolvedClass.directCall()
+UnresolvedClass.directCall()
+UnresolvedClass.directCall()
diff --git a/test/529-checker-unresolved/src/Main.java b/test/529-checker-unresolved/src/Main.java
index 5219c04..872fa6d 100644
--- a/test/529-checker-unresolved/src/Main.java
+++ b/test/529-checker-unresolved/src/Main.java
@@ -138,6 +138,27 @@
callUnresolvedInstanceFieldAccess(c);
testInstanceOf(m);
testCheckCast(m);
+ testLicm(2);
+ }
+
+ /// CHECK-START: void Main.testLicm(int) licm (before)
+ /// CHECK: <<Class:l\d+>> LoadClass loop:B2
+ /// CHECK-NEXT: <<Clinit:l\d+>> ClinitCheck [<<Class>>] loop:B2
+ /// CHECK-NEXT: <<New:l\d+>> NewInstance [<<Clinit>>,<<Method:[i|j]\d+>>] loop:B2
+ /// CHECK-NEXT: InvokeUnresolved [<<New>>] loop:B2
+
+ /// CHECK-START: void Main.testLicm(int) licm (after)
+ /// CHECK: <<Class:l\d+>> LoadClass loop:none
+ /// CHECK-NEXT: <<Clinit:l\d+>> ClinitCheck [<<Class>>] loop:none
+ /// CHECK: <<New:l\d+>> NewInstance [<<Clinit>>,<<Method:[i|j]\d+>>] loop:B2
+ /// CHECK-NEXT: InvokeUnresolved [<<New>>] loop:B2
+ static public void testLicm(int count) {
+ // Test to make sure we keep the initialization check after loading an unresolved class.
+ UnresolvedClass c;
+ int i = 0;
+ do {
+ c = new UnresolvedClass();
+ } while (i++ != count);
}
public static void expectEquals(byte expected, byte result) {
diff --git a/test/552-checker-primitive-typeprop/smali/ArraySet.smali b/test/552-checker-primitive-typeprop/smali/ArraySet.smali
new file mode 100644
index 0000000..57d8606
--- /dev/null
+++ b/test/552-checker-primitive-typeprop/smali/ArraySet.smali
@@ -0,0 +1,51 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LArraySet;
+.super Ljava/lang/Object;
+
+# Test ArraySet on int[] and float[] arrays. The input should be typed accordingly.
+# Note that the input is a Phi to make sure primitive type propagation is re-run
+# on the replaced inputs.
+
+## CHECK-START: void ArraySet.ambiguousSet(int[], float[], boolean) ssa_builder (after)
+## CHECK-DAG: <<IntArray:l\d+>> ParameterValue klass:int[]
+## CHECK-DAG: <<IntA:i\d+>> IntConstant 0
+## CHECK-DAG: <<IntB:i\d+>> IntConstant 1073741824
+## CHECK-DAG: <<IntPhi:i\d+>> Phi [<<IntA>>,<<IntB>>] reg:0
+## CHECK-DAG: <<IntNC:l\d+>> NullCheck [<<IntArray>>]
+## CHECK-DAG: ArraySet [<<IntNC>>,{{i\d+}},<<IntPhi>>]
+
+## CHECK-DAG: <<FloatArray:l\d+>> ParameterValue klass:float[]
+## CHECK-DAG: <<FloatA:f\d+>> FloatConstant 0
+## CHECK-DAG: <<FloatB:f\d+>> FloatConstant 2
+## CHECK-DAG: <<FloatPhi:f\d+>> Phi [<<FloatA>>,<<FloatB>>] reg:0
+## CHECK-DAG: <<FloatNC:l\d+>> NullCheck [<<FloatArray>>]
+## CHECK-DAG: ArraySet [<<FloatNC>>,{{i\d+}},<<FloatPhi>>]
+
+.method public static ambiguousSet([I[FZ)V
+ .registers 8
+
+ const v0, 0x0
+ if-eqz p2, :else
+ const v0, 0x40000000
+ :else
+ # v0 = Phi [0.0f, 2.0f]
+
+ const v1, 0x1
+ aput v0, p0, v1
+ aput v0, p1, v1
+
+ return-void
+.end method
diff --git a/test/554-checker-rtp-checkcast/expected.txt b/test/554-checker-rtp-checkcast/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/554-checker-rtp-checkcast/expected.txt
diff --git a/test/554-checker-rtp-checkcast/info.txt b/test/554-checker-rtp-checkcast/info.txt
new file mode 100644
index 0000000..2a60971
--- /dev/null
+++ b/test/554-checker-rtp-checkcast/info.txt
@@ -0,0 +1 @@
+Tests that phis with check-casted reference type inputs are typed.
diff --git a/test/554-checker-rtp-checkcast/src/Main.java b/test/554-checker-rtp-checkcast/src/Main.java
new file mode 100644
index 0000000..607f71a
--- /dev/null
+++ b/test/554-checker-rtp-checkcast/src/Main.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+public class Main {
+
+ public static Object returnIntArray() { return new int[10]; }
+
+ /// CHECK-START: void Main.boundTypeForMergingPhi() ssa_builder (after)
+ /// CHECK-DAG: ArraySet [<<NC:l\d+>>,{{i\d+}},{{i\d+}}]
+ /// CHECK-DAG: <<NC>> NullCheck [<<Phi:l\d+>>]
+ /// CHECK-DAG: <<Phi>> Phi klass:int[]
+
+ public static void boundTypeForMergingPhi() {
+ int[] array = new int[20];
+ if (array.hashCode() > 5) {
+ array = (int[]) returnIntArray();
+ }
+ array[0] = 14;
+ }
+
+ /// CHECK-START: void Main.boundTypeForLoopPhi() ssa_builder (after)
+ /// CHECK-DAG: ArraySet [<<NC:l\d+>>,{{i\d+}},{{i\d+}}]
+ /// CHECK-DAG: <<NC>> NullCheck [<<Phi:l\d+>>]
+ /// CHECK-DAG: <<Phi>> Phi klass:int[]
+
+ public static void boundTypeForLoopPhi() {
+ int[] array = new int[20];
+ int i = 0;
+ while (i < 4) {
+ ++i;
+ array[i] = i;
+ if (i > 2) {
+ array = (int[]) returnIntArray();
+ }
+ }
+ array[0] = 14;
+ }
+
+ /// CHECK-START: void Main.boundTypeForCatchPhi() ssa_builder (after)
+ /// CHECK-DAG: ArraySet [<<NC:l\d+>>,{{i\d+}},{{i\d+}}]
+ /// CHECK-DAG: <<NC>> NullCheck [<<Phi:l\d+>>]
+ /// CHECK-DAG: <<Phi>> Phi is_catch_phi:true klass:int[]
+
+ public static void boundTypeForCatchPhi() {
+ int[] array1 = new int[20];
+ int[] array2 = (int[]) returnIntArray();
+
+ int[] catch_phi = array1;
+ try {
+ System.nanoTime();
+ catch_phi = array2;
+ System.nanoTime();
+ } catch (Throwable ex) {
+ catch_phi[0] = 14;
+ }
+ }
+
+ public static void main(String[] args) { }
+}
diff --git a/test/557-checker-ref-equivalent/expected.txt b/test/557-checker-ref-equivalent/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/557-checker-ref-equivalent/expected.txt
diff --git a/test/557-checker-ref-equivalent/info.txt b/test/557-checker-ref-equivalent/info.txt
new file mode 100644
index 0000000..30e763b
--- /dev/null
+++ b/test/557-checker-ref-equivalent/info.txt
@@ -0,0 +1 @@
+Checker tests to ensure we do not get reference and integer phi equivalents.
diff --git a/test/557-checker-ref-equivalent/smali/TestCase.smali b/test/557-checker-ref-equivalent/smali/TestCase.smali
new file mode 100644
index 0000000..2472957
--- /dev/null
+++ b/test/557-checker-ref-equivalent/smali/TestCase.smali
@@ -0,0 +1,51 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTestCase;
+
+.super Ljava/lang/Object;
+
+## CHECK-START: void TestCase.testIntRefEquivalent() ssa_builder (after)
+## CHECK-NOT: Phi
+.method public static testIntRefEquivalent()V
+ .registers 4
+
+ const v0, 0
+
+ :try_start
+ invoke-static {v0,v0}, LTestCase;->foo(ILjava/lang/Object;)V
+ if-eqz v0, :end_if
+ const v0, 0
+ :end_if
+ invoke-static {v0,v0}, LTestCase;->foo(ILjava/lang/Object;)V
+ goto :no_catch
+ :try_end
+
+ .catch Ljava/lang/Exception; {:try_start .. :try_end} :exception
+ :exception
+ # We used to have a reference and an integer phi equivalents here, which
+ # broke the invariant of not sharing the same spill slot between those two
+ # types.
+ invoke-static {v0,v0}, LTestCase;->foo(ILjava/lang/Object;)V
+
+ :no_catch
+ goto :try_start
+ return-void
+
+.end method
+
+.method public static foo(ILjava/lang/Object;)V
+ .registers 4
+ return-void
+.end method
diff --git a/test/557-checker-ref-equivalent/src/Main.java b/test/557-checker-ref-equivalent/src/Main.java
new file mode 100644
index 0000000..a970af5
--- /dev/null
+++ b/test/557-checker-ref-equivalent/src/Main.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ /// CHECK-START: void Main.testRedundantPhiCycle(boolean) ssa_builder (after)
+ /// CHECK-NOT: Phi
+ private void testRedundantPhiCycle(boolean cond) {
+ Object o = null;
+ while (true) {
+ if (cond) {
+ o = null;
+ }
+ System.out.println(o);
+ }
+ }
+
+ /// CHECK-START: void Main.testLoopPhisWithNullAndCrossUses(boolean) ssa_builder (after)
+ /// CHECK-NOT: Phi
+ private void testLoopPhisWithNullAndCrossUses(boolean cond) {
+ Main a = null;
+ Main b = null;
+ while (a == null) {
+ if (cond) {
+ a = b;
+ } else {
+ b = a;
+ }
+ }
+ }
+
+ public static void main(String[] args) {
+ }
+}
diff --git a/test/559-checker-rtp-ifnotnull/expected.txt b/test/559-checker-rtp-ifnotnull/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/559-checker-rtp-ifnotnull/expected.txt
diff --git a/test/559-checker-rtp-ifnotnull/info.txt b/test/559-checker-rtp-ifnotnull/info.txt
new file mode 100644
index 0000000..c08aa0c
--- /dev/null
+++ b/test/559-checker-rtp-ifnotnull/info.txt
@@ -0,0 +1,2 @@
+Tests that BoundType created for if-not-null does not force untyped loop phis
+to Object.
\ No newline at end of file
diff --git a/test/559-checker-rtp-ifnotnull/src/Main.java b/test/559-checker-rtp-ifnotnull/src/Main.java
new file mode 100644
index 0000000..8f40129
--- /dev/null
+++ b/test/559-checker-rtp-ifnotnull/src/Main.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+public class Main {
+
+ /// CHECK-START: void Main.boundTypeForIfNotNull() ssa_builder (after)
+ /// CHECK-DAG: <<Method:(i|j)\d+>> CurrentMethod
+ /// CHECK-DAG: <<Null:l\d+>> NullConstant
+ /// CHECK-DAG: <<Cst5:i\d+>> IntConstant 5
+ /// CHECK-DAG: <<Cst10:i\d+>> IntConstant 10
+
+ /// CHECK-DAG: InvokeVirtual [<<NullCheck:l\d+>>]
+ /// CHECK-DAG: <<NullCheck>> NullCheck [<<LoopPhi:l\d+>>] klass:int[]
+ /// CHECK-DAG: <<LoopPhi>> Phi [<<Null>>,<<MergePhi:l\d+>>] klass:int[]
+
+ /// CHECK-DAG: <<BoundType:l\d+>> BoundType [<<LoopPhi>>] klass:int[] can_be_null:false
+ /// CHECK-DAG: <<NewArray10:l\d+>> NewArray [<<Cst10>>,<<Method>>] klass:int[]
+ /// CHECK-DAG: <<NotNullPhi:l\d+>> Phi [<<BoundType>>,<<NewArray10>>] klass:int[]
+
+ /// CHECK-DAG: <<NewArray5:l\d+>> NewArray [<<Cst5>>,<<Method>>] klass:int[]
+ /// CHECK-DAG: <<MergePhi>> Phi [<<NewArray5>>,<<NotNullPhi>>] klass:int[]
+
+ public static void boundTypeForIfNotNull() {
+ int[] array = null;
+ for (int i = -1; i < 10; ++i) {
+ if (array == null) {
+ array = new int[5];
+ } else {
+ if (i == 5) {
+ array = new int[10];
+ }
+ array[i] = i;
+ }
+ }
+ array.hashCode();
+ }
+
+ public static void main(String[] args) { }
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 81cfb70..8c5ff0a 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -222,6 +222,7 @@
# Tests that are timing sensitive and flaky on heavily loaded systems.
TEST_ART_TIMING_SENSITIVE_RUN_TESTS := \
+ 002-sleep \
053-wait-some \
055-enum-performance \
133-static-invoke-super
diff --git a/test/run-test b/test/run-test
index d076687..ec34e09 100755
--- a/test/run-test
+++ b/test/run-test
@@ -462,7 +462,7 @@
if [ "$runtime" = "dalvik" ]; then
if [ "$target_mode" = "no" ]; then
framework="${ANDROID_PRODUCT_OUT}/system/framework"
- bpath="${framework}/core-libart.jar:${framework}/core-oj.jar:${framework}/conscrypt.jar:${framework}/okhttp.jar:${framework}/core-junit.jar:${framework}/bouncycastle.jar:${framework}/ext.jar"
+ bpath="${framework}/core-libart.jar:${framework}/core-oj.jar:${framework}/conscrypt.jar:${framework}/okhttp.jar:${framework}/bouncycastle.jar:${framework}/ext.jar"
run_args="${run_args} --boot -Xbootclasspath:${bpath}"
else
true # defaults to using target BOOTCLASSPATH
@@ -509,7 +509,6 @@
bpath="${bpath}:${framework}/core-oj${bpath_suffix}.jar"
bpath="${bpath}:${framework}/conscrypt${bpath_suffix}.jar"
bpath="${bpath}:${framework}/okhttp${bpath_suffix}.jar"
- bpath="${bpath}:${framework}/core-junit${bpath_suffix}.jar"
bpath="${bpath}:${framework}/bouncycastle${bpath_suffix}.jar"
# Pass down the bootclasspath
run_args="${run_args} --runtime-option -Xbootclasspath:${bpath}"
diff --git a/tools/ahat/README.txt b/tools/ahat/README.txt
index adc4d03..a3ecf86 100644
--- a/tools/ahat/README.txt
+++ b/tools/ahat/README.txt
@@ -23,8 +23,6 @@
- Make sortable by clicking on headers.
* For HeapTable with single heap shown, the heap name isn't centered?
* Consistently document functions.
- * Should help be part of an AhatHandler, that automatically gets the menu and
- stylesheet link rather than duplicating that?
* Show version number with --version.
* Show somewhere where to send bugs.
* Include a link to /objects in the overview and menu?
@@ -79,6 +77,12 @@
* Instance.isRoot and Instance.getRootTypes.
Release History:
+ 0.3 Dec 15, 2015
+ Fix page loading performance by showing a limited number of entries by default.
+ Fix mismatch between overview and "roots" totals.
+ Annotate root objects and show their types.
+ Annotate references with their referents.
+
0.2 Oct 20, 2015
Take into account 'count' and 'offset' when displaying strings.
diff --git a/tools/ahat/src/AhatHttpHandler.java b/tools/ahat/src/AhatHttpHandler.java
index 178747c..1d05a66 100644
--- a/tools/ahat/src/AhatHttpHandler.java
+++ b/tools/ahat/src/AhatHttpHandler.java
@@ -41,15 +41,7 @@
PrintStream ps = new PrintStream(exchange.getResponseBody());
try {
HtmlDoc doc = new HtmlDoc(ps, DocString.text("ahat"), DocString.uri("style.css"));
- DocString menu = new DocString();
- menu.appendLink(DocString.uri("/"), DocString.text("overview"));
- menu.append(" - ");
- menu.appendLink(DocString.uri("rooted"), DocString.text("rooted"));
- menu.append(" - ");
- menu.appendLink(DocString.uri("sites"), DocString.text("allocations"));
- menu.append(" - ");
- menu.appendLink(DocString.uri("help"), DocString.text("help"));
- doc.menu(menu);
+ doc.menu(Menu.getMenu());
mAhatHandler.handle(doc, new Query(exchange.getRequestURI()));
doc.close();
} catch (RuntimeException e) {
diff --git a/tools/ahat/src/HelpHandler.java b/tools/ahat/src/HelpHandler.java
new file mode 100644
index 0000000..8de3c85
--- /dev/null
+++ b/tools/ahat/src/HelpHandler.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat;
+
+import com.google.common.io.ByteStreams;
+import com.sun.net.httpserver.HttpExchange;
+import com.sun.net.httpserver.HttpHandler;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.PrintStream;
+
+/**
+ * HelpHandler.
+ *
+ * HttpHandler to show the help page.
+ */
+class HelpHandler implements HttpHandler {
+
+ @Override
+ public void handle(HttpExchange exchange) throws IOException {
+ ClassLoader loader = HelpHandler.class.getClassLoader();
+ exchange.getResponseHeaders().add("Content-Type", "text/html;charset=utf-8");
+ exchange.sendResponseHeaders(200, 0);
+ PrintStream ps = new PrintStream(exchange.getResponseBody());
+ HtmlDoc doc = new HtmlDoc(ps, DocString.text("ahat"), DocString.uri("style.css"));
+ doc.menu(Menu.getMenu());
+
+ InputStream is = loader.getResourceAsStream("help.html");
+ if (is == null) {
+ ps.println("No help available.");
+ } else {
+ ByteStreams.copy(is, ps);
+ }
+
+ doc.close();
+ ps.close();
+ }
+}
diff --git a/tools/ahat/src/Main.java b/tools/ahat/src/Main.java
index ebd49d7..091820f 100644
--- a/tools/ahat/src/Main.java
+++ b/tools/ahat/src/Main.java
@@ -79,7 +79,7 @@
server.createContext("/objects", new AhatHttpHandler(new ObjectsHandler(ahat)));
server.createContext("/site", new AhatHttpHandler(new SiteHandler(ahat)));
server.createContext("/bitmap", new BitmapHandler(ahat));
- server.createContext("/help", new StaticHandler("help.html", "text/html"));
+ server.createContext("/help", new HelpHandler());
server.createContext("/style.css", new StaticHandler("style.css", "text/css"));
server.setExecutor(Executors.newFixedThreadPool(1));
System.out.println("Server started on localhost:" + port);
diff --git a/tools/ahat/src/Menu.java b/tools/ahat/src/Menu.java
new file mode 100644
index 0000000..018e019
--- /dev/null
+++ b/tools/ahat/src/Menu.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat;
+
+/**
+ * A menu showed in the UI that can be used to jump to common pages.
+ */
+class Menu {
+ private static DocString mMenu =
+ DocString.link(DocString.uri("/"), DocString.text("overview"))
+ .append(" - ")
+ .appendLink(DocString.uri("rooted"), DocString.text("rooted"))
+ .append(" - ")
+ .appendLink(DocString.uri("sites"), DocString.text("allocations"))
+ .append(" - ")
+ .appendLink(DocString.uri("help"), DocString.text("help"));
+
+ /**
+ * Returns the menu as a DocString.
+ */
+ public static DocString getMenu() {
+ return mMenu;
+ }
+}
diff --git a/tools/ahat/src/OverviewHandler.java b/tools/ahat/src/OverviewHandler.java
index 0fe4fba..720fcb4 100644
--- a/tools/ahat/src/OverviewHandler.java
+++ b/tools/ahat/src/OverviewHandler.java
@@ -48,14 +48,7 @@
doc.section("Heap Sizes");
printHeapSizes(doc, query);
-
- DocString menu = new DocString();
- menu.appendLink(DocString.uri("rooted"), DocString.text("Rooted"));
- menu.append(" - ");
- menu.appendLink(DocString.uri("site"), DocString.text("Allocations"));
- menu.append(" - ");
- menu.appendLink(DocString.uri("help"), DocString.text("Help"));
- doc.big(menu);
+ doc.big(Menu.getMenu());
}
private void printHeapSizes(Doc doc, Query query) {
diff --git a/tools/ahat/src/help.html b/tools/ahat/src/help.html
index 92ec37d..ff04ad2 100644
--- a/tools/ahat/src/help.html
+++ b/tools/ahat/src/help.html
@@ -14,17 +14,6 @@
limitations under the License.
-->
-<head>
-<link rel="stylesheet" type="text/css" href="style.css">
-</head>
-
-<div class="menu">
- <a href="/">overview</a> -
- <a href="rooted">rooted</a> -
- <a href="sites">allocations</a> -
- <a href="help">help</a>
-</div>
-
<h1>Help</h1>
<h2>Information shown by ahat:</h2>
<ul>
diff --git a/tools/ahat/src/manifest.txt b/tools/ahat/src/manifest.txt
index 421de17..368b744 100644
--- a/tools/ahat/src/manifest.txt
+++ b/tools/ahat/src/manifest.txt
@@ -1,4 +1,4 @@
Name: ahat/
Implementation-Title: ahat
-Implementation-Version: 0.3
+Implementation-Version: 0.4
Main-Class: com.android.ahat.Main
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 880be26..d11e015 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -183,7 +183,6 @@
names: ["libcore.icu.RelativeDateTimeFormatterTest#test_getRelativeDateTimeStringDST",
"libcore.java.lang.OldSystemTest#test_load",
"libcore.java.text.NumberFormatTest#test_currencyWithPatternDigits",
- "libcore.java.text.NumberFormatTest#test_customCurrencySymbol",
"libcore.java.text.NumberFormatTest#test_setCurrency",
"libcore.java.text.OldNumberFormatTest#test_getIntegerInstanceLjava_util_Locale",
"libcore.java.util.CalendarTest#testAddOneDayAndOneDayOver30MinuteDstForwardAdds48Hours",
@@ -192,8 +191,34 @@
"libcore.java.util.CalendarTest#test_nullLocale",
"libcore.java.util.FormatterTest#test_numberLocalization",
"libcore.java.util.FormatterTest#test_uppercaseConversions",
- "libcore.java.util.TimeZoneTest#testTimeZoneIDLocalization",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testClear",
+ "libcore.javax.crypto.CipherTest#testCipher_getInstance_WrongType_Failure",
+ "libcore.javax.crypto.CipherTest#testDecryptBufferZeroSize_mustDecodeToEmptyString",
+ "libcore.javax.security.auth.x500.X500PrincipalTest#testExceptionsForWrongDNs",
+ "org.apache.harmony.luni.tests.java.net.URLConnectionTest#test_getDate",
+ "org.apache.harmony.luni.tests.java.net.URLConnectionTest#test_getExpiration",
+ "org.apache.harmony.regex.tests.java.util.regex.PatternSyntaxExceptionTest#testPatternSyntaxException",
+ "org.apache.harmony.tests.java.lang.FloatTest#test_parseFloat_LString_Harmony6261",
+ "org.apache.harmony.tests.java.lang.ThreadTest#test_isDaemon",
+ "org.apache.harmony.tests.java.text.DecimalFormatSymbolsTest#test_setInternationalCurrencySymbolLjava_lang_String",
+ "org.apache.harmony.tests.java.text.DecimalFormatTest#testSerializationHarmonyRICompatible",
+ "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parseLjava_lang_StringLjava_text_ParsePosition",
+ "org.apache.harmony.tests.java.util.jar.JarFileTest#test_getInputStreamLjava_util_jar_JarEntry_subtest0",
+ "libcore.java.util.CalendarTest#test_clear_45877",
+ "org.apache.harmony.crypto.tests.javax.crypto.spec.SecretKeySpecTest#testGetFormat",
+ "org.apache.harmony.tests.java.util.TimerTaskTest#test_scheduledExecutionTime"]
+},
+{
+ description: "'cat -' does not work anymore",
+ result: EXEC_FAILED,
+ bug: 26395656,
+ modes: [device],
+ names: ["org.apache.harmony.tests.java.lang.ProcessTest#test_getOutputStream"]
+},
+{
+ description: "Missing resource in classpath",
+ result: EXEC_FAILED,
+ modes: [device],
+ names: ["libcore.java.util.prefs.OldAbstractPreferencesTest#testClear",
"libcore.java.util.prefs.OldAbstractPreferencesTest#testExportNode",
"libcore.java.util.prefs.OldAbstractPreferencesTest#testExportSubtree",
"libcore.java.util.prefs.OldAbstractPreferencesTest#testGet",
@@ -217,37 +242,11 @@
"libcore.java.util.prefs.OldAbstractPreferencesTest#testSync",
"libcore.java.util.prefs.PreferencesTest#testHtmlEncoding",
"libcore.java.util.prefs.PreferencesTest#testPreferencesClobbersExistingFiles",
- "libcore.javax.crypto.CipherTest#testCipher_getInstance_WrongType_Failure",
- "libcore.javax.crypto.CipherTest#testDecryptBufferZeroSize_mustDecodeToEmptyString",
- "libcore.javax.security.auth.x500.X500PrincipalTest#testExceptionsForWrongDNs",
- "org.apache.harmony.luni.tests.java.net.URLConnectionTest#test_getDate",
- "org.apache.harmony.luni.tests.java.net.URLConnectionTest#test_getExpiration",
- "org.apache.harmony.regex.tests.java.util.regex.PatternSyntaxExceptionTest#testPatternSyntaxException",
- "org.apache.harmony.tests.java.lang.Character_UnicodeBlockTest#test_forNameLjava_lang_StringExceptions",
- "org.apache.harmony.tests.java.lang.FloatTest#test_parseFloat_LString_Harmony6261",
- "org.apache.harmony.tests.java.lang.ThreadTest#test_isDaemon",
- "org.apache.harmony.tests.java.text.DecimalFormatSymbolsTest#test_setInternationalCurrencySymbolLjava_lang_String",
- "org.apache.harmony.tests.java.text.DecimalFormatTest#testSerializationHarmonyRICompatible",
- "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parseLjava_lang_StringLjava_text_ParsePosition",
- "org.apache.harmony.tests.java.util.CalendarTest#test_getDisplayNamesIILjava_util_Locale",
"org.apache.harmony.tests.java.util.PropertiesTest#test_storeToXMLLjava_io_OutputStreamLjava_lang_StringLjava_lang_String",
- "org.apache.harmony.tests.java.util.jar.JarFileTest#test_getInputStreamLjava_util_jar_JarEntry_subtest0",
"org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testExportNode",
"org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testExportSubtree",
"org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testFlush",
"org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testSync",
- "org.apache.harmony.tests.java.util.prefs.FilePreferencesImplTest#testPutGet",
- "libcore.java.util.CalendarTest#test_clear_45877",
- "org.apache.harmony.crypto.tests.javax.crypto.spec.SecretKeySpecTest#testGetFormat",
- "org.apache.harmony.tests.java.util.TimerTaskTest#test_scheduledExecutionTime"]
-},
-{
- description: "Failing tests after enso move, only on arm32",
- result: EXEC_FAILED,
- bug: 26353151,
- modes_variants: [[device, X32]],
- names: ["org.apache.harmony.tests.java.text.DecimalFormatTest#test_formatDouble_withFieldPosition",
- "org.apache.harmony.tests.java.text.DecimalFormatTest#test_formatToCharacterIterator_original"]
+ "org.apache.harmony.tests.java.util.prefs.FilePreferencesImplTest#testPutGet"]
}
-
]