Merge "bce: add support to narrow two MonotonicValueRange's at the same time."
diff --git a/.gitignore b/.gitignore
index 1cdfed9..3d1658d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,3 +2,4 @@
USE_PORTABLE_COMPILER
SMALL_ART
SEA_IR_ART
+JIT_ART
diff --git a/Android.mk b/Android.mk
index 76c3aa5..c740a0d 100644
--- a/Android.mk
+++ b/Android.mk
@@ -202,6 +202,11 @@
test-art-host-interpreter: test-art-host-run-test-interpreter
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+# All host tests that run solely on the jit.
+.PHONY: test-art-host-jit
+test-art-host-jit: test-art-host-run-test-jit
+ $(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+
# Primary host architecture variants:
.PHONY: test-art-host$(ART_PHONY_TEST_HOST_SUFFIX)
test-art-host$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-gtest$(ART_PHONY_TEST_HOST_SUFFIX) \
@@ -220,6 +225,10 @@
test-art-host-interpreter$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-interpreter$(ART_PHONY_TEST_HOST_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+.PHONY: test-art-host-jit$(ART_PHONY_TEST_HOST_SUFFIX)
+test-art-host-jit$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-jit$(ART_PHONY_TEST_HOST_SUFFIX)
+ $(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+
# Secondary host architecture variants:
ifneq ($(HOST_PREFER_32_BIT),true)
.PHONY: test-art-host$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
@@ -238,6 +247,10 @@
.PHONY: test-art-host-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
test-art-host-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+
+.PHONY: test-art-host-jit$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
+test-art-host-jit$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-jit$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
+ $(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
endif
# Valgrind. Currently only 32b gtests.
@@ -268,6 +281,11 @@
test-art-target-interpreter: test-art-target-run-test-interpreter
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+# All target tests that run solely on the jit.
+.PHONY: test-art-target-jit
+test-art-target-jit: test-art-target-run-test-jit
+ $(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+
# Primary target architecture variants:
.PHONY: test-art-target$(ART_PHONY_TEST_TARGET_SUFFIX)
test-art-target$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-gtest$(ART_PHONY_TEST_TARGET_SUFFIX) \
@@ -286,6 +304,10 @@
test-art-target-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+.PHONY: test-art-target-jit$(ART_PHONY_TEST_TARGET_SUFFIX)
+test-art-target-jit$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-jit$(ART_PHONY_TEST_TARGET_SUFFIX)
+ $(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+
# Secondary target architecture variants:
ifdef TARGET_2ND_ARCH
.PHONY: test-art-target$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
@@ -304,6 +326,10 @@
.PHONY: test-art-target-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
test-art-target-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+
+.PHONY: test-art-target-jit$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
+test-art-target-jit$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-jit$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
+ $(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
endif
endif # art_test_bother
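Note: these hooks slot the JIT into the same test lattice as the existing interpreter and optimizing variants, so the usual entry points apply: make test-art-host-jit, make test-art-target-jit, or the per-architecture suffixed forms defined above.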
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 3000cdf..6a83e72 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -48,6 +48,18 @@
endif
#
+# Used to enable JIT
+#
+ART_JIT := false
+ifneq ($(wildcard art/JIT_ART),)
+$(info Enabling ART_JIT because of existence of art/JIT_ART)
+ART_JIT := true
+endif
+ifeq ($(WITH_ART_JIT), true)
+ART_JIT := true
+endif
+
+#
# Used to enable smart mode
#
ART_SMALL_MODE := false
@@ -62,8 +74,8 @@
#
# Used to change the default GC. Valid values are CMS, SS, GSS. The default is CMS.
#
-art_default_gc_type ?= CMS
-art_default_gc_type_cflags := -DART_DEFAULT_GC_TYPE_IS_$(art_default_gc_type)
+ART_DEFAULT_GC_TYPE ?= CMS
+art_default_gc_type_cflags := -DART_DEFAULT_GC_TYPE_IS_$(ART_DEFAULT_GC_TYPE)
ART_HOST_CFLAGS :=
ART_TARGET_CFLAGS :=
@@ -191,6 +203,7 @@
-Wunreachable-code \
-Wredundant-decls \
-Wshadow \
+ -Wunused \
-fvisibility=protected \
$(art_default_gc_type_cflags)
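Note: the ART_JIT block gives two equivalent switches for enabling the JIT build: create a marker file with "touch art/JIT_ART" (kept untracked by the .gitignore hunk at the top) or export WITH_ART_JIT=true. Either flips ART_JIT from its default of false to true.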
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index da50d53..547e92e 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -46,6 +46,9 @@
# Do you want interpreter tests run?
ART_TEST_INTERPRETER ?= $(ART_TEST_FULL)
+# Do you want JIT tests run?
+ART_TEST_JIT ?= $(ART_TEST_FULL)
+
# Do you want optimizing compiler tests run?
ART_TEST_OPTIMIZING ?= $(ART_TEST_FULL)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index c576d1b..7ab4d64 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -116,6 +116,7 @@
runtime/entrypoints_order_test.cc \
runtime/exception_test.cc \
runtime/gc/accounting/card_table_test.cc \
+ runtime/gc/accounting/mod_union_table_test.cc \
runtime/gc/accounting/space_bitmap_test.cc \
runtime/gc/heap_test.cc \
runtime/gc/reference_queue_test.cc \
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 8d49565..4d2fa41 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -31,7 +31,7 @@
endif
# Use dex2oat debug version for better error reporting
-# $(1): compiler - default, optimizing or interpreter.
+# $(1): compiler - default, optimizing, jit or interpreter.
# $(2): pic/no-pic
# $(3): 2ND_ or undefined, 2ND_ for 32-bit host builds.
# $(4): wrapper, e.g., valgrind.
@@ -67,9 +67,9 @@
ifeq ($(1),default)
# Default has no infix, no compile options.
endif
- ifneq ($(filter-out default interpreter optimizing,$(1)),)
+ ifneq ($(filter-out default interpreter jit optimizing,$(1)),)
# Technically this test is not precise, but hopefully good enough.
- $$(error found $(1) expected default, interpreter or optimizing)
+ $$(error found $(1) expected default, interpreter, jit or optimizing)
endif
ifeq ($(2),pic)
@@ -127,7 +127,7 @@
core_pic_infix :=
endef # create-core-oat-host-rules
-# $(1): compiler - default, optimizing or interpreter.
+# $(1): compiler - default, optimizing, jit or interpreter.
# $(2): wrapper.
# $(3): dex2oat suffix.
define create-core-oat-host-rule-combination
@@ -181,9 +181,9 @@
ifeq ($(1),default)
# Default has no infix, no compile options.
endif
- ifneq ($(filter-out default interpreter optimizing,$(1)),)
+ ifneq ($(filter-out default interpreter jit optimizing,$(1)),)
# Technically this test is not precise, but hopefully good enough.
- $$(error found $(1) expected default, interpreter or optimizing)
+ $$(error found $(1) expected default, interpreter, jit or optimizing)
endif
ifeq ($(2),pic)
@@ -245,7 +245,7 @@
core_pic_infix :=
endef # create-core-oat-target-rules
-# $(1): compiler - default, optimizing or interpreter.
+# $(1): compiler - default, optimizing, jit or interpreter.
# $(2): wrapper.
# $(3): dex2oat suffix.
define create-core-oat-target-rule-combination
diff --git a/cmdline/cmdline_parse_result.h b/cmdline/cmdline_parse_result.h
index d6ac341..717642f 100644
--- a/cmdline/cmdline_parse_result.h
+++ b/cmdline/cmdline_parse_result.h
@@ -117,9 +117,9 @@
}
// Make sure copying is allowed
- CmdlineParseResult(const CmdlineParseResult& other) = default;
+ CmdlineParseResult(const CmdlineParseResult&) = default;
// Make sure moving is cheap
- CmdlineParseResult(CmdlineParseResult&& other) = default;
+ CmdlineParseResult(CmdlineParseResult&&) = default;
private:
explicit CmdlineParseResult(const T& value)
diff --git a/cmdline/cmdline_parser.h b/cmdline/cmdline_parser.h
index a555356..e4af4f9 100644
--- a/cmdline/cmdline_parser.h
+++ b/cmdline/cmdline_parser.h
@@ -490,9 +490,9 @@
}
// Ensure we have a default move constructor.
- CmdlineParser(CmdlineParser&& other) = default;
+ CmdlineParser(CmdlineParser&&) = default;
// Ensure we have a default move assignment operator.
- CmdlineParser& operator=(CmdlineParser&& other) = default;
+ CmdlineParser& operator=(CmdlineParser&&) = default;
private:
friend struct Builder;
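Note: the recurring edit in these cmdline headers simply drops parameter names from defaulted copy/move members, presumably to keep things quiet alongside the -Wunused warning enabled in build/Android.common_build.mk. Parameter names are optional in C++ declarations, so the change is purely cosmetic; a minimal sketch with illustrative names:

    struct Example {
      Example(const Example&) = default;            // unnamed: the parameter is never used
      Example(Example&&) = default;
      Example& operator=(Example&&) = default;
    };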
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index b740b41..18d8c7a 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -411,6 +411,26 @@
} // TEST_F
/*
+* -Xjit, -Xnojit, -Xjitcodecachesize, -Xjitthreshold
+*/
+TEST_F(CmdlineParserTest, TestJitOptions) {
+ /*
+ * Test successes
+ */
+ {
+ EXPECT_SINGLE_PARSE_VALUE(true, "-Xjit", M::UseJIT);
+ EXPECT_SINGLE_PARSE_VALUE(false, "-Xnojit", M::UseJIT);
+ }
+ {
+ EXPECT_SINGLE_PARSE_VALUE(MemoryKiB(16 * KB), "-Xjitcodecachesize:16K", M::JITCodeCacheCapacity);
+ EXPECT_SINGLE_PARSE_VALUE(MemoryKiB(16 * MB), "-Xjitcodecachesize:16M", M::JITCodeCacheCapacity);
+ }
+ {
+ EXPECT_SINGLE_PARSE_VALUE(12345u, "-Xjitthreshold:12345", M::JITCompileThreshold);
+ }
+} // TEST_F
+
+/*
* -X-profile-*
*/
TEST_F(CmdlineParserTest, TestProfilerOptions) {
@@ -495,9 +515,8 @@
"-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:abdef",
"-Xdexopt:foobar", "-Xnoquithandler", "-Xjnigreflimit:ixnay", "-Xgenregmap", "-Xnogenregmap",
"-Xverifyopt:never", "-Xcheckdexsum", "-Xincludeselectedop", "-Xjitop:noop",
- "-Xincludeselectedmethod", "-Xjitthreshold:123", "-Xjitcodecachesize:12345",
- "-Xjitblocking", "-Xjitmethod:_", "-Xjitclass:nosuchluck", "-Xjitoffset:none",
- "-Xjitconfig:yes", "-Xjitcheckcg", "-Xjitverbose", "-Xjitprofile",
+ "-Xincludeselectedmethod", "-Xjitblocking", "-Xjitmethod:_", "-Xjitclass:nosuchluck",
+ "-Xjitoffset:none", "-Xjitconfig:yes", "-Xjitcheckcg", "-Xjitverbose", "-Xjitprofile",
"-Xjitdisableopt", "-Xjitsuspendpoll", "-XX:mainThreadStackSize=1337"
};
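Note: the new TestJitOptions block exercises -Xjit/-Xnojit as booleans, -Xjitcodecachesize as a memory size with K/M suffixes, and -Xjitthreshold as a plain integer, which is why those flags drop out of the ignored-options list above. A hypothetical sketch of the suffix arithmetic (not ART's actual parser):

    #include <cstdint>
    #include <string>

    // "16K" -> 16 * 1024; "16M" -> 16 * 1024 * 1024. std::stoull stops at the
    // first non-digit, leaving the suffix character for the switch.
    uint64_t ParseMemorySpec(const std::string& spec) {
      uint64_t value = std::stoull(spec);
      switch (spec.back()) {
        case 'K': return value * 1024;
        case 'M': return value * 1024 * 1024;
        default:  return value;
      }
    }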
diff --git a/cmdline/cmdline_result.h b/cmdline/cmdline_result.h
index bf3a85d..963dfc1 100644
--- a/cmdline/cmdline_result.h
+++ b/cmdline/cmdline_result.h
@@ -65,9 +65,9 @@
}
// Make sure copying exists
- CmdlineResult(const CmdlineResult& other) = default;
+ CmdlineResult(const CmdlineResult&) = default;
// Make sure moving is cheap
- CmdlineResult(CmdlineResult&& other) = default;
+ CmdlineResult(CmdlineResult&&) = default;
private:
const Status status_;
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index 180baec..de99278 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -337,8 +337,8 @@
// Default constructors/copy-constructors.
MillisecondsToNanoseconds() : nanoseconds_(0ul) {}
- MillisecondsToNanoseconds(const MillisecondsToNanoseconds& rhs) = default;
- MillisecondsToNanoseconds(MillisecondsToNanoseconds&& rhs) = default;
+ MillisecondsToNanoseconds(const MillisecondsToNanoseconds&) = default;
+ MillisecondsToNanoseconds(MillisecondsToNanoseconds&&) = default;
private:
uint64_t nanoseconds_;
@@ -421,8 +421,8 @@
}
ParseStringList() = default;
- ParseStringList(const ParseStringList& rhs) = default;
- ParseStringList(ParseStringList&& rhs) = default;
+ ParseStringList(const ParseStringList&) = default;
+ ParseStringList(ParseStringList&&) = default;
private:
std::vector<std::string> list_;
@@ -585,6 +585,8 @@
log_verbosity.heap = true;
} else if (verbose_options[j] == "jdwp") {
log_verbosity.jdwp = true;
+ } else if (verbose_options[j] == "jit") {
+ log_verbosity.jit = true;
} else if (verbose_options[j] == "jni") {
log_verbosity.jni = true;
} else if (verbose_options[j] == "monitor") {
@@ -651,8 +653,8 @@
max_stack_depth_(0) {
}
- TestProfilerOptions(const TestProfilerOptions& other) = default;
- TestProfilerOptions(TestProfilerOptions&& other) = default;
+ TestProfilerOptions(const TestProfilerOptions&) = default;
+ TestProfilerOptions(TestProfilerOptions&&) = default;
};
static inline std::ostream& operator<<(std::ostream& stream, const TestProfilerOptions& options) {
diff --git a/cmdline/token_range.h b/cmdline/token_range.h
index 50c54fe..5b54384 100644
--- a/cmdline/token_range.h
+++ b/cmdline/token_range.h
@@ -90,10 +90,10 @@
}
// Non-copying copy constructor.
- TokenRange(const TokenRange& other) = default;
+ TokenRange(const TokenRange&) = default;
// Non-copying move constructor.
- TokenRange(TokenRange&& other) = default;
+ TokenRange(TokenRange&&) = default;
// Non-copying constructor. Retains reference to an existing list of tokens, with offset.
explicit TokenRange(std::shared_ptr<TokenList> token_list)
diff --git a/compiler/Android.mk b/compiler/Android.mk
index beb34dc..86a27c1 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -79,6 +79,7 @@
driver/compiler_driver.cc \
driver/compiler_options.cc \
driver/dex_compilation_unit.cc \
+ jit/jit_compiler.cc \
jni/quick/arm/calling_convention_arm.cc \
jni/quick/arm64/calling_convention_arm64.cc \
jni/quick/mips/calling_convention_mips.cc \
@@ -161,8 +162,7 @@
driver/compiler_options.h \
image_writer.h \
optimizing/locations.h \
- utils/arm/constants_arm.h \
- utils/dex_instruction_utils.h
+ utils/arm/constants_arm.h
# $(1): target or host
# $(2): ndebug or debug
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 1cd78f8..09be437 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -52,19 +52,19 @@
const SwapVector<uint8_t>* code = compiled_method->GetQuickCode();
uint32_t code_size = code->size();
CHECK_NE(0u, code_size);
- const SwapVector<uint8_t>& vmap_table = compiled_method->GetVmapTable();
- uint32_t vmap_table_offset = vmap_table.empty() ? 0u
- : sizeof(OatQuickMethodHeader) + vmap_table.size();
+ const SwapVector<uint8_t>* vmap_table = compiled_method->GetVmapTable();
+ uint32_t vmap_table_offset = vmap_table->empty() ? 0u
+ : sizeof(OatQuickMethodHeader) + vmap_table->size();
const SwapVector<uint8_t>* mapping_table = compiled_method->GetMappingTable();
bool mapping_table_used = mapping_table != nullptr && !mapping_table->empty();
size_t mapping_table_size = mapping_table_used ? mapping_table->size() : 0U;
uint32_t mapping_table_offset = !mapping_table_used ? 0u
- : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table_size;
+ : sizeof(OatQuickMethodHeader) + vmap_table->size() + mapping_table_size;
const SwapVector<uint8_t>* gc_map = compiled_method->GetGcMap();
bool gc_map_used = gc_map != nullptr && !gc_map->empty();
size_t gc_map_size = gc_map_used ? gc_map->size() : 0U;
uint32_t gc_map_offset = !gc_map_used ? 0u
- : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table_size + gc_map_size;
+ : sizeof(OatQuickMethodHeader) + vmap_table->size() + mapping_table_size + gc_map_size;
OatQuickMethodHeader method_header(mapping_table_offset, vmap_table_offset, gc_map_offset,
compiled_method->GetFrameSizeInBytes(),
compiled_method->GetCoreSpillMask(),
@@ -72,14 +72,14 @@
header_code_and_maps_chunks_.push_back(std::vector<uint8_t>());
std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back();
- size_t size = sizeof(method_header) + code_size + vmap_table.size() + mapping_table_size +
+ size_t size = sizeof(method_header) + code_size + vmap_table->size() + mapping_table_size +
gc_map_size;
size_t code_offset = compiled_method->AlignCode(size - code_size);
size_t padding = code_offset - (size - code_size);
chunk->reserve(padding + size);
chunk->resize(sizeof(method_header));
memcpy(&(*chunk)[0], &method_header, sizeof(method_header));
- chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end());
+ chunk->insert(chunk->begin(), vmap_table->begin(), vmap_table->end());
if (mapping_table_used) {
chunk->insert(chunk->begin(), mapping_table->begin(), mapping_table->end());
}
@@ -212,7 +212,7 @@
CHECK(method != nullptr);
TimingLogger timings("CommonTest::CompileMethod", false, false);
TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
- compiler_driver_->CompileOne(method, &timings);
+ compiler_driver_->CompileOne(Thread::Current(), method, &timings);
TimingLogger::ScopedTiming t2("MakeExecutable", &timings);
MakeExecutable(method);
}
@@ -253,6 +253,7 @@
(size_t)100 * 1024 * 1024, // 100MB
PROT_NONE,
false /* no need for 4gb flag with fixed mmap */,
+ false /* not reusing existing reservation */,
&error_msg));
CHECK(image_reservation_.get() != nullptr) << error_msg;
}
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 22be28c..1849e7e 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -20,16 +20,29 @@
namespace art {
CompiledCode::CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set,
- const ArrayRef<const uint8_t>& quick_code)
+ const ArrayRef<const uint8_t>& quick_code, bool owns_code_array)
: compiler_driver_(compiler_driver), instruction_set_(instruction_set),
- quick_code_(nullptr) {
+ owns_code_array_(owns_code_array), quick_code_(nullptr) {
SetCode(&quick_code);
}
void CompiledCode::SetCode(const ArrayRef<const uint8_t>* quick_code) {
if (quick_code != nullptr) {
CHECK(!quick_code->empty());
- quick_code_ = compiler_driver_->DeduplicateCode(*quick_code);
+ if (owns_code_array_) {
+ // If we are supposed to own the code, don't deduplicate it.
+ CHECK(quick_code_ == nullptr);
+ quick_code_ = new SwapVector<uint8_t>(quick_code->begin(), quick_code->end(),
+ compiler_driver_->GetSwapSpaceAllocator());
+ } else {
+ quick_code_ = compiler_driver_->DeduplicateCode(*quick_code);
+ }
+ }
+}
+
+CompiledCode::~CompiledCode() {
+ if (owns_code_array_) {
+ delete quick_code_;
}
}
@@ -46,11 +59,11 @@
return (rhs.quick_code_ == nullptr);
}
-uint32_t CompiledCode::AlignCode(uint32_t offset) const {
+size_t CompiledCode::AlignCode(size_t offset) const {
return AlignCode(offset, instruction_set_);
}
-uint32_t CompiledCode::AlignCode(uint32_t offset, InstructionSet instruction_set) {
+size_t CompiledCode::AlignCode(size_t offset, InstructionSet instruction_set) {
return RoundUp(offset, GetInstructionSetAlignment(instruction_set));
}
@@ -120,17 +133,39 @@
const ArrayRef<const uint8_t>& native_gc_map,
const ArrayRef<const uint8_t>& cfi_info,
const ArrayRef<LinkerPatch>& patches)
- : CompiledCode(driver, instruction_set, quick_code), frame_size_in_bytes_(frame_size_in_bytes),
- core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask),
- src_mapping_table_(src_mapping_table == nullptr ?
- driver->DeduplicateSrcMappingTable(ArrayRef<SrcMapElem>()) :
- driver->DeduplicateSrcMappingTable(ArrayRef<SrcMapElem>(src_mapping_table->Arrange()))),
- mapping_table_(mapping_table.data() == nullptr ?
- nullptr : driver->DeduplicateMappingTable(mapping_table)),
- vmap_table_(driver->DeduplicateVMapTable(vmap_table)),
- gc_map_(native_gc_map.data() == nullptr ? nullptr : driver->DeduplicateGCMap(native_gc_map)),
- cfi_info_(cfi_info.data() == nullptr ? nullptr : driver->DeduplicateCFIInfo(cfi_info)),
+ : CompiledCode(driver, instruction_set, quick_code, !driver->DedupeEnabled()),
+ owns_arrays_(!driver->DedupeEnabled()),
+ frame_size_in_bytes_(frame_size_in_bytes), core_spill_mask_(core_spill_mask),
+ fp_spill_mask_(fp_spill_mask),
patches_(patches.begin(), patches.end(), driver->GetSwapSpaceAllocator()) {
+ if (owns_arrays_) {
+ if (src_mapping_table == nullptr) {
+ src_mapping_table_ = new SwapSrcMap(driver->GetSwapSpaceAllocator());
+ } else {
+ src_mapping_table->Arrange();
+ src_mapping_table_ = new SwapSrcMap(src_mapping_table->begin(), src_mapping_table->end(),
+ driver->GetSwapSpaceAllocator());
+ }
+ mapping_table_ = mapping_table.empty() ?
+ nullptr : new SwapVector<uint8_t>(mapping_table.begin(), mapping_table.end(),
+ driver->GetSwapSpaceAllocator());
+ vmap_table_ = new SwapVector<uint8_t>(vmap_table.begin(), vmap_table.end(),
+ driver->GetSwapSpaceAllocator());
+ gc_map_ = native_gc_map.empty() ? nullptr :
+ new SwapVector<uint8_t>(native_gc_map.begin(), native_gc_map.end(),
+ driver->GetSwapSpaceAllocator());
+ cfi_info_ = cfi_info.empty() ? nullptr :
+ new SwapVector<uint8_t>(cfi_info.begin(), cfi_info.end(), driver->GetSwapSpaceAllocator());
+ } else {
+ src_mapping_table_ = src_mapping_table == nullptr ?
+ driver->DeduplicateSrcMappingTable(ArrayRef<SrcMapElem>()) :
+ driver->DeduplicateSrcMappingTable(ArrayRef<SrcMapElem>(src_mapping_table->Arrange()));
+ mapping_table_ = mapping_table.empty() ?
+ nullptr : driver->DeduplicateMappingTable(mapping_table);
+ vmap_table_ = driver->DeduplicateVMapTable(vmap_table);
+ gc_map_ = native_gc_map.empty() ? nullptr : driver->DeduplicateGCMap(native_gc_map);
+ cfi_info_ = cfi_info.empty() ? nullptr : driver->DeduplicateCFIInfo(cfi_info);
+ }
}
CompiledMethod* CompiledMethod::SwapAllocCompiledMethod(
@@ -194,4 +229,14 @@
alloc.deallocate(m, 1);
}
+CompiledMethod::~CompiledMethod() {
+ if (owns_arrays_) {
+ delete src_mapping_table_;
+ delete mapping_table_;
+ delete vmap_table_;
+ delete gc_map_;
+ delete cfi_info_;
+ }
+}
+
} // namespace art
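Note: ownership is now conditional: when deduplication is on, the code and map tables live in the driver's dedupe sets and must not be freed, while a JIT-compiled method owns private copies that the new virtual destructors release. A minimal sketch of the idiom (illustrative types, not ART's):

    #include <cstdint>
    #include <vector>

    class Blob {
     public:
      Blob(std::vector<uint8_t>* data, bool owns) : data_(data), owns_(owns) {}
      ~Blob() {
        if (owns_) {
          delete data_;  // deduplicated data is shared; it is left alone
        }
      }

     private:
      std::vector<uint8_t>* const data_;
      const bool owns_;
    };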
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 6013507..d6a07f6 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -27,10 +27,6 @@
#include "utils/array_ref.h"
#include "utils/swap_space.h"
-namespace llvm {
- class Function;
-} // namespace llvm
-
namespace art {
class CompilerDriver;
@@ -39,7 +35,9 @@
public:
// For Quick to supply a code blob
CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set,
- const ArrayRef<const uint8_t>& quick_code);
+ const ArrayRef<const uint8_t>& quick_code, bool owns_code_array);
+
+ virtual ~CompiledCode();
InstructionSet GetInstructionSet() const {
return instruction_set_;
@@ -56,8 +54,8 @@
// To align an offset from a page-aligned value to make it suitable
// for code storage. For example on ARM, to ensure that PC relative
// value computations work out as expected.
- uint32_t AlignCode(uint32_t offset) const;
- static uint32_t AlignCode(uint32_t offset, InstructionSet instruction_set);
+ size_t AlignCode(size_t offset) const;
+ static size_t AlignCode(size_t offset, InstructionSet instruction_set);
// returns the difference between the code address and a usable PC.
// mainly to cope with kThumb2 where the lower bit must be set.
@@ -78,6 +76,9 @@
const InstructionSet instruction_set_;
+ // Whether we own the code array and must free it in the destructor.
+ const bool owns_code_array_;
+
// Used to store the PIC code for Quick.
SwapVector<uint8_t>* quick_code_;
@@ -122,6 +123,7 @@
using std::vector<SrcMapElem, Allocator>::size;
explicit SrcMap() {}
+ explicit SrcMap(const Allocator& alloc) : std::vector<SrcMapElem, Allocator>(alloc) {}
template <class InputIt>
SrcMap(InputIt first, InputIt last, const Allocator& alloc)
@@ -291,7 +293,7 @@
const ArrayRef<const uint8_t>& cfi_info,
const ArrayRef<LinkerPatch>& patches = ArrayRef<LinkerPatch>());
- ~CompiledMethod() {}
+ virtual ~CompiledMethod();
static CompiledMethod* SwapAllocCompiledMethod(
CompilerDriver* driver,
@@ -347,9 +349,9 @@
return mapping_table_;
}
- const SwapVector<uint8_t>& GetVmapTable() const {
+ const SwapVector<uint8_t>* GetVmapTable() const {
DCHECK(vmap_table_ != nullptr);
- return *vmap_table_;
+ return vmap_table_;
}
SwapVector<uint8_t> const* GetGcMap() const {
@@ -365,6 +367,8 @@
}
private:
+ // Whether the arrays are owned by the compiled method (true) or by the dedupe sets (false).
+ const bool owns_arrays_;
// For quick code, the size of the activation used by the code.
const size_t frame_size_in_bytes_;
// For quick code, a bit mask describing spilled GPR callee-save registers.
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index 54e34ea..b91c3ca 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -142,7 +142,7 @@
cu_.mir_graph->ifield_lowering_infos_.reserve(count);
for (size_t i = 0u; i != count; ++i) {
const IFieldDef* def = &defs[i];
- MirIFieldLoweringInfo field_info(def->field_idx, def->type);
+ MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
if (def->declaring_dex_file != 0u) {
field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
field_info.declaring_field_idx_ = def->declaring_field_idx;
diff --git a/compiler/dex/gvn_dead_code_elimination_test.cc b/compiler/dex/gvn_dead_code_elimination_test.cc
index 954e9f1..4d2b8b3 100644
--- a/compiler/dex/gvn_dead_code_elimination_test.cc
+++ b/compiler/dex/gvn_dead_code_elimination_test.cc
@@ -143,7 +143,7 @@
cu_.mir_graph->ifield_lowering_infos_.reserve(count);
for (size_t i = 0u; i != count; ++i) {
const IFieldDef* def = &defs[i];
- MirIFieldLoweringInfo field_info(def->field_idx, def->type);
+ MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
if (def->declaring_dex_file != 0u) {
field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
field_info.declaring_field_idx_ = def->declaring_field_idx;
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index 97ea05a..379c952 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -21,8 +21,8 @@
#include "base/arena_object.h"
#include "base/logging.h"
+#include "dex_instruction_utils.h"
#include "global_value_numbering.h"
-#include "utils/dex_instruction_utils.h"
namespace art {
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index d1c3a6b..566527a 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -96,7 +96,7 @@
cu_.mir_graph->ifield_lowering_infos_.reserve(count);
for (size_t i = 0u; i != count; ++i) {
const IFieldDef* def = &defs[i];
- MirIFieldLoweringInfo field_info(def->field_idx, def->type);
+ MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
if (def->declaring_dex_file != 0u) {
field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
field_info.declaring_field_idx_ = def->declaring_field_idx;
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 31dbc60..a89b250 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -416,8 +416,8 @@
// 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
kAnInvoke | kAnHeavyWeight,
- // 73 UNUSED_73
- kAnNone,
+ // 73 RETURN_VOID_BARRIER
+ kAnBranch,
// 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
kAnInvoke | kAnHeavyWeight,
@@ -752,88 +752,88 @@
// E2 USHR_INT_LIT8 vAA, vBB, #+CC
kAnMath | kAnInt,
- // E3 IGET_VOLATILE
+ // E3 IGET_QUICK
kAnNone,
- // E4 IPUT_VOLATILE
+ // E4 IGET_WIDE_QUICK
kAnNone,
- // E5 SGET_VOLATILE
+ // E5 IGET_OBJECT_QUICK
kAnNone,
- // E6 SPUT_VOLATILE
+ // E6 IPUT_QUICK
kAnNone,
- // E7 IGET_OBJECT_VOLATILE
+ // E7 IPUT_WIDE_QUICK
kAnNone,
- // E8 IGET_WIDE_VOLATILE
+ // E8 IPUT_OBJECT_QUICK
kAnNone,
- // E9 IPUT_WIDE_VOLATILE
- kAnNone,
-
- // EA SGET_WIDE_VOLATILE
- kAnNone,
-
- // EB SPUT_WIDE_VOLATILE
- kAnNone,
-
- // EC BREAKPOINT
- kAnNone,
-
- // ED THROW_VERIFICATION_ERROR
- kAnHeavyWeight | kAnBranch,
-
- // EE EXECUTE_INLINE
- kAnNone,
-
- // EF EXECUTE_INLINE_RANGE
- kAnNone,
-
- // F0 INVOKE_OBJECT_INIT_RANGE
+ // E9 INVOKE_VIRTUAL_QUICK
kAnInvoke | kAnHeavyWeight,
- // F1 RETURN_VOID_BARRIER
- kAnBranch,
-
- // F2 IGET_QUICK
- kAnNone,
-
- // F3 IGET_WIDE_QUICK
- kAnNone,
-
- // F4 IGET_OBJECT_QUICK
- kAnNone,
-
- // F5 IPUT_QUICK
- kAnNone,
-
- // F6 IPUT_WIDE_QUICK
- kAnNone,
-
- // F7 IPUT_OBJECT_QUICK
- kAnNone,
-
- // F8 INVOKE_VIRTUAL_QUICK
+ // EA INVOKE_VIRTUAL_RANGE_QUICK
kAnInvoke | kAnHeavyWeight,
- // F9 INVOKE_VIRTUAL_QUICK_RANGE
- kAnInvoke | kAnHeavyWeight,
-
- // FA INVOKE_SUPER_QUICK
- kAnInvoke | kAnHeavyWeight,
-
- // FB INVOKE_SUPER_QUICK_RANGE
- kAnInvoke | kAnHeavyWeight,
-
- // FC IPUT_OBJECT_VOLATILE
+ // EB IPUT_BOOLEAN_QUICK
kAnNone,
- // FD SGET_OBJECT_VOLATILE
+ // EC IPUT_BYTE_QUICK
kAnNone,
- // FE SPUT_OBJECT_VOLATILE
+ // ED IPUT_CHAR_QUICK
+ kAnNone,
+
+ // EE IPUT_SHORT_QUICK
+ kAnNone,
+
+ // EF IGET_BOOLEAN_QUICK
+ kAnNone,
+
+ // F0 IGET_BYTE_QUICK
+ kAnNone,
+
+ // F1 IGET_CHAR_QUICK
+ kAnNone,
+
+ // F2 IGET_SHORT_QUICK
+ kAnNone,
+
+ // F3 UNUSED_F3
+ kAnNone,
+
+ // F4 UNUSED_F4
+ kAnNone,
+
+ // F5 UNUSED_F5
+ kAnNone,
+
+ // F6 UNUSED_F6
+ kAnNone,
+
+ // F7 UNUSED_F7
+ kAnNone,
+
+ // F8 UNUSED_F8
+ kAnNone,
+
+ // F9 UNUSED_F9
+ kAnNone,
+
+ // FA UNUSED_FA
+ kAnNone,
+
+ // FB UNUSED_FB
+ kAnNone,
+
+ // FC UNUSED_FC
+ kAnNone,
+
+ // FD UNUSED_FD
+ kAnNone,
+
+ // FE UNUSED_FE
kAnNone,
// FF UNUSED_FF
@@ -1203,12 +1203,13 @@
}
void MIRGraph::DoCacheFieldLoweringInfo() {
+ static constexpr uint32_t kFieldIndexFlagQuickened = 0x80000000;
// All IGET/IPUT/SGET/SPUT instructions take 2 code units and there must also be a RETURN.
const uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 2u;
ScopedArenaAllocator allocator(&cu_->arena_stack);
- uint16_t* field_idxs = allocator.AllocArray<uint16_t>(max_refs, kArenaAllocMisc);
- DexMemAccessType* field_types = allocator.AllocArray<DexMemAccessType>(max_refs, kArenaAllocMisc);
-
+ auto* field_idxs = allocator.AllocArray<uint32_t>(max_refs, kArenaAllocMisc);
+ DexMemAccessType* field_types = allocator.AllocArray<DexMemAccessType>(
+ max_refs, kArenaAllocMisc);
// Find IGET/IPUT/SGET/SPUT insns, store IGET/IPUT fields at the beginning, SGET/SPUT at the end.
size_t ifield_pos = 0u;
size_t sfield_pos = max_refs;
@@ -1221,23 +1222,36 @@
// Get field index and try to find it among existing indexes. If found, it's usually among
// the last few added, so we'll start the search from ifield_pos/sfield_pos. Though this
// is a linear search, it actually performs much better than map based approach.
- if (IsInstructionIGetOrIPut(mir->dalvikInsn.opcode)) {
- uint16_t field_idx = mir->dalvikInsn.vC;
+ const bool is_iget_or_iput = IsInstructionIGetOrIPut(mir->dalvikInsn.opcode);
+ const bool is_iget_or_iput_quick = IsInstructionIGetQuickOrIPutQuick(mir->dalvikInsn.opcode);
+ if (is_iget_or_iput || is_iget_or_iput_quick) {
+ uint32_t field_idx;
+ DexMemAccessType access_type;
+ if (is_iget_or_iput) {
+ field_idx = mir->dalvikInsn.vC;
+ access_type = IGetOrIPutMemAccessType(mir->dalvikInsn.opcode);
+ } else {
+ DCHECK(is_iget_or_iput_quick);
+ // Set kFieldIndexFlagQuickened so that we don't deduplicate against non-quickened field
+ // indexes.
+ field_idx = mir->offset | kFieldIndexFlagQuickened;
+ access_type = IGetQuickOrIPutQuickMemAccessType(mir->dalvikInsn.opcode);
+ }
size_t i = ifield_pos;
while (i != 0u && field_idxs[i - 1] != field_idx) {
--i;
}
if (i != 0u) {
mir->meta.ifield_lowering_info = i - 1;
- DCHECK_EQ(field_types[i - 1], IGetOrIPutMemAccessType(mir->dalvikInsn.opcode));
+ DCHECK_EQ(field_types[i - 1], access_type);
} else {
mir->meta.ifield_lowering_info = ifield_pos;
field_idxs[ifield_pos] = field_idx;
- field_types[ifield_pos] = IGetOrIPutMemAccessType(mir->dalvikInsn.opcode);
+ field_types[ifield_pos] = access_type;
++ifield_pos;
}
} else if (IsInstructionSGetOrSPut(mir->dalvikInsn.opcode)) {
- uint16_t field_idx = mir->dalvikInsn.vB;
+ auto field_idx = mir->dalvikInsn.vB;
size_t i = sfield_pos;
while (i != max_refs && field_idxs[i] != field_idx) {
++i;
@@ -1261,7 +1275,12 @@
DCHECK_EQ(ifield_lowering_infos_.size(), 0u);
ifield_lowering_infos_.reserve(ifield_pos);
for (size_t pos = 0u; pos != ifield_pos; ++pos) {
- ifield_lowering_infos_.push_back(MirIFieldLoweringInfo(field_idxs[pos], field_types[pos]));
+ const uint32_t field_idx = field_idxs[pos];
+ const bool is_quickened = (field_idx & kFieldIndexFlagQuickened) != 0;
+ const uint32_t masked_field_idx = field_idx & ~kFieldIndexFlagQuickened;
+ CHECK_LT(masked_field_idx, 1u << 16);
+ ifield_lowering_infos_.push_back(
+ MirIFieldLoweringInfo(masked_field_idx, field_types[pos], is_quickened));
}
MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
ifield_lowering_infos_.data(), ifield_pos);
@@ -1282,18 +1301,19 @@
void MIRGraph::DoCacheMethodLoweringInfo() {
static constexpr uint16_t invoke_types[] = { kVirtual, kSuper, kDirect, kStatic, kInterface };
+ static constexpr uint32_t kMethodIdxFlagQuickened = 0x80000000;
// Embed the map value in the entry to avoid extra padding in 64-bit builds.
struct MapEntry {
// Map key: target_method_idx, invoke_type, devirt_target. Ordered to avoid padding.
const MethodReference* devirt_target;
- uint16_t target_method_idx;
+ uint32_t target_method_idx;
+ uint32_t vtable_idx;
uint16_t invoke_type;
// Map value.
uint32_t lowering_info_index;
};
- // Sort INVOKEs by method index, then by opcode, then by devirtualization target.
struct MapEntryComparator {
bool operator()(const MapEntry& lhs, const MapEntry& rhs) const {
if (lhs.target_method_idx != rhs.target_method_idx) {
@@ -1302,6 +1322,9 @@
if (lhs.invoke_type != rhs.invoke_type) {
return lhs.invoke_type < rhs.invoke_type;
}
+ if (lhs.vtable_idx != rhs.vtable_idx) {
+ return lhs.vtable_idx < rhs.vtable_idx;
+ }
if (lhs.devirt_target != rhs.devirt_target) {
if (lhs.devirt_target == nullptr) {
return true;
@@ -1319,7 +1342,7 @@
ScopedArenaAllocator allocator(&cu_->arena_stack);
// All INVOKE instructions take 3 code units and there must also be a RETURN.
- uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 3u;
+ const uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 3u;
// Map invoke key (see MapEntry) to lowering info index and vice versa.
// The invoke_map and sequential entries are essentially equivalent to Boost.MultiIndex's
@@ -1330,28 +1353,43 @@
allocator.AllocArray<const MapEntry*>(max_refs, kArenaAllocMisc);
// Find INVOKE insns and their devirtualization targets.
+ const VerifiedMethod* verified_method = GetCurrentDexCompilationUnit()->GetVerifiedMethod();
AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
if (bb->block_type != kDalvikByteCode) {
continue;
}
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
- if (IsInstructionInvoke(mir->dalvikInsn.opcode)) {
- // Decode target method index and invoke type.
- uint16_t target_method_idx = mir->dalvikInsn.vB;
- DexInvokeType invoke_type_idx = InvokeInstructionType(mir->dalvikInsn.opcode);
-
+ const bool is_quick_invoke = IsInstructionQuickInvoke(mir->dalvikInsn.opcode);
+ const bool is_invoke = IsInstructionInvoke(mir->dalvikInsn.opcode);
+ if (is_quick_invoke || is_invoke) {
+ uint32_t vtable_index = 0;
+ uint32_t target_method_idx = 0;
+ uint32_t invoke_type_idx = 0; // Default to virtual (in case of quickened).
+ DCHECK_EQ(invoke_types[invoke_type_idx], kVirtual);
+ if (is_quick_invoke) {
+ // We need to store the vtable index since we can't necessarily recreate it at resolve
+ // phase if the dequickening resolved to an interface method.
+ vtable_index = mir->dalvikInsn.vB;
+ // Fake up the method index by storing the mir offset so that we can read the dequicken
+ // info in resolve.
+ target_method_idx = mir->offset | kMethodIdxFlagQuickened;
+ } else {
+ DCHECK(is_invoke);
+ // Decode target method index and invoke type.
+ invoke_type_idx = InvokeInstructionType(mir->dalvikInsn.opcode);
+ target_method_idx = mir->dalvikInsn.vB;
+ }
// Find devirtualization target.
// TODO: The devirt map is ordered by the dex pc here. Is there a way to get INVOKEs
// ordered by dex pc as well? That would allow us to keep an iterator to devirt targets
// and increment it as needed instead of making O(log n) lookups.
- const VerifiedMethod* verified_method = GetCurrentDexCompilationUnit()->GetVerifiedMethod();
const MethodReference* devirt_target = verified_method->GetDevirtTarget(mir->offset);
-
// Try to insert a new entry. If the insertion fails, we will have found an old one.
MapEntry entry = {
devirt_target,
target_method_idx,
+ vtable_index,
invoke_types[invoke_type_idx],
static_cast<uint32_t>(invoke_map.size())
};
@@ -1362,22 +1400,24 @@
}
}
}
-
if (invoke_map.empty()) {
return;
}
-
// Prepare unique method infos, set method info indexes for their MIRs.
- DCHECK_EQ(method_lowering_infos_.size(), 0u);
const size_t count = invoke_map.size();
method_lowering_infos_.reserve(count);
for (size_t pos = 0u; pos != count; ++pos) {
const MapEntry* entry = sequential_entries[pos];
- MirMethodLoweringInfo method_info(entry->target_method_idx,
- static_cast<InvokeType>(entry->invoke_type));
+ const bool is_quick = (entry->target_method_idx & kMethodIdxFlagQuickened) != 0;
+ const uint32_t masked_method_idx = entry->target_method_idx & ~kMethodIdxFlagQuickened;
+ MirMethodLoweringInfo method_info(masked_method_idx,
+ static_cast<InvokeType>(entry->invoke_type), is_quick);
if (entry->devirt_target != nullptr) {
method_info.SetDevirtualizationTarget(*entry->devirt_target);
}
+ if (is_quick) {
+ method_info.SetVTableIndex(entry->vtable_idx);
+ }
method_lowering_infos_.push_back(method_info);
}
MirMethodLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
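Note: both caches above lean on the same trick: a real dex field/method index fits in 16 bits (hence the CHECK_LT against 1u << 16), so bit 31 of the uint32_t is free to tag quickened entries whose "index" is really the mir offset. A self-contained illustration:

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kQuickenedFlag = 0x80000000u;

    int main() {
      uint32_t tagged = 0x1234u | kQuickenedFlag;     // mir offset posing as an index
      assert((tagged & kQuickenedFlag) != 0u);        // recognizably quickened
      uint32_t recovered = tagged & ~kQuickenedFlag;  // mask the tag back off
      assert(recovered == 0x1234u && recovered < (1u << 16));
      return 0;
    }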
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index f9f7e22..dfaff6c 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -374,7 +374,7 @@
// 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
- // 73 UNUSED_73
+ // 73 RETURN_VOID_BARRIER
DF_NOP,
// 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
@@ -710,89 +710,89 @@
// E2 USHR_INT_LIT8 vAA, vBB, #+CC
DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // E3 IGET_VOLATILE
+ // E3 IGET_QUICK
DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
- // E4 IPUT_VOLATILE
- DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // E5 SGET_VOLATILE
- DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
- // E6 SPUT_VOLATILE
- DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
- // E7 IGET_OBJECT_VOLATILE
- DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // E8 IGET_WIDE_VOLATILE
+ // E4 IGET_WIDE_QUICK
DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
- // E9 IPUT_WIDE_VOLATILE
+ // E5 IGET_OBJECT_QUICK
+ DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
+
+ // E6 IPUT_QUICK
+ DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+
+ // E7 IPUT_WIDE_QUICK
DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
- // EA SGET_WIDE_VOLATILE
- DF_DA | DF_A_WIDE | DF_SFIELD | DF_CLINIT | DF_UMS,
-
- // EB SPUT_WIDE_VOLATILE
- DF_UA | DF_A_WIDE | DF_SFIELD | DF_CLINIT | DF_UMS,
-
- // EC BREAKPOINT
- DF_NOP,
-
- // ED THROW_VERIFICATION_ERROR
- DF_NOP | DF_UMS,
-
- // EE EXECUTE_INLINE
- DF_FORMAT_35C,
-
- // EF EXECUTE_INLINE_RANGE
- DF_FORMAT_3RC,
-
- // F0 INVOKE_OBJECT_INIT_RANGE
- DF_NOP,
-
- // F1 RETURN_VOID_BARRIER
- DF_NOP,
-
- // F2 IGET_QUICK
- DF_DA | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
-
- // F3 IGET_WIDE_QUICK
- DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
-
- // F4 IGET_OBJECT_QUICK
- DF_DA | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
-
- // F5 IPUT_QUICK
- DF_UA | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
-
- // F6 IPUT_WIDE_QUICK
- DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
-
- // F7 IPUT_OBJECT_QUICK
- DF_UA | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
-
- // F8 INVOKE_VIRTUAL_QUICK
- DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // F9 INVOKE_VIRTUAL_QUICK_RANGE
- DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // FA INVOKE_SUPER_QUICK
- DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // FB INVOKE_SUPER_QUICK_RANGE
- DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // FC IPUT_OBJECT_VOLATILE
+ // E8 IPUT_OBJECT_QUICK
DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
- // FD SGET_OBJECT_VOLATILE
- DF_DA | DF_REF_A | DF_SFIELD | DF_CLINIT | DF_UMS,
+ // E9 INVOKE_VIRTUAL_QUICK
+ DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
- // FE SPUT_OBJECT_VOLATILE
- DF_UA | DF_REF_A | DF_SFIELD | DF_CLINIT | DF_UMS,
+ // EA INVOKE_VIRTUAL_RANGE_QUICK
+ DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // EB IPUT_BOOLEAN_QUICK vA, vB, index
+ DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+
+ // EC IPUT_BYTE_QUICK vA, vB, index
+ DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+
+ // ED IPUT_CHAR_QUICK vA, vB, index
+ DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+
+ // EE IPUT_SHORT_QUICK vA, vB, index
+ DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+
+ // EF IGET_BOOLEAN_QUICK vA, vB, index
+ DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+
+ // F0 IGET_BYTE_QUICK vA, vB, index
+ DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+
+ // F1 IGET_CHAR_QUICK vA, vB, index
+ DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+
+ // F2 IGET_SHORT_QUICK vA, vB, index
+ DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+
+ // F3 UNUSED_F3
+ DF_NOP,
+
+ // F4 UNUSED_F4
+ DF_NOP,
+
+ // F5 UNUSED_F5
+ DF_NOP,
+
+ // F6 UNUSED_F6
+ DF_NOP,
+
+ // F7 UNUSED_F7
+ DF_NOP,
+
+ // F8 UNUSED_F8
+ DF_NOP,
+
+ // F9 UNUSED_F9
+ DF_NOP,
+
+ // FA UNUSED_FA
+ DF_NOP,
+
+ // FB UNUSED_FB
+ DF_NOP,
+
+ // FC UNUSED_FC
+ DF_NOP,
+
+ // FD UNUSED_FD
+ DF_NOP,
+
+ // FE UNUSED_FE
+ DF_NOP,
// FF UNUSED_FF
DF_NOP,
diff --git a/compiler/dex/mir_field_info.cc b/compiler/dex/mir_field_info.cc
index 53afcad..d2079a2 100644
--- a/compiler/dex/mir_field_info.cc
+++ b/compiler/dex/mir_field_info.cc
@@ -35,8 +35,9 @@
DCHECK(field_infos != nullptr);
DCHECK_NE(count, 0u);
for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
- MirIFieldLoweringInfo unresolved(it->field_idx_, it->MemAccessType());
- DCHECK_EQ(memcmp(&unresolved, &*it, sizeof(*it)), 0);
+ MirIFieldLoweringInfo unresolved(it->field_idx_, it->MemAccessType(), it->IsQuickened());
+ unresolved.field_offset_ = it->field_offset_;
+ unresolved.CheckEquals(*it);
}
}
@@ -49,13 +50,30 @@
hs.NewHandle(compiler_driver->GetClassLoader(soa, mUnit)));
Handle<mirror::Class> referrer_class(hs.NewHandle(
compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit)));
+ const VerifiedMethod* const verified_method = mUnit->GetVerifiedMethod();
// Even if the referrer class is unresolved (i.e. we're compiling a method without class
// definition) we still want to resolve fields and record all available info.
-
for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
- uint32_t field_idx = it->field_idx_;
- mirror::ArtField* resolved_field =
- compiler_driver->ResolveField(soa, dex_cache, class_loader, mUnit, field_idx, false);
+ uint32_t field_idx;
+ mirror::ArtField* resolved_field;
+ if (!it->IsQuickened()) {
+ field_idx = it->field_idx_;
+ resolved_field = compiler_driver->ResolveField(soa, dex_cache, class_loader, mUnit,
+ field_idx, false);
+ } else {
+ const auto mir_offset = it->field_idx_;
+ // For quickened instructions, it->field_idx_ actually contains the mir offset.
+ // We need to use the de-quickening info to get the dex file / field idx.
+ auto* field_idx_ptr = verified_method->GetDequickenIndex(mir_offset);
+ CHECK(field_idx_ptr != nullptr);
+ field_idx = field_idx_ptr->index;
+ StackHandleScope<1> hs2(soa.Self());
+ auto h_dex_cache = hs2.NewHandle(compiler_driver->FindDexCache(field_idx_ptr->dex_file));
+ resolved_field = compiler_driver->ResolveFieldWithDexFile(
+ soa, h_dex_cache, class_loader, field_idx_ptr->dex_file, field_idx, false);
+ // Since we don't have a valid field index we can't go slow path later.
+ CHECK(resolved_field != nullptr);
+ }
if (UNLIKELY(resolved_field == nullptr)) {
continue;
}
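Note: the memcmp-based DCHECK is gone in favor of the field-by-field CheckEquals added in mir_field_info.h below; memcmp also compares padding bytes and cannot say which member diverged, as this illustrative struct shows:

    #include <cassert>
    #include <cstdint>

    struct Info {
      uint16_t idx;    // padding bytes typically follow before the next member
      uint32_t flags;
      void CheckEquals(const Info& other) const {
        assert(idx == other.idx);      // a failure points at the exact field
        assert(flags == other.flags);  // padding never participates
      }
    };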
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index 98b2da8..ca56958 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -19,8 +19,8 @@
#include "base/macros.h"
#include "dex_file.h"
+#include "dex_instruction_utils.h"
#include "offsets.h"
-#include "utils/dex_instruction_utils.h"
namespace art {
@@ -39,6 +39,9 @@
uint16_t FieldIndex() const {
return field_idx_;
}
+ void SetFieldIndex(uint16_t field_idx) {
+ field_idx_ = field_idx;
+ }
bool IsStatic() const {
return (flags_ & kFlagIsStatic) != 0u;
@@ -51,6 +54,9 @@
const DexFile* DeclaringDexFile() const {
return declaring_dex_file_;
}
+ void SetDeclaringDexFile(const DexFile* dex_file) {
+ declaring_dex_file_ = dex_file;
+ }
uint16_t DeclaringClassIndex() const {
return declaring_class_idx_;
@@ -64,20 +70,35 @@
return (flags_ & kFlagIsVolatile) != 0u;
}
+ // IGET_QUICK, IGET_BYTE_QUICK, ...
+ bool IsQuickened() const {
+ return (flags_ & kFlagIsQuickened) != 0u;
+ }
+
DexMemAccessType MemAccessType() const {
return static_cast<DexMemAccessType>((flags_ >> kBitMemAccessTypeBegin) & kMemAccessTypeMask);
}
+ void CheckEquals(const MirFieldInfo& other) const {
+ CHECK_EQ(field_idx_, other.field_idx_);
+ CHECK_EQ(flags_, other.flags_);
+ CHECK_EQ(declaring_field_idx_, other.declaring_field_idx_);
+ CHECK_EQ(declaring_class_idx_, other.declaring_class_idx_);
+ CHECK_EQ(declaring_dex_file_, other.declaring_dex_file_);
+ }
+
protected:
enum {
kBitIsStatic = 0,
kBitIsVolatile,
+ kBitIsQuickened,
kBitMemAccessTypeBegin,
kBitMemAccessTypeEnd = kBitMemAccessTypeBegin + 3, // 3 bits for raw type.
kFieldInfoBitEnd = kBitMemAccessTypeEnd
};
static constexpr uint16_t kFlagIsVolatile = 1u << kBitIsVolatile;
static constexpr uint16_t kFlagIsStatic = 1u << kBitIsStatic;
+ static constexpr uint16_t kFlagIsQuickened = 1u << kBitIsQuickened;
static constexpr uint16_t kMemAccessTypeMask = 7u;
static_assert((1u << (kBitMemAccessTypeEnd - kBitMemAccessTypeBegin)) - 1u == kMemAccessTypeMask,
"Invalid raw type mask");
@@ -117,8 +138,10 @@
LOCKS_EXCLUDED(Locks::mutator_lock_);
// Construct an unresolved instance field lowering info.
- explicit MirIFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type)
- : MirFieldInfo(field_idx, kFlagIsVolatile, type), // Without kFlagIsStatic.
+ explicit MirIFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type, bool is_quickened)
+ : MirFieldInfo(field_idx,
+ kFlagIsVolatile | (is_quickened ? kFlagIsQuickened : 0u),
+ type), // Without kFlagIsStatic.
field_offset_(0u) {
}
@@ -134,6 +157,11 @@
return field_offset_;
}
+ void CheckEquals(const MirIFieldLoweringInfo& other) const {
+ MirFieldInfo::CheckEquals(other);
+ CHECK_EQ(field_offset_.Uint32Value(), other.field_offset_.Uint32Value());
+ }
+
private:
enum {
kBitFastGet = kFieldInfoBitEnd,
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 76b5e44..f354a49 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -1673,12 +1673,6 @@
}
}
-const char* MIRGraph::GetShortyFromTargetIdx(int target_idx) {
- // TODO: for inlining support, use current code unit.
- const DexFile::MethodId& method_id = cu_->dex_file->GetMethodId(target_idx);
- return cu_->dex_file->GetShorty(method_id.proto_idx_);
-}
-
const char* MIRGraph::GetShortyFromMethodReference(const MethodReference& target_method) {
const DexFile::MethodId& method_id =
target_method.dex_file->GetMethodId(target_method.dex_method_index);
@@ -1724,8 +1718,7 @@
* high-word loc for wide arguments. Also pull up any following
* MOVE_RESULT and incorporate it into the invoke.
*/
-CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type,
- bool is_range) {
+CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range) {
CallInfo* info = static_cast<CallInfo*>(arena_->Alloc(sizeof(CallInfo),
kArenaAllocMisc));
MIR* move_result_mir = FindMoveResult(bb, mir);
@@ -1744,6 +1737,13 @@
info->opt_flags = mir->optimization_flags;
info->type = type;
info->is_range = is_range;
+ if (IsInstructionQuickInvoke(mir->dalvikInsn.opcode)) {
+ const auto& method_info = GetMethodLoweringInfo(mir);
+ info->method_ref = method_info.GetTargetMethod();
+ } else {
+ info->method_ref = MethodReference(GetCurrentDexCompilationUnit()->GetDexFile(),
+ mir->dalvikInsn.vB);
+ }
info->index = mir->dalvikInsn.vB;
info->offset = mir->offset;
info->mir = mir;
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index e5abd3b..3dae5b4 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -504,6 +504,7 @@
int opt_flags;
InvokeType type;
uint32_t dex_idx;
+ MethodReference method_ref;
uint32_t index; // Method idx for invokes, type idx for FilledNewArray.
uintptr_t direct_code;
uintptr_t direct_method;
@@ -687,7 +688,7 @@
void DoCacheMethodLoweringInfo();
- const MirMethodLoweringInfo& GetMethodLoweringInfo(MIR* mir) {
+ const MirMethodLoweringInfo& GetMethodLoweringInfo(MIR* mir) const {
DCHECK_LT(mir->meta.method_lowering_info, method_lowering_infos_.size());
return method_lowering_infos_[mir->meta.method_lowering_info];
}
@@ -1132,7 +1133,6 @@
std::string GetSSAName(int ssa_reg);
std::string GetSSANameWithConst(int ssa_reg, bool singles_only);
void GetBlockName(BasicBlock* bb, char* name);
- const char* GetShortyFromTargetIdx(int);
const char* GetShortyFromMethodReference(const MethodReference& target_method);
void DumpMIRGraph();
CallInfo* NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range);
diff --git a/compiler/dex/mir_method_info.cc b/compiler/dex/mir_method_info.cc
index b234950..3d3d979 100644
--- a/compiler/dex/mir_method_info.cc
+++ b/compiler/dex/mir_method_info.cc
@@ -33,51 +33,103 @@
DCHECK(method_infos != nullptr);
DCHECK_NE(count, 0u);
for (auto it = method_infos, end = method_infos + count; it != end; ++it) {
- MirMethodLoweringInfo unresolved(it->MethodIndex(), it->GetInvokeType());
+ MirMethodLoweringInfo unresolved(it->MethodIndex(), it->GetInvokeType(), it->IsQuickened());
+ unresolved.declaring_dex_file_ = it->declaring_dex_file_;
+ unresolved.vtable_idx_ = it->vtable_idx_;
if (it->target_dex_file_ != nullptr) {
unresolved.target_dex_file_ = it->target_dex_file_;
unresolved.target_method_idx_ = it->target_method_idx_;
}
- DCHECK_EQ(memcmp(&unresolved, &*it, sizeof(*it)), 0);
+ if (kIsDebugBuild) {
+ unresolved.CheckEquals(*it);
+ }
}
}
// We're going to resolve methods and check access in a tight loop. It's better to hold
// the lock and needed references once than re-acquiring them again and again.
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<3> hs(soa.Self());
+ StackHandleScope<4> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(compiler_driver->GetDexCache(mUnit)));
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(compiler_driver->GetClassLoader(soa, mUnit)));
Handle<mirror::Class> referrer_class(hs.NewHandle(
compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit)));
+ auto current_dex_cache(hs.NewHandle<mirror::DexCache>(nullptr));
// Even if the referrer class is unresolved (i.e. we're compiling a method without class
// definition) we still want to resolve methods and record all available info.
+ const DexFile* const dex_file = mUnit->GetDexFile();
+ const bool use_jit = Runtime::Current()->UseJit();
+ const VerifiedMethod* const verified_method = mUnit->GetVerifiedMethod();
for (auto it = method_infos, end = method_infos + count; it != end; ++it) {
+ // For quickened invokes, the dex method idx is actually the mir offset.
+ if (it->IsQuickened()) {
+ const auto* dequicken_ref = verified_method->GetDequickenIndex(it->method_idx_);
+ CHECK(dequicken_ref != nullptr);
+ it->target_dex_file_ = dequicken_ref->dex_file;
+ it->target_method_idx_ = dequicken_ref->index;
+ }
// Remember devirtualized invoke target and set the called method to the default.
MethodReference devirt_ref(it->target_dex_file_, it->target_method_idx_);
MethodReference* devirt_target = (it->target_dex_file_ != nullptr) ? &devirt_ref : nullptr;
- it->target_dex_file_ = mUnit->GetDexFile();
- it->target_method_idx_ = it->MethodIndex();
-
InvokeType invoke_type = it->GetInvokeType();
- mirror::ArtMethod* resolved_method =
- compiler_driver->ResolveMethod(soa, dex_cache, class_loader, mUnit, it->MethodIndex(),
- invoke_type);
+ mirror::ArtMethod* resolved_method = nullptr;
+ if (!it->IsQuickened()) {
+ it->target_dex_file_ = dex_file;
+ it->target_method_idx_ = it->MethodIndex();
+ current_dex_cache.Assign(dex_cache.Get());
+ resolved_method = compiler_driver->ResolveMethod(soa, dex_cache, class_loader, mUnit,
+ it->MethodIndex(), invoke_type);
+ } else {
+ // The method index is actually the dex PC in this case.
+ // Calculate the proper dex file and target method idx.
+ CHECK(use_jit);
+ CHECK_EQ(invoke_type, kVirtual);
+ // Don't devirt if we are in a different dex file since we can't have direct invokes in
+ // another dex file unless we always put a direct / patch pointer.
+ devirt_target = nullptr;
+ current_dex_cache.Assign(
+ Runtime::Current()->GetClassLinker()->FindDexCache(*it->target_dex_file_));
+ CHECK(current_dex_cache.Get() != nullptr);
+ DexCompilationUnit cu(
+ mUnit->GetCompilationUnit(), mUnit->GetClassLoader(), mUnit->GetClassLinker(),
+ *it->target_dex_file_, nullptr /* code_item not used */, 0u /* class_def_idx not used */,
+ it->target_method_idx_, 0u /* access_flags not used */,
+ nullptr /* verified_method not used */);
+ resolved_method = compiler_driver->ResolveMethod(soa, current_dex_cache, class_loader, &cu,
+ it->target_method_idx_, invoke_type, false);
+ if (resolved_method != nullptr) {
+ // Since this was a dequickened virtual, it is guaranteed to be resolved. However, it may be
+ // resolved to an interface method. If this is the case then change the invoke type to
+ // interface with the assumption that sharp_type will be kVirtual.
+ if (resolved_method->GetInvokeType() == kInterface) {
+ it->flags_ = (it->flags_ & ~(kInvokeTypeMask << kBitInvokeTypeBegin)) |
+ (static_cast<uint16_t>(kInterface) << kBitInvokeTypeBegin);
+ }
+ }
+ }
if (UNLIKELY(resolved_method == nullptr)) {
continue;
}
compiler_driver->GetResolvedMethodDexFileLocation(resolved_method,
&it->declaring_dex_file_, &it->declaring_class_idx_, &it->declaring_method_idx_);
- it->vtable_idx_ = compiler_driver->GetResolvedMethodVTableIndex(resolved_method, invoke_type);
+ if (!it->IsQuickened()) {
+ // For quickened invoke virtuals we may have desharpened to an interface method which
+ // won't give us the right method index; in that case we blindly dispatch or else we can't
+ // compile the method. Converting the invoke to interface dispatch doesn't work since we
+ // have no way to get the dex method index for quickened invoke virtuals in the interface
+ // trampolines.
+ it->vtable_idx_ =
+ compiler_driver->GetResolvedMethodVTableIndex(resolved_method, invoke_type);
+ }
- MethodReference target_method(mUnit->GetDexFile(), it->MethodIndex());
+ MethodReference target_method(it->target_dex_file_, it->target_method_idx_);
int fast_path_flags = compiler_driver->IsFastInvoke(
- soa, dex_cache, class_loader, mUnit, referrer_class.Get(), resolved_method, &invoke_type,
- &target_method, devirt_target, &it->direct_code_, &it->direct_method_);
- bool is_referrers_class = (referrer_class.Get() == resolved_method->GetDeclaringClass());
- bool is_class_initialized =
+ soa, dex_cache, class_loader, mUnit, referrer_class.Get(), resolved_method,
+ &invoke_type, &target_method, devirt_target, &it->direct_code_, &it->direct_method_);
+ const bool is_referrers_class = referrer_class.Get() == resolved_method->GetDeclaringClass();
+ const bool is_class_initialized =
compiler_driver->IsMethodsClassInitialized(referrer_class.Get(), resolved_method);
uint16_t other_flags = it->flags_ &
~(kFlagFastPath | kFlagClassIsInitialized | (kInvokeTypeMask << kBitSharpTypeBegin));
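Note: the interface-desharpening branch rewrites only the invoke-type bits packed into flags_: clear the 3-bit field through the shifted mask, then OR in the new type. The same clear-then-set step in isolation (bit position assumed for illustration):

    #include <cstdint>

    constexpr uint16_t kInvokeTypeMask = 7u;     // 3 bits, as asserted in the header
    constexpr unsigned kBitInvokeTypeBegin = 3;  // illustrative position only

    uint16_t SetInvokeType(uint16_t flags, uint16_t type) {
      return static_cast<uint16_t>((flags & ~(kInvokeTypeMask << kBitInvokeTypeBegin)) |
                                   (type << kBitInvokeTypeBegin));
    }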
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
index 08fb103..e131c96 100644
--- a/compiler/dex/mir_method_info.h
+++ b/compiler/dex/mir_method_info.h
@@ -46,6 +46,9 @@
const DexFile* DeclaringDexFile() const {
return declaring_dex_file_;
}
+ void SetDeclaringDexFile(const DexFile* dex_file) {
+ declaring_dex_file_ = dex_file;
+ }
uint16_t DeclaringClassIndex() const {
return declaring_class_idx_;
@@ -98,11 +101,12 @@
MirMethodLoweringInfo* method_infos, size_t count)
LOCKS_EXCLUDED(Locks::mutator_lock_);
- MirMethodLoweringInfo(uint16_t method_idx, InvokeType type)
+ MirMethodLoweringInfo(uint16_t method_idx, InvokeType type, bool is_quickened)
: MirMethodInfo(method_idx,
((type == kStatic) ? kFlagIsStatic : 0u) |
(static_cast<uint16_t>(type) << kBitInvokeTypeBegin) |
- (static_cast<uint16_t>(type) << kBitSharpTypeBegin)),
+ (static_cast<uint16_t>(type) << kBitSharpTypeBegin) |
+ (is_quickened ? kFlagQuickened : 0u)),
direct_code_(0u),
direct_method_(0u),
target_dex_file_(nullptr),
@@ -131,6 +135,11 @@
return (flags_ & kFlagClassIsInitialized) != 0u;
}
+ // Returns true iff the method invoke is INVOKE_VIRTUAL_QUICK or INVOKE_VIRTUAL_RANGE_QUICK.
+ bool IsQuickened() const {
+ return (flags_ & kFlagQuickened) != 0u;
+ }
+
InvokeType GetInvokeType() const {
return static_cast<InvokeType>((flags_ >> kBitInvokeTypeBegin) & kInvokeTypeMask);
}
@@ -146,6 +155,9 @@
uint16_t VTableIndex() const {
return vtable_idx_;
}
+ void SetVTableIndex(uint16_t index) {
+ vtable_idx_ = index;
+ }
uintptr_t DirectCode() const {
return direct_code_;
@@ -159,6 +171,20 @@
return stats_flags_;
}
+ void CheckEquals(const MirMethodLoweringInfo& info) const {
+ CHECK_EQ(method_idx_, info.method_idx_);
+ CHECK_EQ(flags_, info.flags_);
+ CHECK_EQ(declaring_method_idx_, info.declaring_method_idx_);
+ CHECK_EQ(declaring_class_idx_, info.declaring_class_idx_);
+ CHECK_EQ(declaring_dex_file_, info.declaring_dex_file_);
+ CHECK_EQ(direct_code_, info.direct_code_);
+ CHECK_EQ(direct_method_, info.direct_method_);
+ CHECK_EQ(target_dex_file_, info.target_dex_file_);
+ CHECK_EQ(target_method_idx_, info.target_method_idx_);
+ CHECK_EQ(vtable_idx_, info.vtable_idx_);
+ CHECK_EQ(stats_flags_, info.stats_flags_);
+ }
+
private:
enum {
kBitFastPath = kMethodInfoBitEnd,
@@ -168,12 +194,14 @@
kBitSharpTypeEnd = kBitSharpTypeBegin + 3, // 3 bits for sharp type.
kBitIsReferrersClass = kBitSharpTypeEnd,
kBitClassIsInitialized,
+ kBitQuickened,
kMethodLoweringInfoBitEnd
};
static_assert(kMethodLoweringInfoBitEnd <= 16, "Too many flags");
static constexpr uint16_t kFlagFastPath = 1u << kBitFastPath;
static constexpr uint16_t kFlagIsReferrersClass = 1u << kBitIsReferrersClass;
static constexpr uint16_t kFlagClassIsInitialized = 1u << kBitClassIsInitialized;
+ static constexpr uint16_t kFlagQuickened = 1u << kBitQuickened;
static constexpr uint16_t kInvokeTypeMask = 7u;
static_assert((1u << (kBitInvokeTypeEnd - kBitInvokeTypeBegin)) - 1u == kInvokeTypeMask,
"assert invoke type bits failed");
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index fd67d4e..93749e4 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -1437,7 +1437,7 @@
nullptr /* code_item not used */, 0u /* class_def_idx not used */, target.dex_method_index,
0u /* access_flags not used */, nullptr /* verified_method not used */);
DexMemAccessType type = IGetOrIPutMemAccessType(iget_or_iput->dalvikInsn.opcode);
- MirIFieldLoweringInfo inlined_field_info(field_idx, type);
+ MirIFieldLoweringInfo inlined_field_info(field_idx, type, false);
MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, &inlined_unit, &inlined_field_info, 1u);
DCHECK(inlined_field_info.IsResolved());
diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc
index be05b80..9ce5ebb 100644
--- a/compiler/dex/mir_optimization_test.cc
+++ b/compiler/dex/mir_optimization_test.cc
@@ -254,7 +254,7 @@
cu_.mir_graph->method_lowering_infos_.reserve(count);
for (size_t i = 0u; i != count; ++i) {
const MethodDef* def = &defs[i];
- MirMethodLoweringInfo method_info(def->method_idx, def->invoke_type);
+ MirMethodLoweringInfo method_info(def->method_idx, def->invoke_type, false);
if (def->declaring_dex_file != 0u) {
method_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
method_info.declaring_class_idx_ = def->declaring_class_idx;
@@ -407,7 +407,7 @@
cu_.mir_graph->ifield_lowering_infos_.reserve(count);
for (size_t i = 0u; i != count; ++i) {
const IFieldDef* def = &defs[i];
- MirIFieldLoweringInfo field_info(def->field_idx, def->type);
+ MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
if (def->declaring_dex_file != 0u) {
field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
field_info.declaring_class_idx_ = def->declaring_class_idx;
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index 3d64833..8833da3 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -117,11 +117,11 @@
"add", "!0C, !1C", 2, kFixupNone),
ENCODING_MAP(kThumbAddPcRel, 0xa000,
kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | IS_BRANCH | NEEDS_FIXUP,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC | NEEDS_FIXUP,
"add", "!0C, pc, #!1E", 2, kFixupLoad),
ENCODING_MAP(kThumbAddSpRel, 0xa800,
kFmtBitBlt, 10, 8, kFmtSkip, -1, -1, kFmtBitBlt, 7, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF_SP | REG_USE_SP,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_SP,
"add", "!0C, sp, #!2E", 2, kFixupNone),
ENCODING_MAP(kThumbAddSpI7, 0xb000,
kFmtBitBlt, 6, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
@@ -182,7 +182,7 @@
"blx", "!0C", 2, kFixupNone),
ENCODING_MAP(kThumbBx, 0x4700,
kFmtBitBlt, 6, 3, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH,
+ kFmtUnused, -1, -1, IS_UNARY_OP | REG_USE0 | IS_BRANCH,
"bx", "!0C", 2, kFixupNone),
ENCODING_MAP(kThumbCmnRR, 0x42c0,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
@@ -693,7 +693,7 @@
ENCODING_MAP(kThumb2AdcRRR, 0xeb500000, /* setflags encoding */
kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
kFmtShift, -1, -1,
- IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
+ IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES | USES_CCODES,
"adcs", "!0C, !1C, !2C!3H", 4, kFixupNone),
ENCODING_MAP(kThumb2AndRRR, 0xea000000,
kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
@@ -835,15 +835,15 @@
"it:!1b", "!0c", 2, kFixupNone),
ENCODING_MAP(kThumb2Fmstat, 0xeef1fa10,
kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, NO_OPERAND | SETS_CCODES,
+ kFmtUnused, -1, -1, NO_OPERAND | SETS_CCODES | USES_CCODES,
"fmstat", "", 4, kFixupNone),
ENCODING_MAP(kThumb2Vcmpd, 0xeeb40b40,
kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
"vcmp.f64", "!0S, !1S", 4, kFixupNone),
ENCODING_MAP(kThumb2Vcmps, 0xeeb40a40,
kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
"vcmp.f32", "!0s, !1s", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrPcRel12, 0xf8df0000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
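(Aside: the SETS_CCODES/USES_CCODES additions above make the vcmp -> fmstat dependency visible: vcmp now records that it writes the FP status flags, and fmstat that it both reads and writes the condition codes. A minimal sketch of the hazard check this enables, with illustrative flag values:)

#include <cassert>
#include <cstdint>

// Illustrative resource bits, loosely modeled on the LIR use/def masks.
constexpr uint64_t SETS_CCODES = 1u << 0;
constexpr uint64_t USES_CCODES = 1u << 1;

// Two instructions must stay ordered if one defines the condition codes and
// the other consumes (or redefines) them.
bool HasCcodeDependency(uint64_t first_flags, uint64_t second_flags) {
  return ((first_flags & SETS_CCODES) != 0 &&
          (second_flags & (USES_CCODES | SETS_CCODES)) != 0) ||
         ((first_flags & USES_CCODES) != 0 && (second_flags & SETS_CCODES) != 0);
}

int main() {
  const uint64_t vcmp = SETS_CCODES;                  // vcmp.f64 sets FPSCR flags.
  const uint64_t fmstat = SETS_CCODES | USES_CCODES;  // fmstat moves FPSCR to APSR.
  assert(HasCcodeDependency(vcmp, fmstat));           // Must not be reordered.
  return 0;
}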
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 3159886..2a4d27b 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -1079,6 +1079,7 @@
}
LIR* ArmMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
return RawLIR(current_dalvik_offset_, kThumb2LdrPcRel12, reg.GetReg(), 0, 0, 0, 0, target);
}
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 806617b..aa5e5b4 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -111,7 +111,7 @@
const A64EncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = {
ENCODING_MAP(WIDE(kA64Adc3rrr), SF_VARIANTS(0x1a000000),
kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | USES_CCODES,
"adc", "!0r, !1r, !2r", kFixupNone),
ENCODING_MAP(WIDE(kA64Add4RRdT), SF_VARIANTS(0x11000000),
kFmtRegROrSp, 4, 0, kFmtRegROrSp, 9, 5, kFmtBitBlt, 21, 10,
@@ -518,7 +518,7 @@
"ror", "!0r, !1r, !2r", kFixupNone),
ENCODING_MAP(WIDE(kA64Sbc3rrr), SF_VARIANTS(0x5a000000),
kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | USES_CCODES,
"sbc", "!0r, !1r, !2r", kFixupNone),
ENCODING_MAP(WIDE(kA64Sbfm4rrdd), SF_N_VARIANTS(0x13000000),
kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 21, 16,
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 7245853..8e3f4ef 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -427,7 +427,7 @@
InlineMethod intrinsic;
{
ReaderMutexLock mu(Thread::Current(), lock_);
- auto it = inline_methods_.find(info->index);
+ auto it = inline_methods_.find(info->method_ref.dex_method_index);
if (it == inline_methods_.end() || (it->second.flags & kInlineIntrinsic) == 0) {
return false;
}
@@ -718,7 +718,7 @@
if (PrettyMethod(method_idx, *dex_file_) == "int java.lang.String.length()") {
// TODO: String.length is both kIntrinsicIsEmptyOrLength and kInlineOpIGet.
} else {
- LOG(ERROR) << "Inliner: " << PrettyMethod(method_idx, *dex_file_) << " already inline";
+ LOG(WARNING) << "Inliner: " << PrettyMethod(method_idx, *dex_file_) << " already inline";
}
return false;
}
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 3c9b7a3..afae89d 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -865,7 +865,12 @@
void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, Primitive::Type type,
RegLocation rl_dest, RegLocation rl_obj) {
const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
- DCHECK_EQ(IGetMemAccessType(mir->dalvikInsn.opcode), field_info.MemAccessType());
+ if (kIsDebugBuild) {
+ auto mem_access_type = IsInstructionIGetQuickOrIPutQuick(mir->dalvikInsn.opcode) ?
+ IGetQuickOrIPutQuickMemAccessType(mir->dalvikInsn.opcode) :
+ IGetMemAccessType(mir->dalvikInsn.opcode);
+ DCHECK_EQ(mem_access_type, field_info.MemAccessType()) << mir->dalvikInsn.opcode;
+ }
cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
if (!ForceSlowFieldPath(cu_) && field_info.FastGet()) {
RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
@@ -939,7 +944,12 @@
void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_src, RegLocation rl_obj) {
const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
- DCHECK_EQ(IPutMemAccessType(mir->dalvikInsn.opcode), field_info.MemAccessType());
+ if (kIsDebugBuild) {
+ auto mem_access_type = IsInstructionIGetQuickOrIPutQuick(mir->dalvikInsn.opcode) ?
+ IGetQuickOrIPutQuickMemAccessType(mir->dalvikInsn.opcode) :
+ IPutMemAccessType(mir->dalvikInsn.opcode);
+ DCHECK_EQ(mem_access_type, field_info.MemAccessType());
+ }
cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
if (!ForceSlowFieldPath(cu_) && field_info.FastPut()) {
RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 8e3df7c..01f1d37 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -248,14 +248,16 @@
if (arg0.wide == 0) {
LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kNotWide));
if (arg1.wide == 0) {
+ // For MIPS, when the first arg is integral, the remaining args are passed in core regs.
if (cu_->instruction_set == kMips) {
- LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg1, kNotWide));
+ LoadValueDirectFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg1, kNotWide));
} else {
LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kNotWide));
}
} else {
+ // For MIPS, when the first arg is integral, the remaining args are passed in core regs.
if (cu_->instruction_set == kMips) {
- LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
+ LoadValueDirectWideFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg2, kWide));
} else {
LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kWide));
}
@@ -263,9 +265,19 @@
} else {
LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
if (arg1.wide == 0) {
- LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kNotWide));
+ // For MIPS, when the first arg is integral, the remaining args are passed in core regs.
+ if (cu_->instruction_set == kMips) {
+ LoadValueDirectFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg2, kNotWide));
+ } else {
+ LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kNotWide));
+ }
} else {
- LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
+ // For MIPS, when the first arg is integral, the remaining args are passed in core regs.
+ if (cu_->instruction_set == kMips) {
+ LoadValueDirectWideFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg2, kWide));
+ } else {
+ LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
+ }
}
}
}
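(Aside: the MIPS changes above all encode the same o32 convention detail: floating-point argument registers are only used while the leading arguments are FP; once an integral argument appears, later arguments go to core registers. A tiny sketch of that selection rule, with illustrative names:)

#include <cassert>

// Illustrative register kinds; the real code picks kFArg2 vs kArg1/kArg2
// through TargetReg(...).
enum ArgReg { kCoreArg, kFpArg };

// The second argument stays in an FP register only if the first was FP too.
ArgReg SecondArgReg(bool arg0_fp, bool arg1_fp) {
  return (arg1_fp && arg0_fp) ? kFpArg : kCoreArg;
}

int main() {
  assert(SecondArgReg(/*arg0_fp=*/true, /*arg1_fp=*/true) == kFpArg);
  assert(SecondArgReg(/*arg0_fp=*/false, /*arg1_fp=*/true) == kCoreArg);  // The fixed case.
  return 0;
}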
@@ -863,11 +875,12 @@
RegLocation res;
if (info->result.location == kLocInvalid) {
// If result is unused, return a sink target based on type of invoke target.
- res = GetReturn(ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
+ res = GetReturn(
+ ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
} else {
res = info->result;
DCHECK_EQ(LocToRegClass(res),
- ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
+ ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
}
return res;
}
@@ -876,11 +889,12 @@
RegLocation res;
if (info->result.location == kLocInvalid) {
// If result is unused, return a sink target based on type of invoke target.
- res = GetReturnWide(ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
+ res = GetReturnWide(ShortyToRegClass(
+ mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
} else {
res = info->result;
DCHECK_EQ(LocToRegClass(res),
- ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
+ ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
}
return res;
}
@@ -1418,7 +1432,8 @@
void Mir2Lir::GenInvoke(CallInfo* info) {
DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
- if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
+ const DexFile* dex_file = info->method_ref.dex_file;
+ if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(dex_file)
->GenIntrinsic(this, info)) {
return;
}
@@ -1428,7 +1443,7 @@
void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
int call_state = 0;
LIR* null_ck;
- LIR** p_null_ck = NULL;
+ LIR** p_null_ck = nullptr;
NextCallInsn next_call_insn;
FlushAllRegs(); /* Everything to home location */
// Explicit register usage
@@ -1440,6 +1455,7 @@
info->type = method_info.GetSharpType();
bool fast_path = method_info.FastPath();
bool skip_this;
+
if (info->type == kInterface) {
next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
skip_this = fast_path;
@@ -1469,7 +1485,8 @@
// Finish up any of the call sequence not interleaved in arg loading
while (call_state >= 0) {
call_state = next_call_insn(cu_, info, call_state, target_method, method_info.VTableIndex(),
- method_info.DirectCode(), method_info.DirectMethod(), original_type);
+ method_info.DirectCode(), method_info.DirectMethod(),
+ original_type);
}
LIR* call_insn = GenCallInsn(method_info);
MarkSafepointPC(call_insn);
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index ec6edab..2d26922 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -17,6 +17,7 @@
#include "codegen_mips.h"
#include "arch/mips/instruction_set_features_mips.h"
+#include "arch/mips/entrypoints_direct_mips.h"
#include "base/logging.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
@@ -708,7 +709,18 @@
}
LIR* MipsMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
- UNUSED(trampoline); // The address of the trampoline is already loaded into r_tgt.
+ if (IsDirectEntrypoint(trampoline)) {
+ // Reserve argument space on stack (for $a0-$a3) for
+ // entrypoints that directly reference native implementations.
+ // This is not safe in general, as it violates the frame size
+ // of the Quick method, but it is used here only for calling
+ // native functions, outside of the runtime.
+ OpRegImm(kOpSub, rs_rSP, 16);
+ LIR* retVal = OpReg(op, r_tgt);
+ OpRegImm(kOpAdd, rs_rSP, 16);
+ return retVal;
+ }
+
return OpReg(op, r_tgt);
}
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 34e5e25..8348626 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -540,6 +540,7 @@
GenMoveException(rl_dest);
break;
+ case Instruction::RETURN_VOID_BARRIER:
case Instruction::RETURN_VOID:
if (((cu_->access_flags & kAccConstructor) != 0) &&
cu_->compiler_driver->RequiresConstructorBarrier(Thread::Current(), cu_->dex_file,
@@ -586,6 +587,9 @@
case Instruction::MOVE_FROM16:
case Instruction::MOVE_OBJECT_FROM16:
StoreValue(rl_dest, rl_src[0]);
+ if (rl_src[0].is_const && (mir_graph_->ConstantValue(rl_src[0]) == 0)) {
+ Workaround7250540(rl_dest, RegStorage::InvalidReg());
+ }
break;
case Instruction::MOVE_WIDE:
@@ -790,10 +794,12 @@
GenArrayPut(opt_flags, kUnsignedByte, rl_src[1], rl_src[2], rl_src[0], 0, false);
break;
+ case Instruction::IGET_OBJECT_QUICK:
case Instruction::IGET_OBJECT:
GenIGet(mir, opt_flags, kReference, Primitive::kPrimNot, rl_dest, rl_src[0]);
break;
+ case Instruction::IGET_WIDE_QUICK:
case Instruction::IGET_WIDE:
// kPrimLong and kPrimDouble share the same entrypoints.
if (rl_dest.fp) {
@@ -803,6 +809,7 @@
}
break;
+ case Instruction::IGET_QUICK:
case Instruction::IGET:
if (rl_dest.fp) {
GenIGet(mir, opt_flags, kSingle, Primitive::kPrimFloat, rl_dest, rl_src[0]);
@@ -811,43 +818,54 @@
}
break;
+ case Instruction::IGET_CHAR_QUICK:
case Instruction::IGET_CHAR:
GenIGet(mir, opt_flags, kUnsignedHalf, Primitive::kPrimChar, rl_dest, rl_src[0]);
break;
+ case Instruction::IGET_SHORT_QUICK:
case Instruction::IGET_SHORT:
GenIGet(mir, opt_flags, kSignedHalf, Primitive::kPrimShort, rl_dest, rl_src[0]);
break;
+ case Instruction::IGET_BOOLEAN_QUICK:
case Instruction::IGET_BOOLEAN:
GenIGet(mir, opt_flags, kUnsignedByte, Primitive::kPrimBoolean, rl_dest, rl_src[0]);
break;
+ case Instruction::IGET_BYTE_QUICK:
case Instruction::IGET_BYTE:
GenIGet(mir, opt_flags, kSignedByte, Primitive::kPrimByte, rl_dest, rl_src[0]);
break;
+ case Instruction::IPUT_WIDE_QUICK:
case Instruction::IPUT_WIDE:
GenIPut(mir, opt_flags, rl_src[0].fp ? kDouble : k64, rl_src[0], rl_src[1]);
break;
+ case Instruction::IPUT_OBJECT_QUICK:
case Instruction::IPUT_OBJECT:
GenIPut(mir, opt_flags, kReference, rl_src[0], rl_src[1]);
break;
+ case Instruction::IPUT_QUICK:
case Instruction::IPUT:
GenIPut(mir, opt_flags, rl_src[0].fp ? kSingle : k32, rl_src[0], rl_src[1]);
break;
+ case Instruction::IPUT_BYTE_QUICK:
+ case Instruction::IPUT_BOOLEAN_QUICK:
case Instruction::IPUT_BYTE:
case Instruction::IPUT_BOOLEAN:
GenIPut(mir, opt_flags, kUnsignedByte, rl_src[0], rl_src[1]);
break;
+ case Instruction::IPUT_CHAR_QUICK:
case Instruction::IPUT_CHAR:
GenIPut(mir, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1]);
break;
+ case Instruction::IPUT_SHORT_QUICK:
case Instruction::IPUT_SHORT:
GenIPut(mir, opt_flags, kSignedHalf, rl_src[0], rl_src[1]);
break;
@@ -921,9 +939,12 @@
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, true));
break;
+ case Instruction::INVOKE_VIRTUAL_QUICK:
case Instruction::INVOKE_VIRTUAL:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, false));
break;
+
+ case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
case Instruction::INVOKE_VIRTUAL_RANGE:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, true));
break;
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 19c2a5a..fcf4716 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -542,6 +542,11 @@
void QuickCompiler::InitCompilationUnit(CompilationUnit& cu) const {
// Disable optimizations according to instruction set.
cu.disable_opt |= kDisabledOptimizationsPerISA[cu.instruction_set];
+ if (Runtime::Current()->UseJit()) {
+ // Disable these optimizations for the JIT until support for quickened byte codes is complete.
+ // TODO: Find a cleaner way to do this.
+ cu.disable_opt |= 1u << kLocalValueNumbering;
+ }
}
void QuickCompiler::Init() {
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 4ff173d..150bdac 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -66,11 +66,16 @@
// TODO: Investigate why are we doing the work again for this method and try to avoid it.
LOG(WARNING) << "Method processed more than once: "
<< PrettyMethod(ref.dex_method_index, *ref.dex_file);
- DCHECK_EQ(it->second->GetDevirtMap().size(), verified_method->GetDevirtMap().size());
- DCHECK_EQ(it->second->GetSafeCastSet().size(), verified_method->GetSafeCastSet().size());
+ if (!Runtime::Current()->UseJit()) {
+ DCHECK_EQ(it->second->GetDevirtMap().size(), verified_method->GetDevirtMap().size());
+ DCHECK_EQ(it->second->GetSafeCastSet().size(), verified_method->GetSafeCastSet().size());
+ }
DCHECK_EQ(it->second->GetDexGcMap().size(), verified_method->GetDexGcMap().size());
- delete it->second;
- verified_methods_.erase(it);
+ // Delete the new verified method since there was already an existing one registered. It
+ // is unsafe to replace the existing one since the JIT may be using it to generate a
+ // native GC map.
+ delete verified_method;
+ return true;
}
verified_methods_.Put(ref, verified_method);
DCHECK(verified_methods_.find(ref) != verified_methods_.end());
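(Aside: the fix above switches from replace-and-delete-old to keep-old-and-delete-new, because a JIT thread may still be reading the registered VerifiedMethod. A self-contained sketch of that insert-if-absent pattern, with stand-in types:)

#include <cassert>
#include <map>

struct Verified { int data; };  // Stand-in for VerifiedMethod.

// Register 'candidate' under 'key'. If an entry already exists, keep it and
// delete the candidate: consumers (e.g. the JIT) may hold the old pointer.
Verified* RegisterOnce(std::map<int, Verified*>* table, int key, Verified* candidate) {
  auto it = table->find(key);
  if (it != table->end()) {
    delete candidate;  // Unsafe to replace the in-use entry; drop the new one.
    return it->second;
  }
  (*table)[key] = candidate;
  return candidate;
}

int main() {
  std::map<int, Verified*> table;
  Verified* first = RegisterOnce(&table, 42, new Verified{1});
  Verified* kept = RegisterOnce(&table, 42, new Verified{2});
  assert(kept == first);  // The original registration survives.
  delete first;
  return 0;
}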
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 21e965d..42d66be 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -24,6 +24,7 @@
#include "base/stl_util.h"
#include "dex_file.h"
#include "dex_instruction-inl.h"
+#include "dex_instruction_utils.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
@@ -52,6 +53,11 @@
if (method_verifier->HasVirtualOrInterfaceInvokes()) {
verified_method->GenerateDevirtMap(method_verifier);
}
+
+ // Only need dequicken info for JIT so far.
+ if (Runtime::Current()->UseJit()) {
+ verified_method->GenerateDequickenMap(method_verifier);
+ }
}
if (method_verifier->HasCheckCasts()) {
@@ -65,6 +71,12 @@
return (it != devirt_map_.end()) ? &it->second : nullptr;
}
+const DexFileReference* VerifiedMethod::GetDequickenIndex(uint32_t dex_pc) const {
+ DCHECK(Runtime::Current()->UseJit());
+ auto it = dequicken_map_.find(dex_pc);
+ return (it != dequicken_map_.end()) ? &it->second : nullptr;
+}
+
bool VerifiedMethod::IsSafeCast(uint32_t pc) const {
return std::binary_search(safe_cast_set_.begin(), safe_cast_set_.end(), pc);
}
@@ -182,7 +194,7 @@
*log2_max_gc_pc = i;
}
-void VerifiedMethod::GenerateDeQuickenMap(verifier::MethodVerifier* method_verifier) {
+void VerifiedMethod::GenerateDequickenMap(verifier::MethodVerifier* method_verifier) {
if (method_verifier->HasFailures()) {
return;
}
@@ -196,13 +208,24 @@
if (is_virtual_quick || is_range_quick) {
uint32_t dex_pc = inst->GetDexPc(insns);
verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
- mirror::ArtMethod* method = method_verifier->GetQuickInvokedMethod(inst, line,
- is_range_quick);
+ mirror::ArtMethod* method =
+ method_verifier->GetQuickInvokedMethod(inst, line, is_range_quick);
CHECK(method != nullptr);
// The verifier must know what the type of the object was or else we would have gotten a
// failure. Put the dex method index in the dequicken map since we need this to get the number
// of arguments in the compiler.
- dequicken_map_.Put(dex_pc, method->ToMethodReference());
+ dequicken_map_.Put(dex_pc, DexFileReference(method->GetDexFile(),
+ method->GetDexMethodIndex()));
+ } else if (IsInstructionIGetQuickOrIPutQuick(inst->Opcode())) {
+ uint32_t dex_pc = inst->GetDexPc(insns);
+ verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
+ mirror::ArtField* field = method_verifier->GetQuickFieldAccess(inst, line);
+ CHECK(field != nullptr);
+ // The verifier must know what the type of the field was or else we would have gotten a
+ // failure. Put the dex field index in the dequicken map since we need this for lowering
+ // in the compiler.
+ // TODO: Putting a field index in a method reference is gross.
+ dequicken_map_.Put(dex_pc, DexFileReference(field->GetDexFile(), field->GetDexFieldIndex()));
}
}
}
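(Aside: the dequicken map built above is consumed through GetDequickenIndex, a dex-PC-keyed lookup. A minimal sketch of that lookup, using std::map in place of SafeMap and a stand-in for DexFileReference:)

#include <cassert>
#include <cstdint>
#include <map>

// Stand-in for DexFileReference: (dex file, method or field index).
struct Ref { const void* dex_file; uint32_t index; };
using DequickenMap = std::map<uint32_t, Ref>;  // SafeMap in the real code.

// Returns null when the instruction at this dex PC was not quickened.
const Ref* FindDequicken(const DequickenMap& map, uint32_t dex_pc) {
  auto it = map.find(dex_pc);
  return (it != map.end()) ? &it->second : nullptr;
}

int main() {
  int fake_dex_file = 0;
  DequickenMap map;
  map.emplace(0x20u, Ref{&fake_dex_file, 7u});  // e.g. an iget-quick's field index.
  assert(FindDequicken(map, 0x20u) != nullptr);
  assert(FindDequicken(map, 0x24u) == nullptr);
  return 0;
}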
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index fe9dfd1..748bdcb 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -20,6 +20,7 @@
#include <vector>
#include "base/mutex.h"
+#include "dex_file.h"
#include "method_reference.h"
#include "safe_map.h"
@@ -39,6 +40,9 @@
// Devirtualization map type maps dex offset to concrete method reference.
typedef SafeMap<uint32_t, MethodReference> DevirtualizationMap;
+ // Dequicken map type maps dex offset to a field / method idx.
+ typedef SafeMap<uint32_t, DexFileReference> DequickenMap;
+
static const VerifiedMethod* Create(verifier::MethodVerifier* method_verifier, bool compile)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
~VerifiedMethod() = default;
@@ -58,6 +62,10 @@
// Returns the devirtualization target method, or nullptr if none.
const MethodReference* GetDevirtTarget(uint32_t dex_pc) const;
+ // Returns the dequickened field / method reference for a quick invoke / field get, or null
+ // if there is no entry for that dex pc.
+ const DexFileReference* GetDequickenIndex(uint32_t dex_pc) const;
+
// Returns true if the cast can statically be verified to be redundant
// by using the check-cast elision peephole optimization in the verifier.
bool IsSafeCast(uint32_t pc) const;
@@ -86,7 +94,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Generate dequickening map into dequicken_map_.
- void GenerateDeQuickenMap(verifier::MethodVerifier* method_verifier)
+ void GenerateDequickenMap(verifier::MethodVerifier* method_verifier)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Generate safe case set into safe_cast_set_.
@@ -95,9 +103,9 @@
std::vector<uint8_t> dex_gc_map_;
DevirtualizationMap devirt_map_;
- // Dequicken map is required for having the compiler compiled quickened invokes. The quicken map
- // enables us to get the dex method index so that we can get the required argument count.
- DevirtualizationMap dequicken_map_;
+ // The dequicken map is required for compiling quickened byte codes. It maps from dex PC to
+ // a dex method index or a dex field index, depending on the instruction.
+ DequickenMap dequicken_map_;
SafeCastSet safe_cast_set_;
};
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index b620969..2b78e38 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -19,6 +19,7 @@
#include "compiler_ir.h"
#include "dex/dataflow_iterator-inl.h"
#include "dex_flags.h"
+#include "driver/dex_compilation_unit.h"
namespace art {
@@ -259,8 +260,8 @@
if ((flags & Instruction::kInvoke) &&
(attrs & (DF_FORMAT_35C | DF_FORMAT_3RC))) {
DCHECK_EQ(next, 0);
- int target_idx = mir->dalvikInsn.vB;
- const char* shorty = GetShortyFromTargetIdx(target_idx);
+ const auto& lowering_info = GetMethodLoweringInfo(mir);
+ const char* shorty = GetShortyFromMethodReference(lowering_info.GetTargetMethod());
// Handle result type if floating point
if ((shorty[0] == 'F') || (shorty[0] == 'D')) {
MIR* move_result_mir = FindMoveResult(bb, mir);
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 9948c82..4a35e9f 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -56,14 +56,13 @@
return referrer_class;
}
-inline mirror::ArtField* CompilerDriver::ResolveField(
+inline mirror::ArtField* CompilerDriver::ResolveFieldWithDexFile(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
- Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
+ Handle<mirror::ClassLoader> class_loader, const DexFile* dex_file,
uint32_t field_idx, bool is_static) {
- DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
- DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
- mirror::ArtField* resolved_field = mUnit->GetClassLinker()->ResolveField(
- *mUnit->GetDexFile(), field_idx, dex_cache, class_loader, is_static);
+ DCHECK_EQ(dex_cache->GetDexFile(), dex_file);
+ mirror::ArtField* resolved_field = Runtime::Current()->GetClassLinker()->ResolveField(
+ *dex_file, field_idx, dex_cache, class_loader, is_static);
DCHECK_EQ(resolved_field == nullptr, soa.Self()->IsExceptionPending());
if (UNLIKELY(resolved_field == nullptr)) {
// Clean up any exception left by type resolution.
@@ -78,6 +77,19 @@
return resolved_field;
}
+inline mirror::DexCache* CompilerDriver::FindDexCache(const DexFile* dex_file) {
+ return Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file);
+}
+
+inline mirror::ArtField* CompilerDriver::ResolveField(
+ const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
+ Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
+ uint32_t field_idx, bool is_static) {
+ DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ return ResolveFieldWithDexFile(soa, dex_cache, class_loader, mUnit->GetDexFile(), field_idx,
+ is_static);
+}
+
inline void CompilerDriver::GetResolvedFieldDexFileLocation(
mirror::ArtField* resolved_field, const DexFile** declaring_dex_file,
uint16_t* declaring_class_idx, uint16_t* declaring_field_idx) {
@@ -172,7 +184,7 @@
inline mirror::ArtMethod* CompilerDriver::ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
- uint32_t method_idx, InvokeType invoke_type) {
+ uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change) {
DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
mirror::ArtMethod* resolved_method = mUnit->GetClassLinker()->ResolveMethod(
@@ -184,7 +196,8 @@
soa.Self()->ClearException();
return nullptr;
}
- if (UNLIKELY(resolved_method->CheckIncompatibleClassChange(invoke_type))) {
+ if (check_incompatible_class_change &&
+ UNLIKELY(resolved_method->CheckIncompatibleClassChange(invoke_type))) {
// Silently return nullptr on incompatible class change.
return nullptr;
}
@@ -227,14 +240,14 @@
target_method->dex_method_index))) {
return 0;
}
-
// Sharpen a virtual call into a direct call when the target is known not to have been
// overridden (ie is final).
- bool can_sharpen_virtual_based_on_type =
+ const bool same_dex_file = target_method->dex_file == mUnit->GetDexFile();
+ bool can_sharpen_virtual_based_on_type = same_dex_file &&
(*invoke_type == kVirtual) && (resolved_method->IsFinal() || methods_class->IsFinal());
// For invoke-super, ensure the vtable index will be correct to dispatch in the vtable of
// the super class.
- bool can_sharpen_super_based_on_type = (*invoke_type == kSuper) &&
+ bool can_sharpen_super_based_on_type = same_dex_file && (*invoke_type == kSuper) &&
(referrer_class != methods_class) && referrer_class->IsSubClass(methods_class) &&
resolved_method->GetMethodIndex() < methods_class->GetVTableLength() &&
(methods_class->GetVTableEntry(resolved_method->GetMethodIndex()) == resolved_method) &&
@@ -243,10 +256,10 @@
if (can_sharpen_virtual_based_on_type || can_sharpen_super_based_on_type) {
// Sharpen a virtual call into a direct call. The method_idx is into referrer's
// dex cache, check that this resolved method is where we expect it.
- CHECK(target_method->dex_file == mUnit->GetDexFile());
- DCHECK(dex_cache.Get() == mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
- CHECK(referrer_class->GetDexCache()->GetResolvedMethod(target_method->dex_method_index) ==
- resolved_method) << PrettyMethod(resolved_method);
+ CHECK_EQ(target_method->dex_file, mUnit->GetDexFile());
+ DCHECK_EQ(dex_cache.Get(), mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
+ CHECK_EQ(referrer_class->GetDexCache()->GetResolvedMethod(target_method->dex_method_index),
+ resolved_method) << PrettyMethod(resolved_method);
int stats_flags = kFlagMethodResolved;
GetCodeAndMethodForDirectCall(/*out*/invoke_type,
kDirect, // Sharp type
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index b8a8936..029fd46 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -360,6 +360,7 @@
classes_to_compile_(compiled_classes),
thread_count_(thread_count),
stats_(new AOTCompilationStats),
+ dedupe_enabled_(true),
dump_stats_(dump_stats),
dump_passes_(dump_passes),
dump_cfg_file_name_(dump_cfg_file_name),
@@ -380,12 +381,7 @@
compiler_->Init();
- CHECK(!Runtime::Current()->IsStarted());
- if (image_) {
- CHECK(image_classes_.get() != nullptr);
- } else {
- CHECK(image_classes_.get() == nullptr);
- }
+ CHECK_EQ(image_, image_classes_.get() != nullptr);
// Read the profile file if one is provided.
if (!profile_file.empty()) {
@@ -399,26 +395,32 @@
}
SwapVector<uint8_t>* CompilerDriver::DeduplicateCode(const ArrayRef<const uint8_t>& code) {
+ DCHECK(dedupe_enabled_);
return dedupe_code_.Add(Thread::Current(), code);
}
SwapSrcMap* CompilerDriver::DeduplicateSrcMappingTable(const ArrayRef<SrcMapElem>& src_map) {
+ DCHECK(dedupe_enabled_);
return dedupe_src_mapping_table_.Add(Thread::Current(), src_map);
}
SwapVector<uint8_t>* CompilerDriver::DeduplicateMappingTable(const ArrayRef<const uint8_t>& code) {
+ DCHECK(dedupe_enabled_);
return dedupe_mapping_table_.Add(Thread::Current(), code);
}
SwapVector<uint8_t>* CompilerDriver::DeduplicateVMapTable(const ArrayRef<const uint8_t>& code) {
+ DCHECK(dedupe_enabled_);
return dedupe_vmap_table_.Add(Thread::Current(), code);
}
SwapVector<uint8_t>* CompilerDriver::DeduplicateGCMap(const ArrayRef<const uint8_t>& code) {
+ DCHECK(dedupe_enabled_);
return dedupe_gc_map_.Add(Thread::Current(), code);
}
SwapVector<uint8_t>* CompilerDriver::DeduplicateCFIInfo(const ArrayRef<const uint8_t>& cfi_info) {
+ DCHECK(dedupe_enabled_);
return dedupe_cfi_info_.Add(Thread::Current(), cfi_info);
}
@@ -491,8 +493,12 @@
static DexToDexCompilationLevel GetDexToDexCompilationlevel(
Thread* self, Handle<mirror::ClassLoader> class_loader, const DexFile& dex_file,
const DexFile::ClassDef& class_def) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ auto* const runtime = Runtime::Current();
+ if (runtime->UseJit()) {
+ return kDontDexToDexCompile;
+ }
const char* descriptor = dex_file.GetClassDescriptor(class_def);
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ ClassLinker* class_linker = runtime->GetClassLinker();
mirror::Class* klass = class_linker->FindClass(self, descriptor, class_loader);
if (klass == nullptr) {
CHECK(self->IsExceptionPending());
@@ -518,9 +524,8 @@
}
}
-void CompilerDriver::CompileOne(mirror::ArtMethod* method, TimingLogger* timings) {
+void CompilerDriver::CompileOne(Thread* self, mirror::ArtMethod* method, TimingLogger* timings) {
DCHECK(!Runtime::Current()->IsStarted());
- Thread* self = Thread::Current();
jobject jclass_loader;
const DexFile* dex_file;
uint16_t class_def_idx;
@@ -529,9 +534,8 @@
InvokeType invoke_type = method->GetInvokeType();
{
ScopedObjectAccessUnchecked soa(self);
- ScopedLocalRef<jobject>
- local_class_loader(soa.Env(),
- soa.AddLocalReference<jobject>(method->GetDeclaringClass()->GetClassLoader()));
+ ScopedLocalRef<jobject> local_class_loader(
+ soa.Env(), soa.AddLocalReference<jobject>(method->GetDeclaringClass()->GetClassLoader()));
jclass_loader = soa.Env()->NewGlobalRef(local_class_loader.get());
// Find the dex_file
dex_file = method->GetDexFile();
@@ -549,7 +553,7 @@
// Can we run DEX-to-DEX compiler on this class ?
DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile;
{
- ScopedObjectAccess soa(Thread::Current());
+ ScopedObjectAccess soa(self);
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
StackHandleScope<1> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
@@ -557,14 +561,35 @@
dex_to_dex_compilation_level = GetDexToDexCompilationlevel(self, class_loader, *dex_file,
class_def);
}
- CompileMethod(code_item, access_flags, invoke_type, class_def_idx, method_idx, jclass_loader,
- *dex_file, dex_to_dex_compilation_level, true);
+ CompileMethod(self, code_item, access_flags, invoke_type, class_def_idx, method_idx,
+ jclass_loader, *dex_file, dex_to_dex_compilation_level, true);
self->GetJniEnv()->DeleteGlobalRef(jclass_loader);
-
self->TransitionFromSuspendedToRunnable();
}
+CompiledMethod* CompilerDriver::CompileMethod(Thread* self, mirror::ArtMethod* method) {
+ const uint32_t method_idx = method->GetDexMethodIndex();
+ const uint32_t access_flags = method->GetAccessFlags();
+ const InvokeType invoke_type = method->GetInvokeType();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ method->GetDeclaringClass()->GetClassLoader()));
+ jobject jclass_loader = class_loader.ToJObject();
+ const DexFile* dex_file = method->GetDexFile();
+ const uint16_t class_def_idx = method->GetClassDefIndex();
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
+ DexToDexCompilationLevel dex_to_dex_compilation_level =
+ GetDexToDexCompilationlevel(self, class_loader, *dex_file, class_def);
+ const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
+ self->TransitionFromRunnableToSuspended(kNative);
+ CompileMethod(self, code_item, access_flags, invoke_type, class_def_idx, method_idx,
+ jclass_loader, *dex_file, dex_to_dex_compilation_level, true);
+ auto* compiled_method = GetCompiledMethod(MethodReference(dex_file, method_idx));
+ self->TransitionFromSuspendedToRunnable();
+ return compiled_method;
+}
+
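(Aside: the new CompileMethod entry point returns a swap-allocated CompiledMethod that the caller is expected to hand back via RemoveCompiledMethod below. A self-contained sketch of that ownership contract; all names are illustrative stand-ins, not the real driver API:)

#include <cstdint>
#include <map>

struct CompiledStub { uint32_t method_idx; };  // Stand-in for CompiledMethod.

class DriverStub {
 public:
  CompiledStub* CompileMethod(uint32_t method_idx) {
    CompiledStub* m = new CompiledStub{method_idx};
    table_[method_idx] = m;
    return m;
  }
  void RemoveCompiledMethod(uint32_t method_idx) {
    auto it = table_.find(method_idx);
    if (it != table_.end()) {
      delete it->second;  // ReleaseSwapAllocatedCompiledMethod in the real code.
      table_.erase(it);
    }
  }
 private:
  std::map<uint32_t, CompiledStub*> table_;
};

int main() {
  DriverStub driver;
  CompiledStub* compiled = driver.CompileMethod(7u);
  (void)compiled;  // A JIT would install the generated code here.
  driver.RemoveCompiledMethod(7u);  // Avoid accumulating swap allocations.
  return 0;
}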
void CompilerDriver::Resolve(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings) {
for (size_t i = 0; i != dex_files.size(); ++i) {
@@ -1035,7 +1060,8 @@
bool* is_type_initialized, bool* use_direct_type_ptr,
uintptr_t* direct_type_ptr, bool* out_is_finalizable) {
ScopedObjectAccess soa(Thread::Current());
- mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
+ Runtime* runtime = Runtime::Current();
+ mirror::DexCache* dex_cache = runtime->GetClassLinker()->FindDexCache(dex_file);
mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
if (resolved_class == nullptr) {
return false;
@@ -1045,7 +1071,8 @@
return false;
}
*out_is_finalizable = resolved_class->IsFinalizable();
- const bool compiling_boot = Runtime::Current()->GetHeap()->IsCompilingBoot();
+ gc::Heap* heap = runtime->GetHeap();
+ const bool compiling_boot = heap->IsCompilingBoot();
const bool support_boot_image_fixup = GetSupportBootImageFixup();
if (compiling_boot) {
// boot -> boot class pointers.
@@ -1061,10 +1088,15 @@
} else {
return false;
}
+ } else if (runtime->UseJit() && !heap->IsMovableObject(resolved_class)) {
+ *is_type_initialized = resolved_class->IsInitialized();
+ // If the class may move around, then don't embed it as a direct pointer.
+ *use_direct_type_ptr = true;
+ *direct_type_ptr = reinterpret_cast<uintptr_t>(resolved_class);
+ return true;
} else {
// True if the class is in the image at app compiling time.
- const bool class_in_image =
- Runtime::Current()->GetHeap()->FindSpaceFromObject(resolved_class, false)->IsImageSpace();
+ const bool class_in_image = heap->FindSpaceFromObject(resolved_class, false)->IsImageSpace();
if (class_in_image && support_boot_image_fixup) {
// boot -> app class pointers.
*is_type_initialized = resolved_class->IsInitialized();
@@ -1257,8 +1289,10 @@
// invoked, so this can be passed to the out-of-line runtime support code.
*direct_code = 0;
*direct_method = 0;
+ Runtime* const runtime = Runtime::Current();
+ gc::Heap* const heap = runtime->GetHeap();
bool use_dex_cache = GetCompilerOptions().GetCompilePic(); // Off by default
- const bool compiling_boot = Runtime::Current()->GetHeap()->IsCompilingBoot();
+ const bool compiling_boot = heap->IsCompilingBoot();
// TODO This is somewhat hacky. We should refactor all of this invoke codepath.
const bool force_relocations = (compiling_boot ||
GetCompilerOptions().GetIncludePatchInformation());
@@ -1267,20 +1301,29 @@
}
// TODO: support patching on all architectures.
use_dex_cache = use_dex_cache || (force_relocations && !support_boot_image_fixup_);
- bool method_code_in_boot = (method->GetDeclaringClass()->GetClassLoader() == nullptr);
+ mirror::Class* declaring_class = method->GetDeclaringClass();
+ bool method_code_in_boot = declaring_class->GetClassLoader() == nullptr;
if (!use_dex_cache) {
if (!method_code_in_boot) {
use_dex_cache = true;
} else {
bool has_clinit_trampoline =
- method->IsStatic() && !method->GetDeclaringClass()->IsInitialized();
- if (has_clinit_trampoline && (method->GetDeclaringClass() != referrer_class)) {
+ method->IsStatic() && !declaring_class->IsInitialized();
+ if (has_clinit_trampoline && declaring_class != referrer_class) {
// Ensure we run the clinit trampoline unless we are invoking a static method in the same
// class.
use_dex_cache = true;
}
}
}
+ if (runtime->UseJit()) {
+ // If we are the JIT, then don't allow a direct call to the interpreter bridge since this will
+ // never be updated even after we compile the method.
+ if (runtime->GetClassLinker()->IsQuickToInterpreterBridge(
+ reinterpret_cast<const void*>(compiler_->GetEntryPointOf(method)))) {
+ use_dex_cache = true;
+ }
+ }
if (method_code_in_boot) {
*stats_flags |= kFlagDirectCallToBoot | kFlagDirectMethodToBoot;
}
@@ -1302,7 +1345,9 @@
// The method is defined not within this dex file. We need a dex cache slot within the current
// dex file or direct pointers.
bool must_use_direct_pointers = false;
- if (target_method->dex_file == method->GetDeclaringClass()->GetDexCache()->GetDexFile()) {
+ mirror::DexCache* dex_cache = declaring_class->GetDexCache();
+ if (target_method->dex_file == dex_cache->GetDexFile() &&
+ !(runtime->UseJit() && dex_cache->GetResolvedMethod(method->GetDexMethodIndex()) == nullptr)) {
target_method->dex_method_index = method->GetDexMethodIndex();
} else {
if (no_guarantee_of_dex_cache_entry) {
@@ -1315,7 +1360,7 @@
} else {
if (force_relocations && !use_dex_cache) {
target_method->dex_method_index = method->GetDexMethodIndex();
- target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
+ target_method->dex_file = dex_cache->GetDexFile();
}
must_use_direct_pointers = true;
}
@@ -1330,8 +1375,7 @@
*type = sharp_type;
}
} else {
- bool method_in_image =
- Runtime::Current()->GetHeap()->FindSpaceFromObject(method, false)->IsImageSpace();
+ bool method_in_image = heap->FindSpaceFromObject(method, false)->IsImageSpace();
if (method_in_image || compiling_boot) {
// We know we must be able to get to the method in the image, so use that pointer.
CHECK(!method->IsAbstract());
@@ -1926,8 +1970,7 @@
if (!success) {
CHECK(soa.Self()->IsExceptionPending());
- ThrowLocation throw_location;
- mirror::Throwable* exception = soa.Self()->GetException(&throw_location);
+ mirror::Throwable* exception = soa.Self()->GetException(nullptr);
VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
<< exception->Dump();
std::ostream* file_log = manager->GetCompiler()->
@@ -2000,10 +2043,11 @@
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
ClassLinker* class_linker = manager->GetClassLinker();
jobject jclass_loader = manager->GetClassLoader();
+ Thread* self = Thread::Current();
{
// Use a scoped object access to perform to the quick SkipClass check.
const char* descriptor = dex_file.GetClassDescriptor(class_def);
- ScopedObjectAccess soa(Thread::Current());
+ ScopedObjectAccess soa(self);
StackHandleScope<3> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
@@ -2030,7 +2074,7 @@
// Can we run DEX-to-DEX compiler on this class ?
DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile;
{
- ScopedObjectAccess soa(Thread::Current());
+ ScopedObjectAccess soa(self);
StackHandleScope<1> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
@@ -2061,7 +2105,7 @@
continue;
}
previous_direct_method_idx = method_idx;
- driver->CompileMethod(it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
+ driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
it.GetMethodInvokeType(class_def), class_def_index,
method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
compilation_enabled);
@@ -2078,7 +2122,7 @@
continue;
}
previous_virtual_method_idx = method_idx;
- driver->CompileMethod(it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
+ driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
it.GetMethodInvokeType(class_def), class_def_index,
method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
compilation_enabled);
@@ -2111,10 +2155,10 @@
}
}
-void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
- InvokeType invoke_type, uint16_t class_def_idx,
- uint32_t method_idx, jobject class_loader,
- const DexFile& dex_file,
+void CompilerDriver::CompileMethod(Thread* self, const DexFile::CodeItem* code_item,
+ uint32_t access_flags, InvokeType invoke_type,
+ uint16_t class_def_idx, uint32_t method_idx,
+ jobject class_loader, const DexFile& dex_file,
DexToDexCompilationLevel dex_to_dex_compilation_level,
bool compilation_enabled) {
CompiledMethod* compiled_method = nullptr;
@@ -2162,7 +2206,6 @@
}
}
- Thread* self = Thread::Current();
if (compiled_method != nullptr) {
// Count non-relative linker patches.
size_t non_relative_linker_patch_count = 0u;
@@ -2194,6 +2237,21 @@
}
}
+void CompilerDriver::RemoveCompiledMethod(const MethodReference& method_ref) {
+ CompiledMethod* compiled_method = nullptr;
+ {
+ MutexLock mu(Thread::Current(), compiled_methods_lock_);
+ auto it = compiled_methods_.find(method_ref);
+ if (it != compiled_methods_.end()) {
+ compiled_method = it->second;
+ compiled_methods_.erase(it);
+ }
+ }
+ if (compiled_method != nullptr) {
+ CompiledMethod::ReleaseSwapAllocatedCompiledMethod(this, compiled_method);
+ }
+}
+
CompiledClass* CompilerDriver::GetCompiledClass(ClassReference ref) const {
MutexLock mu(Thread::Current(), compiled_classes_lock_);
ClassTable::const_iterator it = compiled_classes_.find(ref);
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index b756244..24b6f17 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -45,6 +45,10 @@
namespace art {
+namespace mirror {
+class DexCache;
+} // namespace mirror
+
namespace verifier {
class MethodVerifier;
} // namespace verifier
@@ -107,8 +111,11 @@
TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
+ CompiledMethod* CompileMethod(Thread* self, mirror::ArtMethod*)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) WARN_UNUSED;
+
// Compile a single Method.
- void CompileOne(mirror::ArtMethod* method, TimingLogger* timings)
+ void CompileOne(Thread* self, mirror::ArtMethod* method, TimingLogger* timings)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
VerificationResults* GetVerificationResults() const {
@@ -172,6 +179,9 @@
size_t GetNonRelativeLinkerPatchCount() const
LOCKS_EXCLUDED(compiled_methods_lock_);
+ // Remove and delete a compiled method.
+ void RemoveCompiledMethod(const MethodReference& method_ref);
+
void AddRequiresConstructorBarrier(Thread* self, const DexFile* dex_file,
uint16_t class_def_index);
bool RequiresConstructorBarrier(Thread* self, const DexFile* dex_file, uint16_t class_def_index);
@@ -226,6 +236,13 @@
uint32_t field_idx, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Resolve a field with a given dex file.
+ mirror::ArtField* ResolveFieldWithDexFile(
+ const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
+ Handle<mirror::ClassLoader> class_loader, const DexFile* dex_file,
+ uint32_t field_idx, bool is_static)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Get declaration location of a resolved field.
void GetResolvedFieldDexFileLocation(
mirror::ArtField* resolved_field, const DexFile** declaring_dex_file,
@@ -235,6 +252,10 @@
bool IsFieldVolatile(mirror::ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
MemberOffset GetFieldOffset(mirror::ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Find a dex cache for a dex file.
+ inline mirror::DexCache* FindDexCache(const DexFile* dex_file)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Can we fast-path an IGET/IPUT access to an instance field? If yes, compute the field offset.
std::pair<bool, bool> IsFastInstanceField(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
@@ -261,7 +282,7 @@
mirror::ArtMethod* ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
- uint32_t method_idx, InvokeType invoke_type)
+ uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change = true)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get declaration location of a resolved field.
@@ -295,6 +316,13 @@
void ProcessedStaticField(bool resolved, bool local);
void ProcessedInvoke(InvokeType invoke_type, int flags);
+ void ComputeFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
+ const ScopedObjectAccess& soa, bool is_static,
+ mirror::ArtField** resolved_field,
+ mirror::Class** referrer_class,
+ mirror::DexCache** dex_cache)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Can we fast path instance field access? Computes field's offset and volatility.
bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
MemberOffset* field_offset, bool* is_volatile)
@@ -380,6 +408,13 @@
return timings_logger_;
}
+ void SetDedupeEnabled(bool dedupe_enabled) {
+ dedupe_enabled_ = dedupe_enabled;
+ }
+ bool DedupeEnabled() const {
+ return dedupe_enabled_;
+ }
+
// Checks if class specified by type_idx is one of the image_classes_
bool IsImageClass(const char* descriptor) const;
@@ -484,7 +519,7 @@
const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
- void CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
+ void CompileMethod(Thread* self, const DexFile::CodeItem* code_item, uint32_t access_flags,
InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx,
jobject class_loader, const DexFile& dex_file,
DexToDexCompilationLevel dex_to_dex_compilation_level,
@@ -545,6 +580,7 @@
class AOTCompilationStats;
std::unique_ptr<AOTCompilationStats> stats_;
+ bool dedupe_enabled_;
bool dump_stats_;
const bool dump_passes_;
const std::string& dump_cfg_file_name_;
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index 94268de..9ab3602 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -538,6 +538,8 @@
Elf_Word rodata_size,
Elf_Word text_relative_offset,
Elf_Word text_size,
+ Elf_Word bss_relative_offset,
+ Elf_Word bss_size,
const bool add_symbols,
bool debug = false)
: oat_writer_(oat_writer),
@@ -547,6 +549,7 @@
text_builder_(".text", text_size, text_relative_offset, SHT_PROGBITS,
SHF_ALLOC | SHF_EXECINSTR),
rodata_builder_(".rodata", rodata_size, rodata_relative_offset, SHT_PROGBITS, SHF_ALLOC),
+ bss_builder_(".bss", bss_size, bss_relative_offset, SHT_NOBITS, SHF_ALLOC),
dynsym_builder_(".dynsym", SHT_DYNSYM, ".dynstr", SHT_STRTAB, true),
symtab_builder_(".symtab", SHT_SYMTAB, ".strtab", SHT_STRTAB, false),
hash_builder_(".hash", SHT_HASH, SHF_ALLOC, &dynsym_builder_, 0, sizeof(Elf_Word),
@@ -569,6 +572,11 @@
}
bool Init() {
+ // Since the .text section of an oat file contains relative references to .rodata
+ // and (optionally) .bss, we keep these 2 or 3 sections together. This creates
+ // a non-traditional layout where the .bss section is mapped independently of the
+ // .dynamic section and needs its own program header with LOAD RW.
+ //
// The basic layout of the elf file. Order may be different in final output.
// +-------------------------+
// | Elf_Ehdr |
@@ -576,6 +584,7 @@
// | Elf_Phdr PHDR |
// | Elf_Phdr LOAD R | .dynsym .dynstr .hash .rodata
// | Elf_Phdr LOAD R X | .text
+ // | Elf_Phdr LOAD RW | .bss (Optional)
// | Elf_Phdr LOAD RW | .dynamic
// | Elf_Phdr DYNAMIC | .dynamic
// +-------------------------+
@@ -584,6 +593,8 @@
// | Elf_Sym oatdata |
// | Elf_Sym oatexec |
// | Elf_Sym oatlastword |
+ // | Elf_Sym oatbss | (Optional)
+ // | Elf_Sym oatbsslastword | (Optional)
// +-------------------------+
// | .dynstr |
// | \0 |
@@ -631,6 +642,7 @@
// | .hash\0 |
// | .rodata\0 |
// | .text\0 |
+ // | .bss\0 | (Optional)
// | .shstrtab\0 |
// | .symtab\0 | (Optional)
// | .strtab\0 | (Optional)
@@ -654,8 +666,9 @@
// | Elf_Shdr .dynsym |
// | Elf_Shdr .dynstr |
// | Elf_Shdr .hash |
- // | Elf_Shdr .text |
// | Elf_Shdr .rodata |
+ // | Elf_Shdr .text |
+ // | Elf_Shdr .bss | (Optional)
// | Elf_Shdr .dynamic |
// | Elf_Shdr .shstrtab |
// | Elf_Shdr .debug_info | (Optional)
@@ -694,8 +707,11 @@
program_headers_[PH_LOAD_R_X].p_type = PT_LOAD;
program_headers_[PH_LOAD_R_X].p_flags = PF_R | PF_X;
- program_headers_[PH_LOAD_RW_].p_type = PT_LOAD;
- program_headers_[PH_LOAD_RW_].p_flags = PF_R | PF_W;
+ program_headers_[PH_LOAD_RW_BSS].p_type = PT_LOAD;
+ program_headers_[PH_LOAD_RW_BSS].p_flags = PF_R | PF_W;
+
+ program_headers_[PH_LOAD_RW_DYNAMIC].p_type = PT_LOAD;
+ program_headers_[PH_LOAD_RW_DYNAMIC].p_flags = PF_R | PF_W;
program_headers_[PH_DYNAMIC].p_type = PT_DYNAMIC;
program_headers_[PH_DYNAMIC].p_flags = PF_R | PF_W;
@@ -760,6 +776,14 @@
text_builder_.SetSectionIndex(section_index_);
section_index_++;
+ // Setup .bss
+ if (bss_builder_.GetSize() != 0u) {
+ section_ptrs_.push_back(bss_builder_.GetSection());
+ AssignSectionStr(&bss_builder_, &shstrtab_);
+ bss_builder_.SetSectionIndex(section_index_);
+ section_index_++;
+ }
+
// Setup .dynamic
section_ptrs_.push_back(dynamic_builder_.GetSection());
AssignSectionStr(&dynamic_builder_, &shstrtab_);
@@ -820,10 +844,20 @@
CHECK_ALIGNED(rodata_builder_.GetSection()->sh_offset +
rodata_builder_.GetSection()->sh_size, kPageSize);
+ // Get the layout of the .bss section.
+ bss_builder_.GetSection()->sh_offset =
+ NextOffset<Elf_Word, Elf_Shdr>(*bss_builder_.GetSection(),
+ *text_builder_.GetSection());
+ bss_builder_.GetSection()->sh_addr = bss_builder_.GetSection()->sh_offset;
+ bss_builder_.GetSection()->sh_size = bss_builder_.GetSize();
+ bss_builder_.GetSection()->sh_link = bss_builder_.GetLink();
+
// Get the layout of the dynamic section.
- dynamic_builder_.GetSection()->sh_offset =
- NextOffset<Elf_Word, Elf_Shdr>(*dynamic_builder_.GetSection(), *text_builder_.GetSection());
- dynamic_builder_.GetSection()->sh_addr = dynamic_builder_.GetSection()->sh_offset;
+ CHECK(IsAlignedParam(bss_builder_.GetSection()->sh_offset,
+ dynamic_builder_.GetSection()->sh_addralign));
+ dynamic_builder_.GetSection()->sh_offset = bss_builder_.GetSection()->sh_offset;
+ dynamic_builder_.GetSection()->sh_addr =
+ NextOffset<Elf_Word, Elf_Shdr>(*dynamic_builder_.GetSection(), *bss_builder_.GetSection());
dynamic_builder_.GetSection()->sh_size = dynamic_builder_.GetSize() * sizeof(Elf_Dyn);
dynamic_builder_.GetSection()->sh_link = dynamic_builder_.GetLink();
@@ -987,16 +1021,23 @@
program_headers_[PH_LOAD_R_X].p_memsz = load_rx_size;
program_headers_[PH_LOAD_R_X].p_align = text_builder_.GetSection()->sh_addralign;
- program_headers_[PH_LOAD_RW_].p_offset = dynamic_builder_.GetSection()->sh_offset;
- program_headers_[PH_LOAD_RW_].p_vaddr = dynamic_builder_.GetSection()->sh_offset;
- program_headers_[PH_LOAD_RW_].p_paddr = dynamic_builder_.GetSection()->sh_offset;
- program_headers_[PH_LOAD_RW_].p_filesz = dynamic_builder_.GetSection()->sh_size;
- program_headers_[PH_LOAD_RW_].p_memsz = dynamic_builder_.GetSection()->sh_size;
- program_headers_[PH_LOAD_RW_].p_align = dynamic_builder_.GetSection()->sh_addralign;
+ program_headers_[PH_LOAD_RW_BSS].p_offset = bss_builder_.GetSection()->sh_offset;
+ program_headers_[PH_LOAD_RW_BSS].p_vaddr = bss_builder_.GetSection()->sh_offset;
+ program_headers_[PH_LOAD_RW_BSS].p_paddr = bss_builder_.GetSection()->sh_offset;
+ program_headers_[PH_LOAD_RW_BSS].p_filesz = 0;
+ program_headers_[PH_LOAD_RW_BSS].p_memsz = bss_builder_.GetSection()->sh_size;
+ program_headers_[PH_LOAD_RW_BSS].p_align = bss_builder_.GetSection()->sh_addralign;
+
+ program_headers_[PH_LOAD_RW_DYNAMIC].p_offset = dynamic_builder_.GetSection()->sh_offset;
+ program_headers_[PH_LOAD_RW_DYNAMIC].p_vaddr = dynamic_builder_.GetSection()->sh_addr;
+ program_headers_[PH_LOAD_RW_DYNAMIC].p_paddr = dynamic_builder_.GetSection()->sh_addr;
+ program_headers_[PH_LOAD_RW_DYNAMIC].p_filesz = dynamic_builder_.GetSection()->sh_size;
+ program_headers_[PH_LOAD_RW_DYNAMIC].p_memsz = dynamic_builder_.GetSection()->sh_size;
+ program_headers_[PH_LOAD_RW_DYNAMIC].p_align = dynamic_builder_.GetSection()->sh_addralign;
program_headers_[PH_DYNAMIC].p_offset = dynamic_builder_.GetSection()->sh_offset;
- program_headers_[PH_DYNAMIC].p_vaddr = dynamic_builder_.GetSection()->sh_offset;
- program_headers_[PH_DYNAMIC].p_paddr = dynamic_builder_.GetSection()->sh_offset;
+ program_headers_[PH_DYNAMIC].p_vaddr = dynamic_builder_.GetSection()->sh_addr;
+ program_headers_[PH_DYNAMIC].p_paddr = dynamic_builder_.GetSection()->sh_addr;
program_headers_[PH_DYNAMIC].p_filesz = dynamic_builder_.GetSection()->sh_size;
program_headers_[PH_DYNAMIC].p_memsz = dynamic_builder_.GetSection()->sh_size;
program_headers_[PH_DYNAMIC].p_align = dynamic_builder_.GetSection()->sh_addralign;
@@ -1004,15 +1045,29 @@
// Finish setup of the Ehdr values.
elf_header_.e_phoff = PHDR_OFFSET;
elf_header_.e_shoff = sections_offset;
- elf_header_.e_phnum = PH_NUM;
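+ // Drop the .bss PT_LOAD from the header count when the section is empty.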
+ elf_header_.e_phnum = (bss_builder_.GetSection()->sh_size != 0u) ? PH_NUM : PH_NUM - 1;
elf_header_.e_shnum = section_ptrs_.size();
elf_header_.e_shstrndx = shstrtab_builder_.GetSectionIndex();
// Add the rest of the pieces to the list.
pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("Elf Header", 0, &elf_header_,
sizeof(elf_header_)));
- pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("Program headers", PHDR_OFFSET,
- &program_headers_, sizeof(program_headers_)));
+ if (bss_builder_.GetSection()->sh_size != 0u) {
+ pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("Program headers", PHDR_OFFSET,
+ &program_headers_[0],
+ elf_header_.e_phnum * sizeof(Elf_Phdr)));
+ } else {
+ // Skip PH_LOAD_RW_BSS.
+ Elf_Word part1_size = PH_LOAD_RW_BSS * sizeof(Elf_Phdr);
+ Elf_Word part2_size = (PH_NUM - PH_LOAD_RW_BSS - 1) * sizeof(Elf_Phdr);
+ CHECK_EQ(part1_size + part2_size, elf_header_.e_phnum * sizeof(Elf_Phdr));
+ pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("Program headers", PHDR_OFFSET,
+ &program_headers_[0], part1_size));
+ pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("Program headers part 2",
+ PHDR_OFFSET + part1_size,
+ &program_headers_[PH_LOAD_RW_BSS + 1],
+ part2_size));
+ }
pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".dynamic",
dynamic_builder_.GetSection()->sh_offset,
dynamic.data(),
@@ -1175,6 +1230,12 @@
text_builder_.GetSize(), STB_GLOBAL, STT_OBJECT);
dynsym_builder_.AddSymbol("oatlastword", &text_builder_, text_builder_.GetSize() - 4,
true, 4, STB_GLOBAL, STT_OBJECT);
+ if (bss_builder_.GetSize() != 0u) {
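+ // Export symbols bounding the .bss so the runtime can locate the DexCache data it will hold.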
+ dynsym_builder_.AddSymbol("oatbss", &bss_builder_, 0, true,
+ bss_builder_.GetSize(), STB_GLOBAL, STT_OBJECT);
+ dynsym_builder_.AddSymbol("oatbsslastword", &bss_builder_, bss_builder_.GetSize() - 4,
+ true, 4, STB_GLOBAL, STT_OBJECT);
+ }
}
void AssignSectionStr(ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* builder,
@@ -1213,12 +1274,13 @@
// What phdr is.
static const uint32_t PHDR_OFFSET = sizeof(Elf_Ehdr);
enum : uint8_t {
- PH_PHDR = 0,
- PH_LOAD_R__ = 1,
- PH_LOAD_R_X = 2,
- PH_LOAD_RW_ = 3,
- PH_DYNAMIC = 4,
- PH_NUM = 5,
+ PH_PHDR = 0,
+ PH_LOAD_R__ = 1,
+ PH_LOAD_R_X = 2,
+ PH_LOAD_RW_BSS = 3,
+ PH_LOAD_RW_DYNAMIC = 4,
+ PH_DYNAMIC = 5,
+ PH_NUM = 6,
};
static const uint32_t PHDR_SIZE = sizeof(Elf_Phdr) * PH_NUM;
Elf_Phdr program_headers_[PH_NUM];
@@ -1236,6 +1298,7 @@
ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> text_builder_;
ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> rodata_builder_;
+ ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> bss_builder_;
ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr> dynsym_builder_;
ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr> symtab_builder_;
ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> hash_builder_;
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 401d5a9..a822b24 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -229,6 +229,7 @@
const OatHeader& oat_header = oat_writer->GetOatHeader();
Elf_Word oat_data_size = oat_header.GetExecutableOffset();
uint32_t oat_exec_size = oat_writer->GetSize() - oat_data_size;
+ uint32_t oat_bss_size = oat_writer->GetBssSize();
OatWriterWrapper wrapper(oat_writer);
@@ -243,6 +244,8 @@
oat_data_size,
oat_data_size,
oat_exec_size,
+ RoundUp(oat_data_size + oat_exec_size, kPageSize),
+ oat_bss_size,
compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols(),
debug));
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index c588e1a..f5f9320 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -408,8 +408,8 @@
bool ImageWriter::AllocMemory() {
size_t length = RoundUp(Runtime::Current()->GetHeap()->GetTotalMemory(), kPageSize);
std::string error_msg;
- image_.reset(MemMap::MapAnonymous("image writer image", NULL, length, PROT_READ | PROT_WRITE,
- false, &error_msg));
+ image_.reset(MemMap::MapAnonymous("image writer image", nullptr, length, PROT_READ | PROT_WRITE,
+ false, false, &error_msg));
if (UNLIKELY(image_.get() == nullptr)) {
LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
return false;
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
new file mode 100644
index 0000000..0283791
--- /dev/null
+++ b/compiler/jit/jit_compiler.cc
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jit_compiler.h"
+
+#include "arch/instruction_set.h"
+#include "arch/instruction_set_features.h"
+#include "compiler_callbacks.h"
+#include "dex/pass_manager.h"
+#include "dex/quick_compiler_callbacks.h"
+#include "driver/compiler_driver.h"
+#include "driver/compiler_options.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
+#include "mirror/art_method-inl.h"
+#include "oat_file-inl.h"
+#include "object_lock.h"
+#include "thread_list.h"
+#include "verifier/method_verifier-inl.h"
+
+namespace art {
+namespace jit {
+
+JitCompiler* JitCompiler::Create() {
+ return new JitCompiler();
+}
+
+extern "C" void* jit_load(CompilerCallbacks** callbacks) {
+ VLOG(jit) << "loading jit compiler";
+ auto* const jit_compiler = JitCompiler::Create();
+ CHECK(jit_compiler != nullptr);
+ *callbacks = jit_compiler->GetCompilerCallbacks();
+ VLOG(jit) << "Done loading jit compiler";
+ return jit_compiler;
+}
+
+extern "C" void jit_unload(void* handle) {
+ DCHECK(handle != nullptr);
+ delete reinterpret_cast<JitCompiler*>(handle);
+}
+
+extern "C" bool jit_compile_method(void* handle, mirror::ArtMethod* method, Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
+ DCHECK(jit_compiler != nullptr);
+ return jit_compiler->CompileMethod(self, method);
+}
+
+JitCompiler::JitCompiler() : total_time_(0) {
+ auto* pass_manager_options = new PassManagerOptions;
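+ // GVN and DCE are disabled for JIT compiles, presumably to keep per-method compile time down.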
+ pass_manager_options->SetDisablePassList("GVN,DCE");
+ compiler_options_.reset(new CompilerOptions(
+ CompilerOptions::kDefaultCompilerFilter,
+ CompilerOptions::kDefaultHugeMethodThreshold,
+ CompilerOptions::kDefaultLargeMethodThreshold,
+ CompilerOptions::kDefaultSmallMethodThreshold,
+ CompilerOptions::kDefaultTinyMethodThreshold,
+ CompilerOptions::kDefaultNumDexMethodsThreshold,
+ false,
+ false,
+ CompilerOptions::kDefaultTopKProfileThreshold,
+ false,
+ false,
+ false,
+ false,
+ false, // pic
+ nullptr,
+ pass_manager_options,
+ nullptr));
+ const InstructionSet instruction_set = kRuntimeISA;
+ instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
+ cumulative_logger_.reset(new CumulativeLogger("jit times"));
+ verification_results_.reset(new VerificationResults(compiler_options_.get()));
+ method_inliner_map_.reset(new DexFileToMethodInlinerMap);
+ callbacks_.reset(new QuickCompilerCallbacks(verification_results_.get(),
+ method_inliner_map_.get()));
+ compiler_driver_.reset(new CompilerDriver(
+ compiler_options_.get(), verification_results_.get(), method_inliner_map_.get(),
+ Compiler::kQuick, instruction_set, instruction_set_features_.get(), false,
+ nullptr, new std::set<std::string>, 1, false, true,
+ std::string(), cumulative_logger_.get(), -1, std::string()));
+ // Disable dedupe so we can remove compiled methods.
+ compiler_driver_->SetDedupeEnabled(false);
+ compiler_driver_->SetSupportBootImageFixup(false);
+}
+
+JitCompiler::~JitCompiler() {
+}
+
+bool JitCompiler::CompileMethod(Thread* self, mirror::ArtMethod* method) {
+ uint64_t start_time = NanoTime();
+ StackHandleScope<2> hs(self);
+ self->AssertNoPendingException();
+ Runtime* runtime = Runtime::Current();
+ Handle<mirror::ArtMethod> h_method(hs.NewHandle(method));
+ if (runtime->GetJit()->GetCodeCache()->ContainsMethod(method)) {
+ VLOG(jit) << "Already compiled " << PrettyMethod(method);
+ return true; // Already compiled
+ }
+ Handle<mirror::Class> h_class(hs.NewHandle(h_method->GetDeclaringClass()));
+ if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
+ VLOG(jit) << "JIT failed to initialize " << PrettyMethod(h_method.Get());
+ return false;
+ }
+ const DexFile* dex_file = h_class->GetDexCache()->GetDexFile();
+ MethodReference method_ref(dex_file, h_method->GetDexMethodIndex());
+ // Only verify if we don't already have verification results.
+ if (verification_results_->GetVerifiedMethod(method_ref) == nullptr) {
+ std::string error;
+ if (verifier::MethodVerifier::VerifyMethod(h_method.Get(), true, &error) ==
+ verifier::MethodVerifier::kHardFailure) {
+ VLOG(jit) << "Not compile method " << PrettyMethod(h_method.Get())
+ << " due to verification failure " << error;
+ return false;
+ }
+ }
+ CompiledMethod* compiled_method(compiler_driver_->CompileMethod(self, h_method.Get()));
+ if (compiled_method == nullptr) {
+ return false;
+ }
+ total_time_ += NanoTime() - start_time;
+ // Don't add the method if we are supposed to be deoptimized.
+ bool result = false;
+ if (!runtime->GetInstrumentation()->AreAllMethodsDeoptimized()) {
+ const void* code = Runtime::Current()->GetClassLinker()->GetOatMethodQuickCodeFor(
+ h_method.Get());
+ if (code != nullptr) {
+ // Already have some compiled code, just use this instead of linking.
+ // TODO: Fix recompilation.
+ h_method->SetEntryPointFromQuickCompiledCode(code);
+ result = true;
+ } else {
+ result = MakeExecutable(compiled_method, h_method.Get());
+ }
+ }
+ // Remove the compiled method to save memory.
+ compiler_driver_->RemoveCompiledMethod(method_ref);
+ return result;
+}
+
+CompilerCallbacks* JitCompiler::GetCompilerCallbacks() const {
+ return callbacks_.get();
+}
+
+uint8_t* JitCompiler::WriteMethodHeaderAndCode(const CompiledMethod* compiled_method,
+ uint8_t* reserve_begin, uint8_t* reserve_end,
+ const uint8_t* mapping_table,
+ const uint8_t* vmap_table,
+ const uint8_t* gc_map) {
+ reserve_begin += sizeof(OatQuickMethodHeader);
+ reserve_begin = reinterpret_cast<uint8_t*>(
+ compiled_method->AlignCode(reinterpret_cast<uintptr_t>(reserve_begin)));
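+ // reserve_begin now points at the aligned code start; the header sits immediately before it.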
+ const auto* quick_code = compiled_method->GetQuickCode();
+ CHECK_LE(reserve_begin, reserve_end);
+ CHECK_LE(quick_code->size(), static_cast<size_t>(reserve_end - reserve_begin));
+ auto* code_ptr = reserve_begin;
+ OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(code_ptr) - 1;
+ // Construct the header last.
+ const auto frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
+ const auto core_spill_mask = compiled_method->GetCoreSpillMask();
+ const auto fp_spill_mask = compiled_method->GetFpSpillMask();
+ const auto code_size = quick_code->size();
+ CHECK_NE(code_size, 0U);
+ std::copy(quick_code->data(), quick_code->data() + code_size, code_ptr);
+ // Now that the code is in place, write out the method header that precedes it.
+ method_header = new (method_header) OatQuickMethodHeader(
+ code_ptr - mapping_table, code_ptr - vmap_table, code_ptr - gc_map, frame_size_in_bytes,
+ core_spill_mask, fp_spill_mask, code_size);
+ // Return the code ptr.
+ return code_ptr;
+}
+
+bool JitCompiler::AddToCodeCache(mirror::ArtMethod* method, const CompiledMethod* compiled_method,
+ OatFile::OatMethod* out_method) {
+ Runtime* runtime = Runtime::Current();
+ JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
+ const auto* quick_code = compiled_method->GetQuickCode();
+ if (quick_code == nullptr) {
+ return false;
+ }
+ const auto code_size = quick_code->size();
+ Thread* const self = Thread::Current();
+ const uint8_t* base = code_cache->CodeCachePtr();
+ auto* const mapping_table = compiled_method->GetMappingTable();
+ auto* const vmap_table = compiled_method->GetVmapTable();
+ auto* const gc_map = compiled_method->GetGcMap();
+ // Write out pre-header stuff.
+ uint8_t* const mapping_table_ptr = code_cache->AddDataArray(
+ self, mapping_table->data(), mapping_table->data() + mapping_table->size());
+ if (mapping_table_ptr == nullptr) {
+ return false; // Out of data cache.
+ }
+ uint8_t* const vmap_table_ptr = code_cache->AddDataArray(
+ self, vmap_table->data(), vmap_table->data() + vmap_table->size());
+ if (vmap_table_ptr == nullptr) {
+ return false; // Out of data cache.
+ }
+ uint8_t* const gc_map_ptr = code_cache->AddDataArray(
+ self, gc_map->data(), gc_map->data() + gc_map->size());
+ if (gc_map_ptr == nullptr) {
+ return false; // Out of data cache.
+ }
+ // Don't touch this until you protect / unprotect the code.
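+ // The extra 32 bytes presumably leave slack for the code alignment done in WriteMethodHeaderAndCode.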
+ const size_t reserve_size = sizeof(OatQuickMethodHeader) + quick_code->size() + 32;
+ uint8_t* const code_reserve = code_cache->ReserveCode(self, reserve_size);
+ if (code_reserve == nullptr) {
+ return false;
+ }
+ auto* code_ptr = WriteMethodHeaderAndCode(
+ compiled_method, code_reserve, code_reserve + reserve_size, mapping_table_ptr,
+ vmap_table_ptr, gc_map_ptr);
+
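+ // CodeDelta() folds in the Thumb mode bit on ARM; it is zero on other ISAs.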
+ const size_t thumb_offset = compiled_method->CodeDelta();
+ const uint32_t code_offset = code_ptr - base + thumb_offset;
+ *out_method = OatFile::OatMethod(base, code_offset);
+ DCHECK_EQ(out_method->GetGcMap(), gc_map_ptr);
+ DCHECK_EQ(out_method->GetMappingTable(), mapping_table_ptr);
+ DCHECK_EQ(out_method->GetVmapTable(), vmap_table_ptr);
+ DCHECK_EQ(out_method->GetFrameSizeInBytes(), compiled_method->GetFrameSizeInBytes());
+ DCHECK_EQ(out_method->GetCoreSpillMask(), compiled_method->GetCoreSpillMask());
+ DCHECK_EQ(out_method->GetFpSpillMask(), compiled_method->GetFpSpillMask());
+ VLOG(jit) << "JIT added " << PrettyMethod(method) << "@" << method << " ccache_size="
+ << PrettySize(code_cache->CodeCacheSize()) << ": " << reinterpret_cast<void*>(code_ptr)
+ << "," << reinterpret_cast<void*>(code_ptr + code_size);
+ return true;
+}
+
+bool JitCompiler::MakeExecutable(CompiledMethod* compiled_method, mirror::ArtMethod* method) {
+ CHECK(method != nullptr);
+ CHECK(compiled_method != nullptr);
+ OatFile::OatMethod oat_method(nullptr, 0);
+ if (!AddToCodeCache(method, compiled_method, &oat_method)) {
+ return false;
+ }
+ // TODO: Flush instruction cache.
+ oat_method.LinkMethod(method);
+ CHECK(Runtime::Current()->GetJit()->GetCodeCache()->ContainsMethod(method))
+ << PrettyMethod(method);
+ return true;
+}
+
+} // namespace jit
+} // namespace art
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
new file mode 100644
index 0000000..0876499
--- /dev/null
+++ b/compiler/jit/jit_compiler.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_JIT_JIT_COMPILER_H_
+#define ART_COMPILER_JIT_JIT_COMPILER_H_
+
+#include "base/mutex.h"
+#include "compiler_callbacks.h"
+#include "compiled_method.h"
+#include "dex/verification_results.h"
+#include "dex/quick/dex_file_to_method_inliner_map.h"
+#include "driver/compiler_driver.h"
+#include "driver/compiler_options.h"
+#include "oat_file.h"
+
+namespace art {
+
+class InstructionSetFeatures;
+
+namespace mirror {
+class ArtMethod;
+}
+
+namespace jit {
+
+class JitCompiler {
+ public:
+ static JitCompiler* Create();
+ virtual ~JitCompiler();
+ bool CompileMethod(Thread* self, mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // This is in the compiler since the runtime doesn't have access to the compiled method
+ // structures.
+ bool AddToCodeCache(mirror::ArtMethod* method, const CompiledMethod* compiled_method,
+ OatFile::OatMethod* out_method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ CompilerCallbacks* GetCompilerCallbacks() const;
+ size_t GetTotalCompileTime() const {
+ return total_time_;
+ }
+
+ private:
+ uint64_t total_time_;
+ std::unique_ptr<CompilerOptions> compiler_options_;
+ std::unique_ptr<CumulativeLogger> cumulative_logger_;
+ std::unique_ptr<VerificationResults> verification_results_;
+ std::unique_ptr<DexFileToMethodInlinerMap> method_inliner_map_;
+ std::unique_ptr<CompilerCallbacks> callbacks_;
+ std::unique_ptr<CompilerDriver> compiler_driver_;
+ std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
+
+ explicit JitCompiler();
+ uint8_t* WriteMethodHeaderAndCode(
+ const CompiledMethod* compiled_method, uint8_t* reserve_begin, uint8_t* reserve_end,
+ const uint8_t* mapping_table, const uint8_t* vmap_table, const uint8_t* gc_map);
+ bool MakeExecutable(CompiledMethod* compiled_method, mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+} // namespace jit
+
+} // namespace art
+
+#endif // ART_COMPILER_JIT_JIT_COMPILER_H_
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index 669c3bb..d3690b2 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -31,6 +31,10 @@
S0, S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11, S12, S13, S14, S15
};
+static const SRegister kHFSCalleeSaveRegisters[] = {
+ S16, S17, S18, S19, S20, S21, S22, S23, S24, S25, S26, S27, S28, S29, S30, S31
+};
+
static const DRegister kHFDArgumentRegisters[] = {
D0, D1, D2, D3, D4, D5, D6, D7
};
@@ -226,6 +230,10 @@
callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R8));
callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R10));
callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R11));
+
+ for (size_t i = 0; i < arraysize(kHFSCalleeSaveRegisters); ++i) {
+ callee_save_regs_.push_back(ArmManagedRegister::FromSRegister(kHFSCalleeSaveRegisters[i]));
+ }
}
uint32_t ArmJniCallingConvention::CoreSpillMask() const {
@@ -235,6 +243,14 @@
return result;
}
+uint32_t ArmJniCallingConvention::FpSpillMask() const {
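+ // s16-s31 are callee-saved under the VFP AAPCS; the unsigned literal below avoids shifting into the sign bit for S31.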
+ uint32_t result = 0;
+ for (size_t i = 0; i < arraysize(kHFSCalleeSaveRegisters); ++i) {
+ result |= (1u << kHFSCalleeSaveRegisters[i]);
+ }
+ return result;
+}
+
ManagedRegister ArmJniCallingConvention::ReturnScratchRegister() const {
return ArmManagedRegister::FromCoreRegister(R2);
}
diff --git a/compiler/jni/quick/arm/calling_convention_arm.h b/compiler/jni/quick/arm/calling_convention_arm.h
index 604ce1c..dbecb8e 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.h
+++ b/compiler/jni/quick/arm/calling_convention_arm.h
@@ -63,9 +63,7 @@
}
ManagedRegister ReturnScratchRegister() const OVERRIDE;
uint32_t CoreSpillMask() const OVERRIDE;
- uint32_t FpSpillMask() const OVERRIDE {
- return 0; // Floats aren't spilled in JNI down call
- }
+ uint32_t FpSpillMask() const OVERRIDE;
bool IsCurrentParamInRegister() OVERRIDE;
bool IsCurrentParamOnStack() OVERRIDE;
ManagedRegister CurrentParamRegister() OVERRIDE;
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index b9c8178..05eb80a 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -38,6 +38,10 @@
S0, S1, S2, S3, S4, S5, S6, S7
};
+static const DRegister kDCalleeSaveRegisters[] = {
+ D8, D9, D10, D11, D12, D13, D14, D15
+};
+
// Calling convention
ManagedRegister Arm64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
return Arm64ManagedRegister::FromXRegister(X20); // saved on entry restored on exit
@@ -166,6 +170,10 @@
callee_save_regs_.push_back(Arm64ManagedRegister::FromXRegister(X28));
callee_save_regs_.push_back(Arm64ManagedRegister::FromXRegister(X29));
callee_save_regs_.push_back(Arm64ManagedRegister::FromXRegister(X30));
+
+ for (size_t i = 0; i < arraysize(kDCalleeSaveRegisters); ++i) {
+ callee_save_regs_.push_back(Arm64ManagedRegister::FromDRegister(kDCalleeSaveRegisters[i]));
+ }
}
uint32_t Arm64JniCallingConvention::CoreSpillMask() const {
@@ -184,10 +192,11 @@
}
uint32_t Arm64JniCallingConvention::FpSpillMask() const {
- // Compute spill mask to agree with callee saves initialized in the constructor
- // Note: All callee-save fp registers will be preserved by aapcs64. And they are not used
- // in the jni method.
- return 0;
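+ // d8-d15 must now be preserved, so report them in the spill mask (AAPCS64 callee-saves).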
+ uint32_t result = 0;
+ for (size_t i = 0; i < arraysize(kDCalleeSaveRegisters); ++i) {
+ result |= (1 << kDCalleeSaveRegisters[i]);
+ }
+ return result;
}
ManagedRegister Arm64JniCallingConvention::ReturnScratchRegister() const {
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 9c0157e..c32a992 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -403,6 +403,7 @@
image_writer_(image_writer),
dex_files_(&dex_files),
size_(0u),
+ bss_size_(0u),
oat_data_offset_(0u),
image_file_location_oat_checksum_(image_file_location_oat_checksum),
image_file_location_oat_begin_(image_file_location_oat_begin),
@@ -549,7 +550,7 @@
struct OatWriter::VmapTableDataAccess {
static const SwapVector<uint8_t>* GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
- return &compiled_method->GetVmapTable();
+ return compiled_method->GetVmapTable();
}
static uint32_t GetOffset(OatClass* oat_class, size_t method_offsets_index) ALWAYS_INLINE {
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index e020d31..fd2ccae 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -96,6 +96,10 @@
return size_;
}
+ size_t GetBssSize() const {
+ return bss_size_;
+ }
+
const std::vector<uintptr_t>& GetAbsolutePatchLocations() const {
return absolute_patch_locations_;
}
@@ -266,6 +270,9 @@
// Size required for Oat data structures.
size_t size_;
+ // The size of the required .bss section holding the DexCache data.
+ size_t bss_size_;
+
// Offset of the oat data from the start of the mmapped region of the elf file.
size_t oat_data_offset_;
@@ -341,8 +348,8 @@
if (UNLIKELY(lhs->GetMappingTable() != rhs->GetMappingTable())) {
return lhs->GetMappingTable() < rhs->GetMappingTable();
}
- if (UNLIKELY(&lhs->GetVmapTable() != &rhs->GetVmapTable())) {
- return &lhs->GetVmapTable() < &rhs->GetVmapTable();
+ if (UNLIKELY(lhs->GetVmapTable() != rhs->GetVmapTable())) {
+ return lhs->GetVmapTable() < rhs->GetVmapTable();
}
if (UNLIKELY(lhs->GetGcMap() != rhs->GetGcMap())) {
return lhs->GetGcMap() < rhs->GetGcMap();
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index bf0804a..070981e 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -904,6 +904,21 @@
FindAndHandlePartialArrayLength(ushr);
}
+ void VisitAnd(HAnd* instruction) {
+ if (instruction->GetRight()->IsIntConstant()) {
+ int32_t constant = instruction->GetRight()->AsIntConstant()->GetValue();
+ if (constant > 0) {
+ // The constant serves as a mask, so any number masked with it
+ // gets a value range of [0, constant].
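+ // For example, "x & 0xFF" is always in the range [0, 0xFF].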
+ ValueRange* range = new (GetGraph()->GetArena()) ValueRange(
+ GetGraph()->GetArena(),
+ ValueBound(nullptr, 0),
+ ValueBound(nullptr, constant));
+ GetValueRangeMap(instruction->GetBlock())->Overwrite(instruction->GetId(), range);
+ }
+ }
+ }
+
void VisitNewArray(HNewArray* new_array) {
HInstruction* len = new_array->InputAt(0);
if (!len->IsIntConstant()) {
diff --git a/compiler/optimizing/bounds_check_elimination.h b/compiler/optimizing/bounds_check_elimination.h
index 05cb185..9e98ccf 100644
--- a/compiler/optimizing/bounds_check_elimination.h
+++ b/compiler/optimizing/bounds_check_elimination.h
@@ -23,10 +23,13 @@
class BoundsCheckElimination : public HOptimization {
public:
- explicit BoundsCheckElimination(HGraph* graph) : HOptimization(graph, true, "BCE") {}
+ explicit BoundsCheckElimination(HGraph* graph)
+ : HOptimization(graph, true, kBoundsCheckEliminationPassName) {}
void Run() OVERRIDE;
+ static constexpr const char* kBoundsCheckEliminationPassName = "BCE";
+
private:
DISALLOW_COPY_AND_ASSIGN(BoundsCheckElimination);
};
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 3e4a616..96196de 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -80,6 +80,8 @@
bool BuildGraph(const DexFile::CodeItem& code);
+ static constexpr const char* kBuilderPassName = "builder";
+
private:
// Analyzes the dex instruction and adds HInstruction to the graph
// to execute that instruction. Returns whether the instruction can
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 2a57fdc..ba5f7d8 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -386,7 +386,9 @@
compiler_options);
}
case kArm64: {
- return new arm64::CodeGeneratorARM64(graph, compiler_options);
+ return new arm64::CodeGeneratorARM64(graph,
+ *isa_features.AsArm64InstructionSetFeatures(),
+ compiler_options);
}
case kMips:
return nullptr;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 729bab7..c21084a 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -16,6 +16,7 @@
#include "code_generator_arm64.h"
+#include "arch/arm64/instruction_set_features_arm64.h"
#include "common_arm64.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
@@ -397,7 +398,9 @@
return next_location;
}
-CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph, const CompilerOptions& compiler_options)
+CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
+ const Arm64InstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options)
: CodeGenerator(graph,
kNumberOfAllocatableRegisters,
kNumberOfAllocatableFPRegisters,
@@ -408,7 +411,8 @@
block_labels_(nullptr),
location_builder_(graph, this),
instruction_visitor_(graph, this),
- move_resolver_(graph->GetArena(), this) {
+ move_resolver_(graph->GetArena(), this),
+ isa_features_(isa_features) {
// Save the link register (containing the return address) to mimic Quick.
AddAllocatedRegister(LocationFrom(lr));
}
@@ -998,9 +1002,10 @@
UseScratchRegisterScope temps(GetVIXLAssembler());
Register temp = temps.AcquireW();
size_t status_offset = mirror::Class::StatusOffset().SizeValue();
+ bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
// Even if the initialized flag is set, we need to ensure consistent memory ordering.
- if (kUseAcquireRelease) {
+ if (use_acquire_release) {
// TODO(vixl): Let the MacroAssembler handle MemOperand.
__ Add(temp, class_reg, status_offset);
__ Ldar(temp, HeapOperand(temp));
@@ -1689,9 +1694,10 @@
void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), instruction->GetFieldOffset());
+ bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
if (instruction->IsVolatile()) {
- if (kUseAcquireRelease) {
+ if (use_acquire_release) {
// NB: LoadAcquire will record the pc info if needed.
codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
} else {
@@ -1718,9 +1724,10 @@
CPURegister value = InputCPURegisterAt(instruction, 1);
Offset offset = instruction->GetFieldOffset();
Primitive::Type field_type = instruction->GetFieldType();
+ bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
if (instruction->IsVolatile()) {
- if (kUseAcquireRelease) {
+ if (use_acquire_release) {
codegen_->StoreRelease(field_type, value, HeapOperand(obj, offset));
codegen_->MaybeRecordImplicitNullCheck(instruction);
} else {
@@ -2437,9 +2444,10 @@
void InstructionCodeGeneratorARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), instruction->GetFieldOffset());
+ bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
if (instruction->IsVolatile()) {
- if (kUseAcquireRelease) {
+ if (use_acquire_release) {
// NB: LoadAcquire will record the pc info if needed.
codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
} else {
@@ -2464,9 +2472,10 @@
CPURegister value = InputCPURegisterAt(instruction, 1);
Offset offset = instruction->GetFieldOffset();
Primitive::Type field_type = instruction->GetFieldType();
+ bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
if (instruction->IsVolatile()) {
- if (kUseAcquireRelease) {
+ if (use_acquire_release) {
codegen_->StoreRelease(field_type, value, HeapOperand(cls, offset));
} else {
GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index afb7fc3..48961d6 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -32,10 +32,6 @@
class CodeGeneratorARM64;
-// TODO: Tune the use of Load-Acquire, Store-Release vs Data Memory Barriers.
-// For now we prefer the use of load-acquire, store-release over explicit memory barriers.
-static constexpr bool kUseAcquireRelease = true;
-
// Use a local definition to prevent copying mistakes.
static constexpr size_t kArm64WordSize = kArm64PointerSize;
@@ -195,7 +191,9 @@
class CodeGeneratorARM64 : public CodeGenerator {
public:
- CodeGeneratorARM64(HGraph* graph, const CompilerOptions& compiler_options);
+ CodeGeneratorARM64(HGraph* graph,
+ const Arm64InstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options);
virtual ~CodeGeneratorARM64() {}
void GenerateFrameEntry() OVERRIDE;
@@ -273,6 +271,10 @@
return InstructionSet::kArm64;
}
+ const Arm64InstructionSetFeatures& GetInstructionSetFeatures() const {
+ return isa_features_;
+ }
+
void Initialize() OVERRIDE {
HGraph* graph = GetGraph();
int length = graph->GetBlocks().Size();
@@ -317,6 +319,7 @@
InstructionCodeGeneratorARM64 instruction_visitor_;
ParallelMoveResolverARM64 move_resolver_;
Arm64Assembler assembler_;
+ const Arm64InstructionSetFeatures& isa_features_;
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM64);
};
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 7b35cfd..116dd15 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -819,7 +819,7 @@
// Materialized condition, compare against 0.
Location lhs = if_instr->GetLocations()->InAt(0);
if (lhs.IsRegister()) {
- __ cmpl(lhs.AsRegister<Register>(), Immediate(0));
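+ // testl of a register against itself sets the same flags as cmpl with zero, with a shorter encoding.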
+ __ testl(lhs.AsRegister<Register>(), lhs.AsRegister<Register>());
} else {
__ cmpl(Address(ESP, lhs.GetStackIndex()), Immediate(0));
}
@@ -836,9 +836,12 @@
if (rhs.IsRegister()) {
__ cmpl(lhs.AsRegister<Register>(), rhs.AsRegister<Register>());
} else if (rhs.IsConstant()) {
- HIntConstant* instruction = rhs.GetConstant()->AsIntConstant();
- Immediate imm(instruction->AsIntConstant()->GetValue());
- __ cmpl(lhs.AsRegister<Register>(), imm);
+ int32_t constant = rhs.GetConstant()->AsIntConstant()->GetValue();
+ if (constant == 0) {
+ __ testl(lhs.AsRegister<Register>(), lhs.AsRegister<Register>());
+ } else {
+ __ cmpl(lhs.AsRegister<Register>(), Immediate(constant));
+ }
} else {
__ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex()));
}
@@ -914,16 +917,19 @@
Register reg = locations->Out().AsRegister<Register>();
// Clear register: setcc only sets the low byte.
__ xorl(reg, reg);
- if (locations->InAt(1).IsRegister()) {
- __ cmpl(locations->InAt(0).AsRegister<Register>(),
- locations->InAt(1).AsRegister<Register>());
- } else if (locations->InAt(1).IsConstant()) {
- HConstant* instruction = locations->InAt(1).GetConstant();
- Immediate imm(CodeGenerator::GetInt32ValueOf(instruction));
- __ cmpl(locations->InAt(0).AsRegister<Register>(), imm);
+ Location lhs = locations->InAt(0);
+ Location rhs = locations->InAt(1);
+ if (rhs.IsRegister()) {
+ __ cmpl(lhs.AsRegister<Register>(), rhs.AsRegister<Register>());
+ } else if (rhs.IsConstant()) {
+ int32_t constant = rhs.GetConstant()->AsIntConstant()->GetValue();
+ if (constant == 0) {
+ __ testl(lhs.AsRegister<Register>(), lhs.AsRegister<Register>());
+ } else {
+ __ cmpl(lhs.AsRegister<Register>(), Immediate(constant));
+ }
} else {
- __ cmpl(locations->InAt(0).AsRegister<Register>(),
- Address(ESP, locations->InAt(1).GetStackIndex()));
+ __ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex()));
}
__ setb(X86Condition(comp->GetCondition()), reg);
}
@@ -1798,7 +1804,13 @@
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
switch (add->GetResultType()) {
- case Primitive::kPrimInt:
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::Any());
@@ -1824,15 +1836,26 @@
LocationSummary* locations = add->GetLocations();
Location first = locations->InAt(0);
Location second = locations->InAt(1);
- DCHECK(first.Equals(locations->Out()));
+ Location out = locations->Out();
+
switch (add->GetResultType()) {
case Primitive::kPrimInt: {
if (second.IsRegister()) {
- __ addl(first.AsRegister<Register>(), second.AsRegister<Register>());
+ if (out.AsRegister<Register>() == first.AsRegister<Register>()) {
+ __ addl(out.AsRegister<Register>(), second.AsRegister<Register>());
+ } else {
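+ // leal computes out = first + second without clobbering first (a three-operand add).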
+ __ leal(out.AsRegister<Register>(), Address(
+ first.AsRegister<Register>(), second.AsRegister<Register>(), TIMES_1, 0));
+ }
} else if (second.IsConstant()) {
- __ addl(first.AsRegister<Register>(),
- Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
+ int32_t value = second.GetConstant()->AsIntConstant()->GetValue();
+ if (out.AsRegister<Register>() == first.AsRegister<Register>()) {
+ __ addl(out.AsRegister<Register>(), Immediate(value));
+ } else {
+ __ leal(out.AsRegister<Register>(), Address(first.AsRegister<Register>(), value));
+ }
} else {
+ DCHECK(first.Equals(locations->Out()));
__ addl(first.AsRegister<Register>(), Address(ESP, second.GetStackIndex()));
}
break;
@@ -3502,12 +3525,16 @@
} else if (source.IsConstant()) {
HConstant* constant = source.GetConstant();
if (constant->IsIntConstant() || constant->IsNullConstant()) {
- Immediate imm(CodeGenerator::GetInt32ValueOf(constant));
+ int32_t value = CodeGenerator::GetInt32ValueOf(constant);
if (destination.IsRegister()) {
- __ movl(destination.AsRegister<Register>(), imm);
+ if (value == 0) {
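+ // xorl of a register with itself is the idiomatic, shortest way to load zero.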
+ __ xorl(destination.AsRegister<Register>(), destination.AsRegister<Register>());
+ } else {
+ __ movl(destination.AsRegister<Register>(), Immediate(value));
+ }
} else {
DCHECK(destination.IsStackSlot()) << destination;
- __ movl(Address(ESP, destination.GetStackIndex()), imm);
+ __ movl(Address(ESP, destination.GetStackIndex()), Immediate(value));
}
} else {
DCHECK(constant->IsFloatConstant());
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 74adb31..adc022a 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1868,8 +1868,19 @@
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- locations->SetOut(Location::SameAsFirstInput());
+ // We can use a leaq or addq if the constant can fit in an immediate.
+ HInstruction* rhs = add->InputAt(1);
+ bool is_int32_constant = false;
+ if (rhs->IsLongConstant()) {
+ int64_t value = rhs->AsLongConstant()->GetValue();
+ if (static_cast<int32_t>(value) == value) {
+ is_int32_constant = true;
+ }
+ }
+ locations->SetInAt(1,
+ is_int32_constant ? Location::RegisterOrConstant(rhs) :
+ Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
}
@@ -1917,7 +1928,25 @@
}
case Primitive::kPrimLong: {
- __ addq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
+ if (second.IsRegister()) {
+ if (out.AsRegister<Register>() == first.AsRegister<Register>()) {
+ __ addq(out.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
+ } else {
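+ // leaq likewise gives a non-destructive three-operand add for the 64-bit case.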
+ __ leaq(out.AsRegister<CpuRegister>(), Address(
+ first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>(), TIMES_1, 0));
+ }
+ } else {
+ DCHECK(second.IsConstant());
+ int64_t value = second.GetConstant()->AsLongConstant()->GetValue();
+ int32_t int32_value = Low32Bits(value);
+ DCHECK_EQ(int32_value, value);
+ if (out.AsRegister<Register>() == first.AsRegister<Register>()) {
+ __ addq(out.AsRegister<CpuRegister>(), Immediate(int32_value));
+ } else {
+ __ leaq(out.AsRegister<CpuRegister>(), Address(
+ first.AsRegister<CpuRegister>(), int32_value));
+ }
+ }
break;
}
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index e0e0b4c..868fc5b 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -18,6 +18,7 @@
#include "arch/instruction_set.h"
#include "arch/arm/instruction_set_features_arm.h"
+#include "arch/arm64/instruction_set_features_arm64.h"
#include "base/macros.h"
#include "builder.h"
#include "code_generator_arm.h"
@@ -115,9 +116,9 @@
Run(allocator, codegenX86, has_result, expected);
}
- std::unique_ptr<const ArmInstructionSetFeatures> features(
+ std::unique_ptr<const ArmInstructionSetFeatures> features_arm(
ArmInstructionSetFeatures::FromCppDefines());
- TestCodeGeneratorARM codegenARM(graph, *features.get(), compiler_options);
+ TestCodeGeneratorARM codegenARM(graph, *features_arm.get(), compiler_options);
codegenARM.CompileBaseline(&allocator, true);
if (kRuntimeISA == kArm || kRuntimeISA == kThumb2) {
Run(allocator, codegenARM, has_result, expected);
@@ -129,7 +130,9 @@
Run(allocator, codegenX86_64, has_result, expected);
}
- arm64::CodeGeneratorARM64 codegenARM64(graph, compiler_options);
+ std::unique_ptr<const Arm64InstructionSetFeatures> features_arm64(
+ Arm64InstructionSetFeatures::FromCppDefines());
+ arm64::CodeGeneratorARM64 codegenARM64(graph, *features_arm64.get(), compiler_options);
codegenARM64.CompileBaseline(&allocator, true);
if (kRuntimeISA == kArm64) {
Run(allocator, codegenARM64, has_result, expected);
@@ -166,7 +169,9 @@
compiler_options);
RunCodeOptimized(&codegenARM, graph, hook_before_codegen, has_result, expected);
} else if (kRuntimeISA == kArm64) {
- arm64::CodeGeneratorARM64 codegenARM64(graph, compiler_options);
+ arm64::CodeGeneratorARM64 codegenARM64(graph,
+ *Arm64InstructionSetFeatures::FromCppDefines(),
+ compiler_options);
RunCodeOptimized(&codegenARM64, graph, hook_before_codegen, has_result, expected);
} else if (kRuntimeISA == kX86) {
x86::CodeGeneratorX86 codegenX86(graph, compiler_options);
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index c592737..cabfa48 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -17,8 +17,10 @@
#include "graph_visualizer.h"
#include "code_generator.h"
+#include "licm.h"
#include "nodes.h"
#include "optimization.h"
+#include "register_allocator.h"
#include "ssa_liveness_analysis.h"
namespace art {
@@ -188,6 +190,10 @@
output_ << " " << phi->GetRegNumber();
}
+ bool IsPass(const char* name) {
+ return strcmp(pass_name_, name) == 0;
+ }
+
void PrintInstruction(HInstruction* instruction) {
output_ << instruction->DebugName();
instruction->Accept(this);
@@ -211,7 +217,7 @@
}
output_ << "])";
}
- if (pass_name_ == kLivenessPassName
+ if (IsPass(SsaLivenessAnalysis::kLivenessPassName)
&& is_after_pass_
&& instruction->GetLifetimePosition() != kNoLifetime) {
output_ << " (liveness: " << instruction->GetLifetimePosition();
@@ -221,7 +227,7 @@
interval.Dump(output_);
}
output_ << ")";
- } else if (pass_name_ == kRegisterAllocatorPassName && is_after_pass_) {
+ } else if (IsPass(RegisterAllocator::kRegisterAllocatorPassName) && is_after_pass_) {
LocationSummary* locations = instruction->GetLocations();
if (locations != nullptr) {
output_ << " ( ";
@@ -236,7 +242,7 @@
}
}
output_ << " (liveness: " << instruction->GetLifetimePosition() << ")";
- } else if (pass_name_ == kLoopInvariantCodeMotionPassName) {
+ } else if (IsPass(LICM::kLoopInvariantCodeMotionPassName)) {
output_ << " ( loop_header:";
HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
if (info == nullptr) {
diff --git a/compiler/optimizing/gvn.h b/compiler/optimizing/gvn.h
index 57e0487..e74d969 100644
--- a/compiler/optimizing/gvn.h
+++ b/compiler/optimizing/gvn.h
@@ -27,10 +27,12 @@
class GVNOptimization : public HOptimization {
public:
GVNOptimization(HGraph* graph, const SideEffectsAnalysis& side_effects)
- : HOptimization(graph, true, "GVN"), side_effects_(side_effects) {}
+ : HOptimization(graph, true, kGlobalValueNumberingPassName), side_effects_(side_effects) {}
void Run() OVERRIDE;
+ static constexpr const char* kGlobalValueNumberingPassName = "GVN";
+
private:
const SideEffectsAnalysis& side_effects_;
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 8e9cf83..2b08d3d 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -35,13 +35,15 @@
CompilerDriver* compiler_driver,
OptimizingCompilerStats* stats,
size_t depth = 0)
- : HOptimization(outer_graph, true, "inliner", stats),
+ : HOptimization(outer_graph, true, kInlinerPassName, stats),
outer_compilation_unit_(outer_compilation_unit),
compiler_driver_(compiler_driver),
depth_(depth) {}
void Run() OVERRIDE;
+ static constexpr const char* kInlinerPassName = "inliner";
+
private:
bool TryInline(HInvoke* invoke_instruction, uint32_t method_index, InvokeType invoke_type) const;
diff --git a/compiler/optimizing/instruction_simplifier.h b/compiler/optimizing/instruction_simplifier.h
index a7ff755..0244620 100644
--- a/compiler/optimizing/instruction_simplifier.h
+++ b/compiler/optimizing/instruction_simplifier.h
@@ -30,9 +30,11 @@
public:
InstructionSimplifier(HGraph* graph,
OptimizingCompilerStats* stats = nullptr,
- const char* name = "instruction_simplifier")
+ const char* name = kInstructionSimplifierPassName)
: HOptimization(graph, true, name, stats) {}
+ static constexpr const char* kInstructionSimplifierPassName = "instruction_simplifier";
+
void Run() OVERRIDE;
};
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 29cc8ef..dbb7cba 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -29,11 +29,13 @@
class IntrinsicsRecognizer : public HOptimization {
public:
IntrinsicsRecognizer(HGraph* graph, const DexFile* dex_file, CompilerDriver* driver)
- : HOptimization(graph, true, "intrinsics_recognition"),
+ : HOptimization(graph, true, kIntrinsicsRecognizerPassName),
dex_file_(dex_file), driver_(driver) {}
void Run() OVERRIDE;
+ static constexpr const char* kIntrinsicsRecognizerPassName = "intrinsics_recognition";
+
private:
const DexFile* dex_file_;
CompilerDriver* driver_;
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 8874edc..1ddff8a 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -16,6 +16,7 @@
#include "intrinsics_arm64.h"
+#include "arch/arm64/instruction_set_features_arm64.h"
#include "code_generator_arm64.h"
#include "common_arm64.h"
#include "entrypoints/quick/quick_entrypoints.h"
@@ -682,10 +683,11 @@
Register base = WRegisterFrom(locations->InAt(1)); // Object pointer.
Register offset = XRegisterFrom(locations->InAt(2)); // Long offset.
Register trg = RegisterFrom(locations->Out(), type);
+ bool use_acquire_release = codegen->GetInstructionSetFeatures().PreferAcquireRelease();
MemOperand mem_op(base.X(), offset);
if (is_volatile) {
- if (kUseAcquireRelease) {
+ if (use_acquire_release) {
codegen->LoadAcquire(invoke, trg, mem_op);
} else {
codegen->Load(type, trg, mem_op);
@@ -792,11 +794,12 @@
Register base = WRegisterFrom(locations->InAt(1)); // Object pointer.
Register offset = XRegisterFrom(locations->InAt(2)); // Long offset.
Register value = RegisterFrom(locations->InAt(3), type);
+ bool use_acquire_release = codegen->GetInstructionSetFeatures().PreferAcquireRelease();
MemOperand mem_op(base.X(), offset);
if (is_volatile || is_ordered) {
- if (kUseAcquireRelease) {
+ if (use_acquire_release) {
codegen->StoreRelease(type, value, mem_op);
} else {
__ Dmb(InnerShareable, BarrierAll);
@@ -856,10 +859,7 @@
}
static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGeneratorARM64* codegen) {
- // TODO: Currently we use acquire-release load-stores in the CAS loop. One could reasonably write
- // a version relying on simple exclusive load-stores and barriers instead.
- static_assert(kUseAcquireRelease, "Non-acquire-release inlined CAS not implemented, yet.");
-
+ bool use_acquire_release = codegen->GetInstructionSetFeatures().PreferAcquireRelease();
vixl::MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_;
Register out = WRegisterFrom(locations->Out()); // Boolean result.
@@ -889,15 +889,23 @@
// result = tmp_value != 0;
vixl::Label loop_head, exit_loop;
- __ Bind(&loop_head);
-
- __ Ldaxr(tmp_value, MemOperand(tmp_ptr));
- __ Cmp(tmp_value, expected);
- __ B(&exit_loop, ne);
-
- __ Stlxr(tmp_32, value, MemOperand(tmp_ptr));
- __ Cbnz(tmp_32, &loop_head);
-
+ if (use_acquire_release) {
+ __ Bind(&loop_head);
+ __ Ldaxr(tmp_value, MemOperand(tmp_ptr));
+ __ Cmp(tmp_value, expected);
+ __ B(&exit_loop, ne);
+ __ Stlxr(tmp_32, value, MemOperand(tmp_ptr));
+ __ Cbnz(tmp_32, &loop_head);
+ } else {
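+ // Fall back to plain exclusive load/store bracketed by explicit memory barriers.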
+ __ Dmb(InnerShareable, BarrierWrites);
+ __ Bind(&loop_head);
+ __ Ldxr(tmp_value, MemOperand(tmp_ptr));
+ __ Cmp(tmp_value, expected);
+ __ B(&exit_loop, ne);
+ __ Stxr(tmp_32, value, MemOperand(tmp_ptr));
+ __ Cbnz(tmp_32, &loop_head);
+ __ Dmb(InnerShareable, BarrierAll);
+ }
__ Bind(&exit_loop);
__ Cset(out, eq);
}
diff --git a/compiler/optimizing/licm.h b/compiler/optimizing/licm.h
index 4812394..cb6170e 100644
--- a/compiler/optimizing/licm.h
+++ b/compiler/optimizing/licm.h
@@ -31,6 +31,8 @@
void Run() OVERRIDE;
+ static constexpr const char* kLoopInvariantCodeMotionPassName = "licm";
+
private:
const SideEffectsAnalysis& side_effects_;
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index 17914e8..c102c4f 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -399,11 +399,11 @@
LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval();
LiveRange* range = interval->GetFirstRange();
ASSERT_EQ(2u, range->GetStart());
- ASSERT_EQ(16u, range->GetEnd());
+ ASSERT_EQ(17u, range->GetEnd());
range = range->GetNext();
ASSERT_TRUE(range != nullptr);
ASSERT_EQ(20u, range->GetStart());
- ASSERT_EQ(22u, range->GetEnd());
+ ASSERT_EQ(23u, range->GetEnd());
ASSERT_TRUE(range->GetNext() == nullptr);
// Test for the 4 constant.
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index af39e09..8b20281 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -22,12 +22,6 @@
namespace art {
-static const char* kBuilderPassName = "builder";
-static const char* kSsaBuilderPassName = "ssa_builder";
-static const char* kLivenessPassName = "liveness";
-static const char* kRegisterAllocatorPassName = "register";
-static const char* kLoopInvariantCodeMotionPassName = "licm";
-
/**
* Abstraction to implement an optimization pass.
*/
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 2fef8c7..eb98424 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -56,7 +56,7 @@
*/
class CodeVectorAllocator FINAL : public CodeAllocator {
public:
- CodeVectorAllocator() {}
+ CodeVectorAllocator() : size_(0) {}
virtual uint8_t* Allocate(size_t size) {
size_ = size;
@@ -361,11 +361,11 @@
PrepareForRegisterAllocation(graph).Run();
SsaLivenessAnalysis liveness(*graph, codegen);
{
- PassInfo pass_info(kLivenessPassName, pass_info_printer);
+ PassInfo pass_info(SsaLivenessAnalysis::kLivenessPassName, pass_info_printer);
liveness.Analyze();
}
{
- PassInfo pass_info(kRegisterAllocatorPassName, pass_info_printer);
+ PassInfo pass_info(RegisterAllocator::kRegisterAllocatorPassName, pass_info_printer);
RegisterAllocator(graph->GetArena(), codegen, liveness).AllocateRegisters();
}
@@ -495,7 +495,7 @@
VLOG(compiler) << "Building " << method_name;
{
- PassInfo pass_info(kBuilderPassName, &pass_info_printer);
+ PassInfo pass_info(HGraphBuilder::kBuilderPassName, &pass_info_printer);
if (!builder.BuildGraph(*code_item)) {
CHECK(!shouldCompile) << "Could not build graph in optimizing compiler";
return nullptr;
@@ -508,7 +508,7 @@
VLOG(compiler) << "Optimizing " << method_name;
{
- PassInfo pass_info(kSsaBuilderPassName, &pass_info_printer);
+ PassInfo pass_info(SsaBuilder::kSsaBuilderPassName, &pass_info_printer);
if (!graph->TryBuildingSsa()) {
// We could not transform the graph to SSA, bailout.
LOG(INFO) << "Skipping compilation of " << method_name << ": it contains a non natural loop";
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index 815caab..733e18e 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -34,7 +34,7 @@
const DexFile& dex_file,
const DexCompilationUnit& dex_compilation_unit,
StackHandleScopeCollection* handles)
- : HOptimization(graph, true, "reference_type_propagation"),
+ : HOptimization(graph, true, kReferenceTypePropagationPassName),
dex_file_(dex_file),
dex_compilation_unit_(dex_compilation_unit),
handles_(handles),
@@ -42,6 +42,8 @@
void Run() OVERRIDE;
+ static constexpr const char* kReferenceTypePropagationPassName = "reference_type_propagation";
+
private:
void VisitNewInstance(HNewInstance* new_instance);
void VisitLoadClass(HLoadClass* load_class);
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index ff2f106..579f069 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -81,6 +81,8 @@
+ double_spill_slots_.Size();
}
+ static constexpr const char* kRegisterAllocatorPassName = "register";
+
private:
// Main methods of the allocator.
void LinearScan();
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index e5d06a9..b757a3b 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -322,9 +322,9 @@
TEST(RegisterAllocatorTest, FirstRegisterUse) {
const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
- Instruction::ADD_INT_LIT8 | 1 << 8, 1 << 8,
- Instruction::ADD_INT_LIT8 | 0 << 8, 1 << 8,
- Instruction::ADD_INT_LIT8 | 1 << 8, 1 << 8 | 1,
+ Instruction::XOR_INT_LIT8 | 1 << 8, 1 << 8,
+ Instruction::XOR_INT_LIT8 | 0 << 8, 1 << 8,
+ Instruction::XOR_INT_LIT8 | 1 << 8, 1 << 8 | 1,
Instruction::RETURN_VOID);
ArenaPool pool;
@@ -334,27 +334,27 @@
SsaLivenessAnalysis liveness(*graph, &codegen);
liveness.Analyze();
- HAdd* first_add = graph->GetBlocks().Get(1)->GetFirstInstruction()->AsAdd();
- HAdd* last_add = graph->GetBlocks().Get(1)->GetLastInstruction()->GetPrevious()->AsAdd();
- ASSERT_EQ(last_add->InputAt(0), first_add);
- LiveInterval* interval = first_add->GetLiveInterval();
- ASSERT_EQ(interval->GetEnd(), last_add->GetLifetimePosition());
+ HXor* first_xor = graph->GetBlocks().Get(1)->GetFirstInstruction()->AsXor();
+ HXor* last_xor = graph->GetBlocks().Get(1)->GetLastInstruction()->GetPrevious()->AsXor();
+ ASSERT_EQ(last_xor->InputAt(0), first_xor);
+ LiveInterval* interval = first_xor->GetLiveInterval();
+ ASSERT_EQ(interval->GetEnd(), last_xor->GetLifetimePosition());
ASSERT_TRUE(interval->GetNextSibling() == nullptr);
// We need a register for the output of the instruction.
- ASSERT_EQ(interval->FirstRegisterUse(), first_add->GetLifetimePosition());
+ ASSERT_EQ(interval->FirstRegisterUse(), first_xor->GetLifetimePosition());
// Split at the next instruction.
- interval = interval->SplitAt(first_add->GetLifetimePosition() + 2);
+ interval = interval->SplitAt(first_xor->GetLifetimePosition() + 2);
- // The user of the split is the last add.
+ // The user of the split is the last xor.
- ASSERT_EQ(interval->FirstRegisterUse(), last_add->GetLifetimePosition());
+ ASSERT_EQ(interval->FirstRegisterUse(), last_xor->GetLifetimePosition());
- // Split before the last add.
+ // Split before the last xor.
- LiveInterval* new_interval = interval->SplitAt(last_add->GetLifetimePosition() - 1);
+ LiveInterval* new_interval = interval->SplitAt(last_xor->GetLifetimePosition() - 1);
// Ensure the current interval has no register use...
ASSERT_EQ(interval->FirstRegisterUse(), kNoLifetime);
- // And the new interval has it for the last add.
+ // And the new interval has it for the last xor.
- ASSERT_EQ(new_interval->FirstRegisterUse(), last_add->GetLifetimePosition());
+ ASSERT_EQ(new_interval->FirstRegisterUse(), last_xor->GetLifetimePosition());
}
TEST(RegisterAllocatorTest, DeadPhi) {
@@ -634,9 +634,9 @@
}
}
-static HGraph* BuildTwoAdds(ArenaAllocator* allocator,
- HInstruction** first_add,
- HInstruction** second_add) {
+static HGraph* BuildTwoSubs(ArenaAllocator* allocator,
+ HInstruction** first_sub,
+ HInstruction** second_sub) {
HGraph* graph = new (allocator) HGraph(allocator);
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
@@ -652,10 +652,10 @@
graph->AddBlock(block);
entry->AddSuccessor(block);
- *first_add = new (allocator) HAdd(Primitive::kPrimInt, parameter, constant1);
- block->AddInstruction(*first_add);
- *second_add = new (allocator) HAdd(Primitive::kPrimInt, *first_add, constant2);
- block->AddInstruction(*second_add);
+ *first_sub = new (allocator) HSub(Primitive::kPrimInt, parameter, constant1);
+ block->AddInstruction(*first_sub);
+ *second_sub = new (allocator) HSub(Primitive::kPrimInt, *first_sub, constant2);
+ block->AddInstruction(*second_sub);
block->AddInstruction(new (allocator) HExit());
return graph;
@@ -664,10 +664,10 @@
TEST(RegisterAllocatorTest, SameAsFirstInputHint) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HInstruction *first_add, *second_add;
+ HInstruction *first_sub, *second_sub;
{
- HGraph* graph = BuildTwoAdds(&allocator, &first_add, &second_add);
+ HGraph* graph = BuildTwoSubs(&allocator, &first_sub, &second_sub);
x86::CodeGeneratorX86 codegen(graph, CompilerOptions());
SsaLivenessAnalysis liveness(*graph, &codegen);
liveness.Analyze();
@@ -676,27 +676,27 @@
register_allocator.AllocateRegisters();
// Sanity check that in normal conditions, the registers are the same.
- ASSERT_EQ(first_add->GetLiveInterval()->GetRegister(), 1);
- ASSERT_EQ(second_add->GetLiveInterval()->GetRegister(), 1);
+ ASSERT_EQ(first_sub->GetLiveInterval()->GetRegister(), 1);
+ ASSERT_EQ(second_sub->GetLiveInterval()->GetRegister(), 1);
}
{
- HGraph* graph = BuildTwoAdds(&allocator, &first_add, &second_add);
+ HGraph* graph = BuildTwoSubs(&allocator, &first_sub, &second_sub);
x86::CodeGeneratorX86 codegen(graph, CompilerOptions());
SsaLivenessAnalysis liveness(*graph, &codegen);
liveness.Analyze();
// Check that both subs get the same register.
// Don't use UpdateOutput because output is already allocated.
- first_add->InputAt(0)->GetLocations()->output_ = Location::RegisterLocation(2);
- ASSERT_EQ(first_add->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
- ASSERT_EQ(second_add->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
+ first_sub->InputAt(0)->GetLocations()->output_ = Location::RegisterLocation(2);
+ ASSERT_EQ(first_sub->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
+ ASSERT_EQ(second_sub->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
RegisterAllocator register_allocator(&allocator, &codegen, liveness);
register_allocator.AllocateRegisters();
- ASSERT_EQ(first_add->GetLiveInterval()->GetRegister(), 2);
- ASSERT_EQ(second_add->GetLiveInterval()->GetRegister(), 2);
+ ASSERT_EQ(first_sub->GetLiveInterval()->GetRegister(), 2);
+ ASSERT_EQ(second_sub->GetLiveInterval()->GetRegister(), 2);
}
}
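
The hint being tested is the kSameAsFirstInput output policy: when an instruction's output is constrained to its first input's location, the allocator keeps the whole chain in one register. A hedged sketch of the propagation idea (simplified; not the real RegisterAllocator logic):

#include <cassert>
#include <vector>

enum class OutPolicy { kAny, kSameAsFirstInput };

struct Node {
  OutPolicy policy;
  Node* first_input;  // nullptr for parameters/constants
  int reg;            // assigned register, -1 if unset
};

// Walk nodes in order and propagate the first input's register whenever
// the output is constrained to match it.
void PropagateSameAsFirstInput(const std::vector<Node*>& order) {
  for (Node* n : order) {
    if (n->policy == OutPolicy::kSameAsFirstInput && n->first_input != nullptr) {
      n->reg = n->first_input->reg;
    }
  }
}

void Demo() {
  Node param{OutPolicy::kAny, nullptr, 2};
  Node first{OutPolicy::kSameAsFirstInput, &param, -1};
  Node second{OutPolicy::kSameAsFirstInput, &first, -1};
  std::vector<Node*> order{&first, &second};
  PropagateSameAsFirstInput(order);
  assert(first.reg == 2 && second.reg == 2);  // the whole chain shares r2
}
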
diff --git a/compiler/optimizing/side_effects_analysis.h b/compiler/optimizing/side_effects_analysis.h
index f1c98ac..415d10c 100644
--- a/compiler/optimizing/side_effects_analysis.h
+++ b/compiler/optimizing/side_effects_analysis.h
@@ -25,7 +25,7 @@
class SideEffectsAnalysis : public HOptimization {
public:
explicit SideEffectsAnalysis(HGraph* graph)
- : HOptimization(graph, true, "SideEffects"),
+ : HOptimization(graph, true, kSideEffectsAnalysisPassName),
graph_(graph),
block_effects_(graph->GetArena(), graph->GetBlocks().Size(), SideEffects::None()),
loop_effects_(graph->GetArena(), graph->GetBlocks().Size(), SideEffects::None()) {}
@@ -38,6 +38,8 @@
bool HasRun() const { return has_run_; }
+ static constexpr const char* kSideEffectsAnalysisPassName = "SideEffects";
+
private:
void UpdateLoopEffects(HLoopInformation* info, SideEffects effects);
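
Exporting the pass name as a class constant lets other code reference it without duplicating the string literal. A hypothetical consumer (ShouldDumpAfterPass is illustrative, not an existing ART helper), assuming this header is included:

#include <cstring>

bool ShouldDumpAfterPass(const char* pass_name) {
  // Referencing the constant means a typo becomes a compile error,
  // not a silently mismatched string.
  return strcmp(pass_name, SideEffectsAnalysis::kSideEffectsAnalysisPassName) == 0;
}
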
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 148e959..f50da46 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -60,6 +60,8 @@
static HInstruction* GetReferenceTypeEquivalent(HInstruction* instruction);
+ static constexpr const char* kSsaBuilderPassName = "ssa_builder";
+
private:
// Locals for the current block being visited.
HEnvironment* current_locals_;
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index bebb73b..d009390 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -477,12 +477,12 @@
}
}
-Location LiveInterval::GetLocationAt(size_t position) const {
+Location LiveInterval::GetLocationAt(size_t position) {
return GetIntervalAt(position).ToLocation();
}
-const LiveInterval& LiveInterval::GetIntervalAt(size_t position) const {
- const LiveInterval* current = this;
+const LiveInterval& LiveInterval::GetIntervalAt(size_t position) {
+ LiveInterval* current = this;
while (!current->Covers(position)) {
current = current->GetNextSibling();
DCHECK(current != nullptr);
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 0e68a61..9ff2f20 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -322,18 +322,8 @@
return last_range_->GetEnd() <= position;
}
- bool Covers(size_t position) const {
- if (IsDeadAt(position)) {
- return false;
- }
- LiveRange* current = first_range_;
- while (current != nullptr) {
- if (position >= current->GetStart() && position < current->GetEnd()) {
- return true;
- }
- current = current->GetNext();
- }
- return false;
+ bool Covers(size_t position) {
+ return !IsDeadAt(position) && FindRangeAt(position) != nullptr;
}
/**
@@ -345,19 +335,19 @@
LiveRange* my_range = first_range_;
LiveRange* other_range = other->first_range_;
do {
- if (my_range->IntersectsWith(*other_range)) {
- return std::max(my_range->GetStart(), other_range->GetStart());
- } else if (my_range->IsBefore(*other_range)) {
+ if (my_range->IsBefore(*other_range)) {
my_range = my_range->GetNext();
if (my_range == nullptr) {
return kNoLifetime;
}
- } else {
- DCHECK(other_range->IsBefore(*my_range));
+ } else if (other_range->IsBefore(*my_range)) {
other_range = other_range->GetNext();
if (other_range == nullptr) {
return kNoLifetime;
}
+ } else {
+ DCHECK(my_range->IntersectsWith(*other_range));
+ return std::max(my_range->GetStart(), other_range->GetStart());
}
} while (true);
}
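
The restructured loop above is the classic two-pointer walk over sorted, disjoint range lists: advance whichever list is strictly behind, and the first time neither is behind, the ranges overlap. A standalone distillation (assuming half-open ranges, as LiveRange uses):

#include <algorithm>
#include <cstddef>

constexpr size_t kNoLifetime = static_cast<size_t>(-1);

struct Range {
  size_t start, end;  // half-open [start, end)
  Range* next;        // sorted, non-overlapping
};

size_t FirstIntersection(Range* a, Range* b) {
  while (a != nullptr && b != nullptr) {
    if (a->end <= b->start) {         // a is entirely before b
      a = a->next;
    } else if (b->end <= a->start) {  // b is entirely before a
      b = b->next;
    } else {                          // neither is before the other: overlap
      return std::max(a->start, b->start);
    }
  }
  return kNoLifetime;
}
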
@@ -466,6 +456,7 @@
new_interval->parent_ = parent_;
new_interval->first_use_ = first_use_;
+ last_visited_range_ = nullptr;
LiveRange* current = first_range_;
LiveRange* previous = nullptr;
// Iterate over the ranges, and either find a range that covers this position, or
@@ -569,10 +560,10 @@
Location ToLocation() const;
// Returns the location of the interval following its siblings at `position`.
- Location GetLocationAt(size_t position) const;
+ Location GetLocationAt(size_t position);
// Finds the interval that covers `position`.
- const LiveInterval& GetIntervalAt(size_t position) const;
+ const LiveInterval& GetIntervalAt(size_t position);
// Returns whether `other` and `this` share the same kind of register.
bool SameRegisterKind(Location other) const;
@@ -698,6 +689,7 @@
: allocator_(allocator),
first_range_(nullptr),
last_range_(nullptr),
+ last_visited_range_(nullptr),
first_use_(nullptr),
type_(type),
next_sibling_(nullptr),
@@ -711,6 +703,41 @@
high_or_low_interval_(nullptr),
defined_by_(defined_by) {}
+ // Returns a LiveRange covering the given position or nullptr if no such range
+ // exists in the interval.
+ // This is a linear search optimized for multiple queries in the non-decreasing
+ // position order typical of linear scan register allocation.
+ LiveRange* FindRangeAt(size_t position) {
+ // Make sure operations on the interval didn't leave us with a cached result
+ // from a sibling.
+ if (kIsDebugBuild) {
+ if (last_visited_range_ != nullptr) {
+ DCHECK_GE(last_visited_range_->GetStart(), GetStart());
+ DCHECK_LE(last_visited_range_->GetEnd(), GetEnd());
+ }
+ }
+
+ // If this method was called earlier with a lower position, reuse that result
+ // as a starting point to save time. However, linear scan performs 3 scans:
+ // integers, floats, and resolution. Instead of resetting the cache at the
+ // beginning of each scan, we simply fall back to the first range whenever
+ // the queried position precedes the cached range.
+ LiveRange* current;
+ if (last_visited_range_ != nullptr && position >= last_visited_range_->GetStart()) {
+ current = last_visited_range_;
+ } else {
+ current = first_range_;
+ }
+ while (current != nullptr && current->GetEnd() <= position) {
+ current = current->GetNext();
+ }
+ last_visited_range_ = current;
+ if (current != nullptr && position >= current->GetStart()) {
+ return current;
+ } else {
+ return nullptr;
+ }
+ }
+
ArenaAllocator* const allocator_;
// Ranges of this interval. We need a quick access to the last range to test
@@ -718,6 +745,10 @@
LiveRange* first_range_;
LiveRange* last_range_;
+ // Last visited range. This is a range search optimization leveraging the fact
+ // that the register allocator does a linear scan through the intervals.
+ LiveRange* last_visited_range_;
+
// Uses of this interval. Note that this linked list is shared amongst siblings.
UsePosition* first_use_;
@@ -816,6 +847,8 @@
return number_of_ssa_values_;
}
+ static constexpr const char* kLivenessPassName = "liveness";
+
private:
// Linearize the graph so that:
// (1): a block is always after its dominator,
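
The caching idea behind FindRangeAt, boiled down to a standalone sketch (simplified model, not the real class): queries at non-decreasing positions resume from the last visited range, so a full linear scan pass touches each range once overall.

#include <cstddef>

struct Range {
  size_t start, end;  // half-open [start, end)
  Range* next;
};

struct CachedRangeFinder {
  Range* first;
  Range* last_visited = nullptr;  // cache for monotone query sequences

  // Returns the range covering pos, or nullptr. Amortized O(1) when pos is
  // non-decreasing across calls, as during linear scan register allocation.
  Range* FindRangeAt(size_t pos) {
    Range* current = (last_visited != nullptr && pos >= last_visited->start)
                         ? last_visited  // resume from the cached range
                         : first;        // backward query: restart
    while (current != nullptr && current->end <= pos) {
      current = current->next;
    }
    last_visited = current;
    return (current != nullptr && pos >= current->start) ? current : nullptr;
  }
};
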
diff --git a/compiler/optimizing/ssa_phi_elimination.h b/compiler/optimizing/ssa_phi_elimination.h
index 88a5279..c4b63ab 100644
--- a/compiler/optimizing/ssa_phi_elimination.h
+++ b/compiler/optimizing/ssa_phi_elimination.h
@@ -29,7 +29,7 @@
class SsaDeadPhiElimination : public HOptimization {
public:
explicit SsaDeadPhiElimination(HGraph* graph)
- : HOptimization(graph, true, "dead_phi_elimination"),
+ : HOptimization(graph, true, kSsaDeadPhiEliminationPassName),
worklist_(graph->GetArena(), kDefaultWorklistSize) {}
void Run() OVERRIDE;
@@ -37,6 +37,8 @@
void MarkDeadPhis();
void EliminateDeadPhis();
+ static constexpr const char* kSsaDeadPhiEliminationPassName = "dead_phi_elimination";
+
private:
GrowableArray<HPhi*> worklist_;
@@ -54,11 +56,13 @@
class SsaRedundantPhiElimination : public HOptimization {
public:
explicit SsaRedundantPhiElimination(HGraph* graph)
- : HOptimization(graph, true, "redundant_phi_elimination"),
+ : HOptimization(graph, true, kSsaRedundantPhiEliminationPassName),
worklist_(graph->GetArena(), kDefaultWorklistSize) {}
void Run() OVERRIDE;
+ static constexpr const char* kSsaRedundantPhiEliminationPassName = "redundant_phi_elimination";
+
private:
GrowableArray<HPhi*> worklist_;
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index a52e6eb..a02191b 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -385,12 +385,24 @@
// Push callee saves and link register.
RegList push_list = 1 << LR;
size_t pushed_values = 1;
+ int32_t min_s = kNumberOfSRegisters;
+ int32_t max_s = -1;
for (size_t i = 0; i < callee_save_regs.size(); i++) {
- Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
- push_list |= 1 << reg;
- pushed_values++;
+ if (callee_save_regs.at(i).AsArm().IsCoreRegister()) {
+ Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
+ push_list |= 1 << reg;
+ pushed_values++;
+ } else {
+ CHECK(callee_save_regs.at(i).AsArm().IsSRegister());
+ min_s = std::min(static_cast<int>(callee_save_regs.at(i).AsArm().AsSRegister()), min_s);
+ max_s = std::max(static_cast<int>(callee_save_regs.at(i).AsArm().AsSRegister()), max_s);
+ }
}
PushList(push_list);
+ if (max_s != -1) {
+ pushed_values += 1 + max_s - min_s;
+ vpushs(static_cast<SRegister>(min_s), 1 + max_s - min_s);
+ }
// Increase frame to required size.
CHECK_GT(frame_size, pushed_values * kFramePointerSize); // Must at least have space for Method*.
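
The prologue above saves FP callee saves by computing the lowest and highest S register in the set and issuing one vpush over that span. A sketch of the span computation (note that, as in the code above, every register between min and max gets pushed, so the requested set is effectively treated as a contiguous block):

#include <algorithm>
#include <climits>

// Returns the number of S registers to vpush and sets *out_first to the
// lowest one; returns 0 when no S registers were requested.
int ComputeSRegisterSpan(const int* s_regs, int count, int* out_first) {
  int min_s = INT_MAX;
  int max_s = -1;
  for (int i = 0; i < count; ++i) {
    min_s = std::min(min_s, s_regs[i]);
    max_s = std::max(max_s, s_regs[i]);
  }
  if (max_s == -1) {
    return 0;  // core registers only
  }
  *out_first = min_s;
  return 1 + max_s - min_s;  // vpush length, matching pushed_values above
}
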
@@ -427,10 +439,22 @@
// Compute callee saves to pop and PC.
RegList pop_list = 1 << PC;
size_t pop_values = 1;
+ int32_t min_s = kNumberOfSRegisters;
+ int32_t max_s = -1;
for (size_t i = 0; i < callee_save_regs.size(); i++) {
- Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
- pop_list |= 1 << reg;
- pop_values++;
+ if (callee_save_regs.at(i).AsArm().IsCoreRegister()) {
+ Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
+ pop_list |= 1 << reg;
+ pop_values++;
+ } else {
+ CHECK(callee_save_regs.at(i).AsArm().IsSRegister());
+ min_s = std::min(static_cast<int>(callee_save_regs.at(i).AsArm().AsSRegister()), min_s);
+ max_s = std::max(static_cast<int>(callee_save_regs.at(i).AsArm().AsSRegister()), max_s);
+ }
+ }
+
+ if (max_s != -1) {
+ pop_values += 1 + max_s - min_s;
}
// Decrease frame to start of callee saves.
@@ -438,6 +462,10 @@
size_t adjust = frame_size - (pop_values * kFramePointerSize);
DecreaseFrameSize(adjust);
+ if (max_s != -1) {
+ vpops(static_cast<SRegister>(min_s), 1 + max_s - min_s);
+ }
+
// Pop callee saves and PC.
PopList(pop_list);
}
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 21014c8..58c7367 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -639,6 +639,7 @@
}
constexpr size_t kFramePointerSize = 8;
+constexpr unsigned int kJniRefSpillRegsSize = 11 + 8;
void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& callee_save_regs,
@@ -648,7 +649,7 @@
// TODO: *create APCS FP - end of FP chain;
// *add support for saving a different set of callee regs.
- // For now we check that the size of callee regs vector is 11.
+ // For now we check that the callee regs vector consists of 11 core registers and 8 fp registers.
CHECK_EQ(callee_save_regs.size(), kJniRefSpillRegsSize);
// Increase frame to required size - must be at least space to push StackReference<Method>.
CHECK_GT(frame_size, kJniRefSpillRegsSize * kFramePointerSize);
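
For concreteness, the arithmetic behind the updated check (counts taken from this diff: 11 core plus 8 FP callee saves, 8 bytes each):

#include <cstddef>

constexpr size_t kCoreSpills = 11;  // core callee saves, per the CHECK above
constexpr size_t kFpSpills = 8;     // D8..D15, stored below
constexpr size_t kSpillSlotSize = 8;
static_assert((kCoreSpills + kFpSpills) * kSpillSlotSize == 152,
              "JNI spill area is 19 slots = 152 bytes; frame_size must exceed it");
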
@@ -682,6 +683,23 @@
reg_offset -= 8;
StoreToOffset(X20, SP, reg_offset);
+ reg_offset -= 8;
+ StoreDToOffset(D15, SP, reg_offset);
+ reg_offset -= 8;
+ StoreDToOffset(D14, SP, reg_offset);
+ reg_offset -= 8;
+ StoreDToOffset(D13, SP, reg_offset);
+ reg_offset -= 8;
+ StoreDToOffset(D12, SP, reg_offset);
+ reg_offset -= 8;
+ StoreDToOffset(D11, SP, reg_offset);
+ reg_offset -= 8;
+ StoreDToOffset(D10, SP, reg_offset);
+ reg_offset -= 8;
+ StoreDToOffset(D9, SP, reg_offset);
+ reg_offset -= 8;
+ StoreDToOffset(D8, SP, reg_offset);
+
// Move TR(Caller saved) to ETR(Callee saved). The original (ETR)X21 has been saved on stack.
// This way we make sure that TR is not trashed by native code.
___ Mov(reg_x(ETR), reg_x(TR));
@@ -753,6 +771,23 @@
reg_offset -= 8;
LoadFromOffset(X20, SP, reg_offset);
+ reg_offset -= 8;
+ LoadDFromOffset(D15, SP, reg_offset);
+ reg_offset -= 8;
+ LoadDFromOffset(D14, SP, reg_offset);
+ reg_offset -= 8;
+ LoadDFromOffset(D13, SP, reg_offset);
+ reg_offset -= 8;
+ LoadDFromOffset(D12, SP, reg_offset);
+ reg_offset -= 8;
+ LoadDFromOffset(D11, SP, reg_offset);
+ reg_offset -= 8;
+ LoadDFromOffset(D10, SP, reg_offset);
+ reg_offset -= 8;
+ LoadDFromOffset(D9, SP, reg_offset);
+ reg_offset -= 8;
+ LoadDFromOffset(D8, SP, reg_offset);
+
// Decrease frame size to start of callee saved regs.
DecreaseFrameSize(frame_size);
diff --git a/compiler/utils/arm64/constants_arm64.h b/compiler/utils/arm64/constants_arm64.h
index ffb54d3..01e8be9 100644
--- a/compiler/utils/arm64/constants_arm64.h
+++ b/compiler/utils/arm64/constants_arm64.h
@@ -29,8 +29,6 @@
namespace art {
namespace arm64 {
-constexpr unsigned int kJniRefSpillRegsSize = 11;
-
constexpr size_t kArm64BaseBufferSize = 4096;
} // namespace arm64
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 0b1f14d..22665ea 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1052,6 +1052,13 @@
runtime_options.push_back(
std::make_pair("imageinstructionset", GetInstructionSetString(instruction_set_)));
+ // Only allow the runtime to run without a boot image when we are compiling one ourselves.
+ // When compiling an app, we don't want fallback mode: it would abort, since we do not push
+ // a boot classpath (it might have been stripped by preopting, anyway).
+ if (!image_) {
+ runtime_options.push_back(std::make_pair("-Xno-dex-file-fallback", nullptr));
+ }
+
if (!CreateRuntime(runtime_options)) {
return false;
}
@@ -1637,9 +1644,13 @@
}
void LogCompletionTime() {
+ // Note: when runtime creation fails, e.g., when trying to compile an app without a boot
+ // image, there won't be a Runtime::Current().
LOG(INFO) << "dex2oat took " << PrettyDuration(NanoTime() - start_ns_)
<< " (threads: " << thread_count_ << ") "
- << driver_->GetMemoryUsageString(kIsDebugBuild || VLOG_IS_ON(compiler));
+ << ((Runtime::Current() != nullptr) ?
+ driver_->GetMemoryUsageString(kIsDebugBuild || VLOG_IS_ON(compiler)) :
+ "");
}
std::unique_ptr<CompilerOptions> compiler_options_;
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index 7442c70..3d8a567 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -143,27 +143,31 @@
{ kITypeMask, 61u << kOpcodeShift, "sdc1", "tO", },
// Floating point.
- { kFpMask, kCop1 | 0, "add", "fdst" },
- { kFpMask, kCop1 | 1, "sub", "fdst" },
- { kFpMask, kCop1 | 2, "mul", "fdst" },
- { kFpMask, kCop1 | 3, "div", "fdst" },
- { kFpMask | (0x1f << 16), kCop1 | 4, "sqrt", "fdst" },
- { kFpMask | (0x1f << 16), kCop1 | 5, "abs", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 6, "mov", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 7, "neg", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 8, "round.l", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 9, "trunc.l", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 10, "ceil.l", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 11, "floor.l", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 12, "round.w", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 13, "trunc.w", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 14, "ceil.w", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 15, "floor.w", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 32, "cvt.s", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 33, "cvt.d", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 36, "cvt.w", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 37, "cvt.l", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 38, "cvt.ps", "fds" },
+ { kFpMask | (0x1f << 21), kCop1 | (0x00 << 21) | 0, "mfc1", "Td" },
+ { kFpMask | (0x1f << 21), kCop1 | (0x03 << 21) | 0, "mfhc1", "Td" },
+ { kFpMask | (0x1f << 21), kCop1 | (0x04 << 21) | 0, "mtc1", "Td" },
+ { kFpMask | (0x1f << 21), kCop1 | (0x07 << 21) | 0, "mthc1", "Td" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 0, "add", "fadt" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 1, "sub", "fadt" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 2, "mul", "fadt" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 3, "div", "fadt" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 4, "sqrt", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 5, "abs", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 6, "mov", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 7, "neg", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 8, "round.l", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 9, "trunc.l", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 10, "ceil.l", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 11, "floor.l", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 12, "round.w", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 13, "trunc.w", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 14, "ceil.w", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 15, "floor.w", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 32, "cvt.s", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 33, "cvt.d", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 36, "cvt.w", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 37, "cvt.l", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 38, "cvt.ps", "fad" },
};
static uint32_t ReadU32(const uint8_t* ptr) {
@@ -206,6 +210,7 @@
break;
case 'D': args << 'r' << rd; break;
case 'd': args << 'f' << rd; break;
+ case 'a': args << 'f' << sa; break;
case 'f': // Floating point "fmt".
{
size_t fmt = (instruction >> 21) & 0x7; // TODO: other fmts?
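
The new 'a' operand prints the sa field (bits 10..6), which COP1 arithmetic encodings use for the destination register fd, while 'd' keeps printing rd (bits 15..11, the fs source). A quick sketch of the standard MIPS field extraction involved:

#include <cstdint>

struct MipsFields {
  uint32_t rs;  // bits 25..21 (fmt base for COP1)
  uint32_t rt;  // bits 20..16 (ft)
  uint32_t rd;  // bits 15..11 (fs) -- printed by 'd'
  uint32_t sa;  // bits 10..6  (fd) -- printed by the new 'a'
};

MipsFields DecodeFields(uint32_t instruction) {
  return MipsFields{
      (instruction >> 21) & 0x1f,
      (instruction >> 16) & 0x1f,
      (instruction >> 11) & 0x1f,
      (instruction >> 6) & 0x1f,
  };
}
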
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 11ccafb..aab4f8b 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -88,6 +88,7 @@
uint32_t diff = static_cast<uint32_t>(oat_file_->End() - oat_file_->Begin());
uint32_t oat_exec_size = diff - oat_data_size;
+ uint32_t oat_bss_size = oat_file_->BssSize();
elf_output_ = OS::CreateEmptyFile(output_name_.c_str());
@@ -100,6 +101,8 @@
oat_data_size,
oat_data_size,
oat_exec_size,
+ RoundUp(oat_data_size + oat_exec_size, kPageSize),
+ oat_bss_size,
true,
false));
@@ -311,13 +314,23 @@
bool dump_vmap,
bool disassemble_code,
bool absolute_addresses,
- const char* method_filter)
+ const char* class_filter,
+ const char* method_filter,
+ bool list_classes,
+ bool list_methods,
+ const char* export_dex_location,
+ uint32_t addr2instr)
: dump_raw_mapping_table_(dump_raw_mapping_table),
dump_raw_gc_map_(dump_raw_gc_map),
dump_vmap_(dump_vmap),
disassemble_code_(disassemble_code),
absolute_addresses_(absolute_addresses),
+ class_filter_(class_filter),
method_filter_(method_filter),
+ list_classes_(list_classes),
+ list_methods_(list_methods),
+ export_dex_location_(export_dex_location),
+ addr2instr_(addr2instr),
class_loader_(nullptr) {}
const bool dump_raw_mapping_table_;
@@ -325,27 +338,34 @@
const bool dump_vmap_;
const bool disassemble_code_;
const bool absolute_addresses_;
+ const char* const class_filter_;
const char* const method_filter_;
+ const bool list_classes_;
+ const bool list_methods_;
+ const char* const export_dex_location_;
+ uint32_t addr2instr_;
Handle<mirror::ClassLoader>* class_loader_;
};
class OatDumper {
public:
- explicit OatDumper(const OatFile& oat_file, OatDumperOptions* options)
+ explicit OatDumper(const OatFile& oat_file, const OatDumperOptions& options)
: oat_file_(oat_file),
oat_dex_files_(oat_file.GetOatDexFiles()),
options_(options),
+ resolved_addr2instr_(0),
instruction_set_(oat_file_.GetOatHeader().GetInstructionSet()),
disassembler_(Disassembler::Create(instruction_set_,
- new DisassemblerOptions(options_->absolute_addresses_,
+ new DisassemblerOptions(options_.absolute_addresses_,
oat_file.Begin(),
true /* can_read_literals_ */))) {
- CHECK(options_->class_loader_ != nullptr);
+ CHECK(options_.class_loader_ != nullptr);
+ CHECK(options_.class_filter_ != nullptr);
+ CHECK(options_.method_filter_ != nullptr);
AddAllOffsets();
}
~OatDumper() {
- delete options_;
delete disassembler_;
}
@@ -380,7 +400,7 @@
#define DUMP_OAT_HEADER_OFFSET(label, offset) \
os << label " OFFSET:\n"; \
os << StringPrintf("0x%08x", oat_header.offset()); \
- if (oat_header.offset() != 0 && options_->absolute_addresses_) { \
+ if (oat_header.offset() != 0 && options_.absolute_addresses_) { \
os << StringPrintf(" (%p)", oat_file_.Begin() + oat_header.offset()); \
} \
os << StringPrintf("\n\n");
@@ -426,7 +446,7 @@
os << "\n";
}
- if (options_->absolute_addresses_) {
+ if (options_.absolute_addresses_) {
os << "BEGIN:\n";
os << reinterpret_cast<const void*>(oat_file_.Begin()) << "\n\n";
@@ -439,11 +459,26 @@
os << std::flush;
+ // If set, adjust the relative address to be searched.
+ if (options_.addr2instr_ != 0) {
+ resolved_addr2instr_ = options_.addr2instr_ + oat_header.GetExecutableOffset();
+ os << "SEARCH ADDRESS (executable offset + input):\n";
+ os << StringPrintf("0x%08x\n\n", resolved_addr2instr_);
+ }
+
for (size_t i = 0; i < oat_dex_files_.size(); i++) {
const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
CHECK(oat_dex_file != nullptr);
- if (!DumpOatDexFile(os, *oat_dex_file)) {
- success = false;
+
+ // If dex file export is selected, skip the file analysis.
+ if (options_.export_dex_location_) {
+ if (!ExportDexFile(os, *oat_dex_file)) {
+ success = false;
+ }
+ } else {
+ if (!DumpOatDexFile(os, *oat_dex_file)) {
+ success = false;
+ }
}
}
os << std::flush;
@@ -553,6 +588,7 @@
bool DumpOatDexFile(std::ostream& os, const OatFile::OatDexFile& oat_dex_file) {
bool success = true;
+ bool stop_analysis = false;
os << "OatDexFile:\n";
os << StringPrintf("location: %s\n", oat_dex_file.GetDexFileLocation().c_str());
os << StringPrintf("checksum: 0x%08x\n", oat_dex_file.GetDexFileLocationChecksum());
@@ -571,6 +607,12 @@
class_def_index++) {
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
const char* descriptor = dex_file->GetClassDescriptor(class_def);
+
+ // TODO: Support regex
+ if (DescriptorToDot(descriptor).find(options_.class_filter_) == std::string::npos) {
+ continue;
+ }
+
uint32_t oat_class_offset = oat_dex_file.GetOatClassOffset(class_def_index);
const OatFile::OatClass oat_class = oat_dex_file.GetOatClass(class_def_index);
os << StringPrintf("%zd: %s (offset=0x%08x) (type_idx=%d)",
@@ -580,15 +622,98 @@
// TODO: include bitmap here if type is kOatClassSomeCompiled?
Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
std::ostream indented_os(&indent_filter);
- if (!DumpOatClass(indented_os, oat_class, *(dex_file.get()), class_def)) {
+ if (options_.list_classes_) continue;
+ if (!DumpOatClass(indented_os, oat_class, *(dex_file.get()), class_def, &stop_analysis)) {
success = false;
}
+ if (stop_analysis) {
+ os << std::flush;
+ return success;
+ }
}
os << std::flush;
return success;
}
+ bool ExportDexFile(std::ostream& os, const OatFile::OatDexFile& oat_dex_file) {
+ std::string error_msg;
+ std::string dex_file_location = oat_dex_file.GetDexFileLocation();
+
+ std::unique_ptr<const DexFile> dex_file(oat_dex_file.OpenDexFile(&error_msg));
+ if (dex_file == nullptr) {
+ os << "Failed to open dex file '" << dex_file_location << "': " << error_msg;
+ return false;
+ }
+ size_t fsize = oat_dex_file.FileSize();
+
+ // Some quick checks just in case
+ if (fsize == 0 || fsize < sizeof(DexFile::Header)) {
+ os << "Invalid dex file\n";
+ return false;
+ }
+
+ // Verify output directory exists
+ if (!OS::DirectoryExists(options_.export_dex_location_)) {
+ // TODO: Extend OS::DirectoryExists if symlink support is required
+ os << options_.export_dex_location_ << " output directory not found or symlink\n";
+ return false;
+ }
+
+ // Beautify path names
+ if (dex_file_location.size() > PATH_MAX || dex_file_location.size() <= 0) {
+ return false;
+ }
+
+ std::string dex_orig_name;
+ size_t dex_orig_pos = dex_file_location.rfind('/');
+ if (dex_orig_pos == std::string::npos) {
+ dex_orig_name = dex_file_location;
+ } else {
+ dex_orig_name = dex_file_location.substr(dex_orig_pos + 1);
+ }
+
+ // A more elegant approach to naming user-installed apps efficiently would be welcome.
+ if (dex_orig_name.size() == 8 && !dex_orig_name.compare("base.apk")) {
+ dex_file_location.erase(dex_orig_pos, strlen("base.apk") + 1);
+ size_t apk_orig_pos = dex_file_location.rfind('/');
+ if (apk_orig_pos != std::string::npos) {
+ dex_orig_name = dex_file_location.substr(++apk_orig_pos);
+ }
+ }
+
+ std::string out_dex_path(options_.export_dex_location_);
+ if (out_dex_path.back() != '/') {
+ out_dex_path.append("/");
+ }
+ out_dex_path.append(dex_orig_name);
+ out_dex_path.append("_export.dex");
+ if (out_dex_path.length() > PATH_MAX) {
+ return false;
+ }
+
+ std::unique_ptr<File> file(OS::CreateEmptyFile(out_dex_path.c_str()));
+ if (file.get() == nullptr) {
+ os << "Failed to open output dex file " << out_dex_path;
+ return false;
+ }
+
+ if (!file->WriteFully(dex_file->Begin(), fsize)) {
+ os << "Failed to write dex file";
+ file->Erase();
+ return false;
+ }
+
+ if (file->FlushCloseOrErase() != 0) {
+ os << "Flush and close failed";
+ return false;
+ }
+
+ os << StringPrintf("Dex file exported at %s (%zd bytes)\n", out_dex_path.c_str(), fsize);
+ os << std::flush;
+
+ return true;
+ }
+
static void SkipAllFields(ClassDataItemIterator& it) {
while (it.HasNextStaticField()) {
it.Next();
@@ -599,8 +724,9 @@
}
bool DumpOatClass(std::ostream& os, const OatFile::OatClass& oat_class, const DexFile& dex_file,
- const DexFile::ClassDef& class_def) {
+ const DexFile::ClassDef& class_def, bool* stop_analysis) {
bool success = true;
+ bool addr_found = false;
const uint8_t* class_data = dex_file.GetClassData(class_def);
if (class_data == nullptr) { // empty class such as a marker interface?
os << std::flush;
@@ -612,18 +738,26 @@
while (it.HasNextDirectMethod()) {
if (!DumpOatMethod(os, class_def, class_method_index, oat_class, dex_file,
it.GetMemberIndex(), it.GetMethodCodeItem(),
- it.GetRawMemberAccessFlags())) {
+ it.GetRawMemberAccessFlags(), &addr_found)) {
success = false;
}
+ if (addr_found) {
+ *stop_analysis = true;
+ return success;
+ }
class_method_index++;
it.Next();
}
while (it.HasNextVirtualMethod()) {
if (!DumpOatMethod(os, class_def, class_method_index, oat_class, dex_file,
it.GetMemberIndex(), it.GetMethodCodeItem(),
- it.GetRawMemberAccessFlags())) {
+ it.GetRawMemberAccessFlags(), &addr_found)) {
success = false;
}
+ if (addr_found) {
+ *stop_analysis = true;
+ return success;
+ }
class_method_index++;
it.Next();
}
@@ -641,20 +775,39 @@
uint32_t class_method_index,
const OatFile::OatClass& oat_class, const DexFile& dex_file,
uint32_t dex_method_idx, const DexFile::CodeItem* code_item,
- uint32_t method_access_flags) {
+ uint32_t method_access_flags, bool* addr_found) {
bool success = true;
- std::string pretty_method = PrettyMethod(dex_method_idx, dex_file, true);
- if (pretty_method.find(options_->method_filter_) == std::string::npos) {
+
+ // TODO: Support regex
+ std::string method_name = dex_file.GetMethodName(dex_file.GetMethodId(dex_method_idx));
+ if (method_name.find(options_.method_filter_) == std::string::npos) {
return success;
}
+ std::string pretty_method = PrettyMethod(dex_method_idx, dex_file, true);
os << StringPrintf("%d: %s (dex_method_idx=%d)\n",
class_method_index, pretty_method.c_str(),
dex_method_idx);
+ if (options_.list_methods_) return success;
+
Indenter indent1_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
std::unique_ptr<std::ostream> indent1_os(new std::ostream(&indent1_filter));
Indenter indent2_filter(indent1_os->rdbuf(), kIndentChar, kIndentBy1Count);
std::unique_ptr<std::ostream> indent2_os(new std::ostream(&indent2_filter));
+
+ uint32_t oat_method_offsets_offset = oat_class.GetOatMethodOffsetsOffset(class_method_index);
+ const OatMethodOffsets* oat_method_offsets = oat_class.GetOatMethodOffsets(class_method_index);
+ const OatFile::OatMethod oat_method = oat_class.GetOatMethod(class_method_index);
+ uint32_t code_offset = oat_method.GetCodeOffset();
+ uint32_t code_size = oat_method.GetQuickCodeSize();
+ if (resolved_addr2instr_ != 0) {
+ if (resolved_addr2instr_ > code_offset + code_size) {
+ return success;
+ } else {
+ *addr_found = true; // stop analyzing file at next iteration
+ }
+ }
+
{
*indent1_os << "DEX CODE:\n";
DumpDexCode(*indent2_os, dex_file, code_item);
@@ -666,13 +819,9 @@
verifier.reset(DumpVerifier(*indent2_os, dex_method_idx, &dex_file, class_def, code_item,
method_access_flags));
}
-
- uint32_t oat_method_offsets_offset = oat_class.GetOatMethodOffsetsOffset(class_method_index);
- const OatMethodOffsets* oat_method_offsets = oat_class.GetOatMethodOffsets(class_method_index);
- const OatFile::OatMethod oat_method = oat_class.GetOatMethod(class_method_index);
{
*indent1_os << "OatMethodOffsets ";
- if (options_->absolute_addresses_) {
+ if (options_.absolute_addresses_) {
*indent1_os << StringPrintf("%p ", oat_method_offsets);
}
*indent1_os << StringPrintf("(offset=0x%08x)\n", oat_method_offsets_offset);
@@ -685,7 +834,6 @@
return false;
}
- uint32_t code_offset = oat_method.GetCodeOffset();
*indent2_os << StringPrintf("code_offset: 0x%08x ", code_offset);
uint32_t aligned_code_begin = AlignCodeOffset(oat_method.GetCodeOffset());
if (aligned_code_begin > oat_file_.Size()) {
@@ -697,7 +845,7 @@
*indent2_os << "\n";
*indent2_os << "gc_map: ";
- if (options_->absolute_addresses_) {
+ if (options_.absolute_addresses_) {
*indent2_os << StringPrintf("%p ", oat_method.GetGcMap());
}
uint32_t gc_map_offset = oat_method.GetGcMapOffset();
@@ -707,7 +855,7 @@
"gc map table offset 0x%08x is past end of file 0x%08zx.\n",
gc_map_offset, oat_file_.Size());
success = false;
- } else if (options_->dump_raw_gc_map_) {
+ } else if (options_.dump_raw_gc_map_) {
Indenter indent3_filter(indent2_os->rdbuf(), kIndentChar, kIndentBy1Count);
std::ostream indent3_os(&indent3_filter);
DumpGcMap(indent3_os, oat_method, code_item);
@@ -718,7 +866,7 @@
uint32_t method_header_offset = oat_method.GetOatQuickMethodHeaderOffset();
const OatQuickMethodHeader* method_header = oat_method.GetOatQuickMethodHeader();
- if (options_->absolute_addresses_) {
+ if (options_.absolute_addresses_) {
*indent1_os << StringPrintf("%p ", method_header);
}
*indent1_os << StringPrintf("(offset=0x%08x)\n", method_header_offset);
@@ -732,7 +880,7 @@
}
*indent2_os << "mapping_table: ";
- if (options_->absolute_addresses_) {
+ if (options_.absolute_addresses_) {
*indent2_os << StringPrintf("%p ", oat_method.GetMappingTable());
}
uint32_t mapping_table_offset = oat_method.GetMappingTableOffset();
@@ -744,14 +892,14 @@
mapping_table_offset, oat_file_.Size(),
oat_method.GetMappingTableOffsetOffset());
success = false;
- } else if (options_->dump_raw_mapping_table_) {
+ } else if (options_.dump_raw_mapping_table_) {
Indenter indent3_filter(indent2_os->rdbuf(), kIndentChar, kIndentBy1Count);
std::ostream indent3_os(&indent3_filter);
DumpMappingTable(indent3_os, oat_method);
}
*indent2_os << "vmap_table: ";
- if (options_->absolute_addresses_) {
+ if (options_.absolute_addresses_) {
*indent2_os << StringPrintf("%p ", oat_method.GetVmapTable());
}
uint32_t vmap_table_offset = oat_method.GetVmapTableOffset();
@@ -763,7 +911,7 @@
vmap_table_offset, oat_file_.Size(),
oat_method.GetVmapTableOffsetOffset());
success = false;
- } else if (options_->dump_vmap_) {
+ } else if (options_.dump_vmap_) {
DumpVmapData(*indent2_os, oat_method, code_item);
}
}
@@ -794,12 +942,10 @@
success = false;
} else {
const void* code = oat_method.GetQuickCode();
- uint32_t code_size = oat_method.GetQuickCodeSize();
- uint32_t code_offset = oat_method.GetCodeOffset();
uint32_t aligned_code_begin = AlignCodeOffset(code_offset);
uint64_t aligned_code_end = aligned_code_begin + code_size;
- if (options_->absolute_addresses_) {
+ if (options_.absolute_addresses_) {
*indent1_os << StringPrintf("%p ", code);
}
*indent1_os << StringPrintf("(code_offset=0x%08x size_offset=0x%08x size=%u)%s\n",
@@ -820,7 +966,7 @@
aligned_code_end, oat_file_.Size(),
code_size, code_size_offset);
success = false;
- if (options_->disassemble_code_) {
+ if (options_.disassemble_code_) {
if (code_size_offset + kPrologueBytes <= oat_file_.Size()) {
DumpCode(*indent2_os, verifier.get(), oat_method, code_item, true, kPrologueBytes);
}
@@ -832,12 +978,12 @@
code_size, kMaxCodeSize,
code_size, code_size_offset);
success = false;
- if (options_->disassemble_code_) {
+ if (options_.disassemble_code_) {
if (code_size_offset + kPrologueBytes <= oat_file_.Size()) {
DumpCode(*indent2_os, verifier.get(), oat_method, code_item, true, kPrologueBytes);
}
}
- } else if (options_->disassemble_code_) {
+ } else if (options_.disassemble_code_) {
DumpCode(*indent2_os, verifier.get(), oat_method, code_item, !success, 0);
}
}
@@ -1175,7 +1321,8 @@
size_t i = 0;
while (i < code_item->insns_size_in_code_units_) {
const Instruction* instruction = Instruction::At(&code_item->insns_[i]);
- os << StringPrintf("0x%04zx: %s\n", i, instruction->DumpString(&dex_file).c_str());
+ os << StringPrintf("0x%04zx: ", i) << instruction->DumpHexLE(5)
+ << StringPrintf("\t| %s\n", instruction->DumpString(&dex_file).c_str());
i += instruction->SizeInCodeUnits();
}
}
@@ -1191,10 +1338,10 @@
StackHandleScope<1> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(
hs.NewHandle(Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file)));
- DCHECK(options_->class_loader_ != nullptr);
+ DCHECK(options_.class_loader_ != nullptr);
return verifier::MethodVerifier::VerifyMethodAndDump(soa.Self(), os, dex_method_idx, dex_file,
dex_cache,
- *options_->class_loader_,
+ *options_.class_loader_,
&class_def, code_item,
NullHandle<mirror::ArtMethod>(),
method_access_flags);
@@ -1237,7 +1384,8 @@
const OatFile& oat_file_;
const std::vector<const OatFile::OatDexFile*> oat_dex_files_;
- const OatDumperOptions* options_;
+ const OatDumperOptions& options_;
+ uint32_t resolved_addr2instr_;
InstructionSet instruction_set_;
std::set<uintptr_t> offsets_;
Disassembler* disassembler_;
@@ -1335,7 +1483,7 @@
stats_.oat_file_bytes = oat_file->Size();
- oat_dumper_.reset(new OatDumper(*oat_file, oat_dumper_options_.release()));
+ oat_dumper_.reset(new OatDumper(*oat_file, *oat_dumper_options_));
for (const OatFile::OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
CHECK(oat_dex_file != nullptr);
@@ -2045,17 +2193,18 @@
soa.Decode<mirror::ClassLoader*>(class_loader));
options->class_loader_ = &loader_handle;
- OatDumper oat_dumper(*oat_file, options);
+ OatDumper oat_dumper(*oat_file, *options);
bool success = oat_dumper.Dump(*os);
return (success) ? EXIT_SUCCESS : EXIT_FAILURE;
}
static int DumpOatWithoutRuntime(OatFile* oat_file, OatDumperOptions* options, std::ostream* os) {
+ CHECK(oat_file != nullptr && options != nullptr);
// No image = no class loader.
NullHandle<mirror::ClassLoader> null_class_loader;
options->class_loader_ = &null_class_loader;
- OatDumper oat_dumper(*oat_file, options);
+ OatDumper oat_dumper(*oat_file, *options);
bool success = oat_dumper.Dump(*os);
return (success) ? EXIT_SUCCESS : EXIT_FAILURE;
}
@@ -2127,8 +2276,21 @@
} else if (option.starts_with("--symbolize=")) {
oat_filename_ = option.substr(strlen("--symbolize=")).data();
symbolize_ = true;
+ } else if (option.starts_with("--class-filter=")) {
+ class_filter_ = option.substr(strlen("--class-filter=")).data();
} else if (option.starts_with("--method-filter=")) {
method_filter_ = option.substr(strlen("--method-filter=")).data();
+ } else if (option.starts_with("--list-classes")) {
+ list_classes_ = true;
+ } else if (option.starts_with("--list-methods")) {
+ list_methods_ = true;
+ } else if (option.starts_with("--export-dex-to=")) {
+ export_dex_location_ = option.substr(strlen("--export-dex-to=")).data();
+ } else if (option.starts_with("--addr2instr=")) {
+ if (!ParseUint(option.substr(strlen("--addr2instr=")).data(), &addr2instr_)) {
+ *error_msg = "Address conversion failed";
+ return kParseError;
+ }
} else {
return kParseUnknownArgument;
}
@@ -2191,8 +2353,29 @@
" --no-disassemble may be used to disable disassembly.\n"
" Example: --no-disassemble\n"
"\n"
+ " --list-classes may be used to list target file classes (can be used with filters).\n"
+ " Example: --list-classes\n"
+ " Example: --list-classes --class-filter=com.example.foo\n"
+ "\n"
+ " --list-methods may be used to list target file methods (can be used with filters).\n"
+ " Example: --list-methods\n"
+ " Example: --list-methods --class-filter=com.example --method-filter=foo\n"
+ "\n"
+ " --symbolize=<file.oat>: output a copy of file.oat with elf symbols included.\n"
+ " Example: --symbolize=/system/framework/boot.oat\n"
+ "\n"
+ " --class-filter=<class name>: only dumps classes that contain the filter.\n"
+ " Example: --class-filter=com.example.foo\n"
+ "\n"
" --method-filter=<method name>: only dumps methods that contain the filter.\n"
" Example: --method-filter=foo\n"
+ "\n"
+ " --export-dex-to=<directory>: may be used to export oat embedded dex files.\n"
+ " Example: --export-dex-to=/data/local/tmp\n"
+ "\n"
+ " --addr2instr=<address>: output matching method disassembled code from relative\n"
+ " address (e.g. PC from crash dump)\n"
+ " Example: --addr2instr=0x00001a3b\n"
"\n";
return usage;
@@ -2200,6 +2383,7 @@
public:
const char* oat_filename_ = nullptr;
+ const char* class_filter_ = "";
const char* method_filter_ = "";
const char* image_location_ = nullptr;
std::string elf_filename_prefix_;
@@ -2208,6 +2392,10 @@
bool dump_vmap_ = true;
bool disassemble_code_ = true;
bool symbolize_ = false;
+ bool list_classes_ = false;
+ bool list_methods_ = false;
+ uint32_t addr2instr_ = 0;
+ const char* export_dex_location_ = nullptr;
};
struct OatdumpMain : public CmdlineMain<OatdumpArgs> {
@@ -2223,7 +2411,12 @@
args_->dump_vmap_,
args_->disassemble_code_,
absolute_addresses,
- args_->method_filter_));
+ args_->class_filter_,
+ args_->method_filter_,
+ args_->list_classes_,
+ args_->list_methods_,
+ args_->export_dex_location_,
+ args_->addr2instr_));
return (args_->boot_image_location_ != nullptr || args_->image_location_ != nullptr) &&
!args_->symbolize_;
@@ -2240,7 +2433,7 @@
} else {
return DumpOat(nullptr,
args_->oat_filename_,
- oat_dumper_options_.release(),
+ oat_dumper_options_.get(),
args_->os_) == EXIT_SUCCESS;
}
}
@@ -2251,11 +2444,11 @@
if (args_->oat_filename_ != nullptr) {
return DumpOat(runtime,
args_->oat_filename_,
- oat_dumper_options_.release(),
+ oat_dumper_options_.get(),
args_->os_) == EXIT_SUCCESS;
}
- return DumpImage(runtime, args_->image_location_, oat_dumper_options_.release(), args_->os_)
+ return DumpImage(runtime, args_->image_location_, oat_dumper_options_.get(), args_->os_)
== EXIT_SUCCESS;
}
diff --git a/runtime/Android.mk b/runtime/Android.mk
index c647cc2..c5cf890 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -44,6 +44,7 @@
elf_file.cc \
gc/allocator/dlmalloc.cc \
gc/allocator/rosalloc.cc \
+ gc/accounting/bitmap.cc \
gc/accounting/card_table.cc \
gc/accounting/heap_bitmap.cc \
gc/accounting/mod_union_table.cc \
@@ -88,6 +89,9 @@
jdwp/jdwp_socket.cc \
jdwp/object_registry.cc \
jni_env_ext.cc \
+ jit/jit.cc \
+ jit/jit_code_cache.cc \
+ jit/jit_instrumentation.cc \
jni_internal.cc \
jobject_comparator.cc \
mem_map.cc \
@@ -298,6 +302,7 @@
base/unix_file/fd_file.h \
dex_file.h \
dex_instruction.h \
+ dex_instruction_utils.h \
gc_root.h \
gc/allocator/rosalloc.h \
gc/collector/gc_type.h \
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index fec1ce5..539b607 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -890,7 +890,7 @@
// r1: pointer to the bottom of the used area of the alloca, can restore stack till there.
// Check for error = 0.
- cbz r0, .Lentry_error
+ cbz r0, .Lexception_in_native
// Release part of the alloca.
mov sp, r1
@@ -920,10 +920,6 @@
blx artQuickGenericJniEndTrampoline
- // Tear down the alloca.
- mov sp, r10
- .cfi_def_cfa_register sp
-
// Restore self pointer.
mov r9, r11
@@ -931,6 +927,10 @@
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
cbnz r2, .Lexception_in_native
+ // Tear down the alloca.
+ mov sp, r10
+ .cfi_def_cfa_register sp
+
// Tear down the callee-save frame. Skip arg registers.
add sp, #FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
.cfi_adjust_cfa_offset -(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
@@ -943,14 +943,11 @@
.cfi_def_cfa_register r10
.cfi_adjust_cfa_offset FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
-.Lentry_error:
- mov sp, r10
- .cfi_def_cfa_register sp
- mov r9, r11
.Lexception_in_native:
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ ldr sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET]
+ .cfi_def_cfa_register sp
+ # This will create a new save-all frame, required by the runtime.
DELIVER_PENDING_EXCEPTION
-
END art_quick_generic_jni_trampoline
.extern artQuickToInterpreterBridge
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index b0c66b3..f6bfee7 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -61,6 +61,15 @@
return fix_cortex_a53_835769_;
}
+ // TODO: Tune this on a per-CPU basis. For now, we pessimistically assume
+ // that all ARM64 CPUs prefer explicit memory barriers over acquire-release.
+ //
+ // NOTE: This should not be the case! However, we want to exercise the
+ // explicit memory barrier code paths in the Optimizing Compiler.
+ bool PreferAcquireRelease() const {
+ return false;
+ }
+
virtual ~Arm64InstructionSetFeatures() {}
protected:
diff --git a/runtime/arch/arm64/instruction_set_features_arm64_test.cc b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
index 027e59c..753107b 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64_test.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
@@ -30,6 +30,8 @@
EXPECT_TRUE(arm64_features->Equals(arm64_features.get()));
EXPECT_STREQ("smp,a53", arm64_features->GetFeatureString().c_str());
EXPECT_EQ(arm64_features->AsBitmap(), 3U);
+ // See the comments in instruction_set_features_arm64.h.
+ EXPECT_FALSE(arm64_features->AsArm64InstructionSetFeatures()->PreferAcquireRelease());
}
} // namespace art
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 770073b5..ec25a33 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1510,7 +1510,7 @@
// x1: pointer to the bottom of the used area of the alloca, can restore stack till there.
// Check for error = 0.
- cbz x0, .Lentry_error
+ cbz x0, .Lexception_in_native
// Release part of the alloca.
mov sp, x1
@@ -1545,15 +1545,15 @@
bl artQuickGenericJniEndTrampoline
+ // Pending exceptions possible.
+ // Use xETR as xSELF might be scratched by native code.
+ ldr x2, [xETR, THREAD_EXCEPTION_OFFSET]
+ cbnz x2, .Lexception_in_native
+
// Tear down the alloca.
mov sp, x28
.cfi_def_cfa_register sp
- // Pending exceptions possible.
- // Use xETR as xSELF might be scratched by native code
- ldr x1, [xETR, THREAD_EXCEPTION_OFFSET]
- cbnz x1, .Lexception_in_native
-
// Tear down the callee-save frame.
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
@@ -1561,13 +1561,15 @@
fmov d0, x0
ret
-.Lentry_error:
- mov sp, x28
- .cfi_def_cfa_register sp
.Lexception_in_native:
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ // Restore xSELF. It might have been scratched by native code.
+ mov xSELF, xETR
+ // Move to x1 then sp to please assembler.
+ ldr x1, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
+ mov sp, x1
+ .cfi_def_cfa_register sp
+ # This will create a new save-all frame, required by the runtime.
DELIVER_PENDING_EXCEPTION
-
END art_quick_generic_jni_trampoline
/*
diff --git a/runtime/arch/mips/entrypoints_direct_mips.h b/runtime/arch/mips/entrypoints_direct_mips.h
new file mode 100644
index 0000000..b1aa3ee
--- /dev/null
+++ b/runtime/arch/mips/entrypoints_direct_mips.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_MIPS_ENTRYPOINTS_DIRECT_MIPS_H_
+#define ART_RUNTIME_ARCH_MIPS_ENTRYPOINTS_DIRECT_MIPS_H_
+
+#include "entrypoints/quick/quick_entrypoints_enum.h"
+
+namespace art {
+
+/* Returns true if the entrypoint contains a direct reference to the
+ native implementation. The list is required, as direct entrypoints
+ need additional handling during invocation. */
+static constexpr bool IsDirectEntrypoint(QuickEntrypointEnum entrypoint) {
+ return
+ entrypoint == kQuickInstanceofNonTrivial ||
+ entrypoint == kQuickA64Load ||
+ entrypoint == kQuickA64Store ||
+ entrypoint == kQuickFmod ||
+ entrypoint == kQuickFmodf ||
+ entrypoint == kQuickMemcpy ||
+ entrypoint == kQuickL2d ||
+ entrypoint == kQuickL2f ||
+ entrypoint == kQuickD2iz ||
+ entrypoint == kQuickF2iz ||
+ entrypoint == kQuickD2l ||
+ entrypoint == kQuickF2l ||
+ entrypoint == kQuickLdiv ||
+ entrypoint == kQuickLmod ||
+ entrypoint == kQuickLmul ||
+ entrypoint == kQuickCmpgDouble ||
+ entrypoint == kQuickCmpgFloat ||
+ entrypoint == kQuickCmplDouble ||
+ entrypoint == kQuickCmplFloat;
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_MIPS_ENTRYPOINTS_DIRECT_MIPS_H_
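
The point of making IsDirectEntrypoint constexpr is visible in entrypoints_init_mips.cc below: each table assignment is paired with a static_assert, so the direct-entrypoint list cannot silently drift out of sync with the table. A minimal standalone use of the idiom (enum values per the list above):

#include "entrypoints_direct_mips.h"

// Compile-time consistency checks: a stale list breaks the build, not the VM.
static_assert(art::IsDirectEntrypoint(art::kQuickFmod),
              "Direct C stub not marked direct.");
static_assert(!art::IsDirectEntrypoint(art::kQuickCheckCast),
              "Non-direct C stub marked direct.");
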
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 1a661c4..e3ec27c 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -23,6 +23,7 @@
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/math_entrypoints.h"
#include "entrypoints/runtime_asm_entrypoints.h"
+#include "entrypoints_direct_mips.h"
#include "interpreter/interpreter.h"
namespace art {
@@ -72,83 +73,155 @@
// Cast
qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
+ static_assert(IsDirectEntrypoint(kQuickInstanceofNonTrivial), "Direct C stub not marked direct.");
qpoints->pCheckCast = art_quick_check_cast;
+ static_assert(!IsDirectEntrypoint(kQuickCheckCast), "Non-direct C stub marked direct.");
// DexCache
qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
+ static_assert(!IsDirectEntrypoint(kQuickInitializeStaticStorage),
+ "Non-direct C stub marked direct.");
qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access;
+ static_assert(!IsDirectEntrypoint(kQuickInitializeTypeAndVerifyAccess),
+ "Non-direct C stub marked direct.");
qpoints->pInitializeType = art_quick_initialize_type;
+ static_assert(!IsDirectEntrypoint(kQuickInitializeType), "Non-direct C stub marked direct.");
qpoints->pResolveString = art_quick_resolve_string;
+ static_assert(!IsDirectEntrypoint(kQuickResolveString), "Non-direct C stub marked direct.");
// Field
qpoints->pSet8Instance = art_quick_set8_instance;
+ static_assert(!IsDirectEntrypoint(kQuickSet8Instance), "Non-direct C stub marked direct.");
qpoints->pSet8Static = art_quick_set8_static;
+ static_assert(!IsDirectEntrypoint(kQuickSet8Static), "Non-direct C stub marked direct.");
qpoints->pSet16Instance = art_quick_set16_instance;
+ static_assert(!IsDirectEntrypoint(kQuickSet16Instance), "Non-direct C stub marked direct.");
qpoints->pSet16Static = art_quick_set16_static;
+ static_assert(!IsDirectEntrypoint(kQuickSet16Static), "Non-direct C stub marked direct.");
qpoints->pSet32Instance = art_quick_set32_instance;
+ static_assert(!IsDirectEntrypoint(kQuickSet32Instance), "Non-direct C stub marked direct.");
qpoints->pSet32Static = art_quick_set32_static;
+ static_assert(!IsDirectEntrypoint(kQuickSet32Static), "Non-direct C stub marked direct.");
qpoints->pSet64Instance = art_quick_set64_instance;
+ static_assert(!IsDirectEntrypoint(kQuickSet64Instance), "Non-direct C stub marked direct.");
qpoints->pSet64Static = art_quick_set64_static;
+ static_assert(!IsDirectEntrypoint(kQuickSet64Static), "Non-direct C stub marked direct.");
qpoints->pSetObjInstance = art_quick_set_obj_instance;
+ static_assert(!IsDirectEntrypoint(kQuickSetObjInstance), "Non-direct C stub marked direct.");
qpoints->pSetObjStatic = art_quick_set_obj_static;
+ static_assert(!IsDirectEntrypoint(kQuickSetObjStatic), "Non-direct C stub marked direct.");
qpoints->pGetBooleanInstance = art_quick_get_boolean_instance;
+ static_assert(!IsDirectEntrypoint(kQuickGetBooleanInstance), "Non-direct C stub marked direct.");
qpoints->pGetByteInstance = art_quick_get_byte_instance;
+ static_assert(!IsDirectEntrypoint(kQuickGetByteInstance), "Non-direct C stub marked direct.");
qpoints->pGetCharInstance = art_quick_get_char_instance;
+ static_assert(!IsDirectEntrypoint(kQuickGetCharInstance), "Non-direct C stub marked direct.");
qpoints->pGetShortInstance = art_quick_get_short_instance;
+ static_assert(!IsDirectEntrypoint(kQuickGetShortInstance), "Non-direct C stub marked direct.");
qpoints->pGet32Instance = art_quick_get32_instance;
+ static_assert(!IsDirectEntrypoint(kQuickGet32Instance), "Non-direct C stub marked direct.");
qpoints->pGet64Instance = art_quick_get64_instance;
+ static_assert(!IsDirectEntrypoint(kQuickGet64Instance), "Non-direct C stub marked direct.");
qpoints->pGetObjInstance = art_quick_get_obj_instance;
+ static_assert(!IsDirectEntrypoint(kQuickGetObjInstance), "Non-direct C stub marked direct.");
qpoints->pGetBooleanStatic = art_quick_get_boolean_static;
+ static_assert(!IsDirectEntrypoint(kQuickGetBooleanStatic), "Non-direct C stub marked direct.");
qpoints->pGetByteStatic = art_quick_get_byte_static;
+ static_assert(!IsDirectEntrypoint(kQuickGetByteStatic), "Non-direct C stub marked direct.");
qpoints->pGetCharStatic = art_quick_get_char_static;
+ static_assert(!IsDirectEntrypoint(kQuickGetCharStatic), "Non-direct C stub marked direct.");
qpoints->pGetShortStatic = art_quick_get_short_static;
+ static_assert(!IsDirectEntrypoint(kQuickGetShortStatic), "Non-direct C stub marked direct.");
qpoints->pGet32Static = art_quick_get32_static;
+ static_assert(!IsDirectEntrypoint(kQuickGet32Static), "Non-direct C stub marked direct.");
qpoints->pGet64Static = art_quick_get64_static;
+ static_assert(!IsDirectEntrypoint(kQuickGet64Static), "Non-direct C stub marked direct.");
qpoints->pGetObjStatic = art_quick_get_obj_static;
+ static_assert(!IsDirectEntrypoint(kQuickGetObjStatic), "Non-direct C stub marked direct.");
// Array
qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check;
+ static_assert(!IsDirectEntrypoint(kQuickAputObjectWithNullAndBoundCheck),
+ "Non-direct C stub marked direct.");
qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check;
+ static_assert(!IsDirectEntrypoint(kQuickAputObjectWithBoundCheck),
+ "Non-direct C stub marked direct.");
qpoints->pAputObject = art_quick_aput_obj;
+ static_assert(!IsDirectEntrypoint(kQuickAputObject), "Non-direct C stub marked direct.");
qpoints->pHandleFillArrayData = art_quick_handle_fill_data;
+ static_assert(!IsDirectEntrypoint(kQuickHandleFillArrayData), "Non-direct C stub marked direct.");
// JNI
qpoints->pJniMethodStart = JniMethodStart;
+ static_assert(!IsDirectEntrypoint(kQuickJniMethodStart), "Non-direct C stub marked direct.");
qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized;
+ static_assert(!IsDirectEntrypoint(kQuickJniMethodStartSynchronized),
+ "Non-direct C stub marked direct.");
qpoints->pJniMethodEnd = JniMethodEnd;
+ static_assert(!IsDirectEntrypoint(kQuickJniMethodEnd), "Non-direct C stub marked direct.");
qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
+ static_assert(!IsDirectEntrypoint(kQuickJniMethodEndSynchronized),
+ "Non-direct C stub marked direct.");
qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
+ static_assert(!IsDirectEntrypoint(kQuickJniMethodEndWithReference),
+ "Non-direct C stub marked direct.");
qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
+ static_assert(!IsDirectEntrypoint(kQuickJniMethodEndWithReferenceSynchronized),
+ "Non-direct C stub marked direct.");
qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline;
+ static_assert(!IsDirectEntrypoint(kQuickQuickGenericJniTrampoline),
+ "Non-direct C stub marked direct.");
// Locks
qpoints->pLockObject = art_quick_lock_object;
+ static_assert(!IsDirectEntrypoint(kQuickLockObject), "Non-direct C stub marked direct.");
qpoints->pUnlockObject = art_quick_unlock_object;
+ static_assert(!IsDirectEntrypoint(kQuickUnlockObject), "Non-direct C stub marked direct.");
// Math
qpoints->pCmpgDouble = CmpgDouble;
+ static_assert(IsDirectEntrypoint(kQuickCmpgDouble), "Direct C stub not marked direct.");
qpoints->pCmpgFloat = CmpgFloat;
+ static_assert(IsDirectEntrypoint(kQuickCmpgFloat), "Direct C stub not marked direct.");
qpoints->pCmplDouble = CmplDouble;
+ static_assert(IsDirectEntrypoint(kQuickCmplDouble), "Direct C stub not marked direct.");
qpoints->pCmplFloat = CmplFloat;
+ static_assert(IsDirectEntrypoint(kQuickCmplFloat), "Direct C stub not marked direct.");
qpoints->pFmod = fmod;
+ static_assert(IsDirectEntrypoint(kQuickFmod), "Direct C stub not marked direct.");
qpoints->pL2d = art_l2d;
+ static_assert(IsDirectEntrypoint(kQuickL2d), "Direct C stub not marked direct.");
qpoints->pFmodf = fmodf;
+ static_assert(IsDirectEntrypoint(kQuickFmodf), "Direct C stub not marked direct.");
qpoints->pL2f = art_l2f;
+ static_assert(IsDirectEntrypoint(kQuickL2f), "Direct C stub not marked direct.");
qpoints->pD2iz = art_d2i;
+ static_assert(IsDirectEntrypoint(kQuickD2iz), "Direct C stub not marked direct.");
qpoints->pF2iz = art_f2i;
+ static_assert(IsDirectEntrypoint(kQuickF2iz), "Direct C stub not marked direct.");
qpoints->pIdivmod = NULL;
qpoints->pD2l = art_d2l;
+ static_assert(IsDirectEntrypoint(kQuickD2l), "Direct C stub not marked direct.");
qpoints->pF2l = art_f2l;
+ static_assert(IsDirectEntrypoint(kQuickF2l), "Direct C stub not marked direct.");
qpoints->pLdiv = artLdiv;
+ static_assert(IsDirectEntrypoint(kQuickLdiv), "Direct C stub not marked direct.");
qpoints->pLmod = artLmod;
+ static_assert(IsDirectEntrypoint(kQuickLmod), "Direct C stub not marked direct.");
qpoints->pLmul = artLmul;
+ static_assert(IsDirectEntrypoint(kQuickLmul), "Direct C stub not marked direct.");
qpoints->pShlLong = art_quick_shl_long;
+ static_assert(!IsDirectEntrypoint(kQuickShlLong), "Non-direct C stub marked direct.");
qpoints->pShrLong = art_quick_shr_long;
+ static_assert(!IsDirectEntrypoint(kQuickShrLong), "Non-direct C stub marked direct.");
qpoints->pUshrLong = art_quick_ushr_long;
+ static_assert(!IsDirectEntrypoint(kQuickUshrLong), "Non-direct C stub marked direct.");
// Intrinsics
qpoints->pIndexOf = art_quick_indexof;
+ static_assert(!IsDirectEntrypoint(kQuickIndexOf), "Non-direct C stub marked direct.");
qpoints->pStringCompareTo = art_quick_string_compareto;
+ static_assert(!IsDirectEntrypoint(kQuickStringCompareTo), "Non-direct C stub marked direct.");
qpoints->pMemcpy = memcpy;
// Invocation
@@ -156,25 +229,44 @@
qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
+ static_assert(!IsDirectEntrypoint(kQuickInvokeDirectTrampolineWithAccessCheck),
+ "Non-direct C stub marked direct.");
qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
+ static_assert(!IsDirectEntrypoint(kQuickInvokeInterfaceTrampolineWithAccessCheck),
+ "Non-direct C stub marked direct.");
qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
+ static_assert(!IsDirectEntrypoint(kQuickInvokeStaticTrampolineWithAccessCheck),
+ "Non-direct C stub marked direct.");
qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
+ static_assert(!IsDirectEntrypoint(kQuickInvokeSuperTrampolineWithAccessCheck),
+ "Non-direct C stub marked direct.");
qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ static_assert(!IsDirectEntrypoint(kQuickInvokeVirtualTrampolineWithAccessCheck),
+ "Non-direct C stub marked direct.");
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
+ static_assert(!IsDirectEntrypoint(kQuickTestSuspend), "Non-direct C stub marked direct.");
// Throws
qpoints->pDeliverException = art_quick_deliver_exception;
+ static_assert(!IsDirectEntrypoint(kQuickDeliverException), "Non-direct C stub marked direct.");
qpoints->pThrowArrayBounds = art_quick_throw_array_bounds;
+ static_assert(!IsDirectEntrypoint(kQuickThrowArrayBounds), "Non-direct C stub marked direct.");
qpoints->pThrowDivZero = art_quick_throw_div_zero;
+ static_assert(!IsDirectEntrypoint(kQuickThrowDivZero), "Non-direct C stub marked direct.");
qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
+ static_assert(!IsDirectEntrypoint(kQuickThrowNoSuchMethod), "Non-direct C stub marked direct.");
qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
+ static_assert(!IsDirectEntrypoint(kQuickThrowNullPointer), "Non-direct C stub marked direct.");
qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
+ static_assert(!IsDirectEntrypoint(kQuickThrowStackOverflow), "Non-direct C stub marked direct.");
// Atomic 64-bit load/store
qpoints->pA64Load = QuasiAtomic::Read64;
+ static_assert(IsDirectEntrypoint(kQuickA64Load), "Direct C stub not marked direct.");
qpoints->pA64Store = QuasiAtomic::Write64;
+ static_assert(IsDirectEntrypoint(kQuickA64Store), "Direct C stub not marked direct.");
};
} // namespace art
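The block above pairs every entrypoint assignment with a compile-time check of its "directness", so the IsDirectEntrypoint table and the initializer can never drift apart silently. Below is a minimal sketch of the pattern; the enum and table are hypothetical stand-ins for ART's QuickEntrypointEnum and the MIPS-specific table, not the real definitions.

    // Hypothetical stand-in for QuickEntrypointEnum.
    enum SketchEntrypoint { kSketchFmod, kSketchLockObject };

    // Direct entrypoints are plain C functions callable without the usual
    // trampoline bookkeeping; assembly stubs are non-direct. The table is
    // constexpr so the asserts below evaluate at compile time.
    static constexpr bool kIsDirect[] = { true /* fmod, a C helper */, false /* asm stub */ };

    constexpr bool IsDirectEntrypoint(SketchEntrypoint entrypoint) {
      return kIsDirect[entrypoint];
    }

    // A mismatch between the table and an assignment now fails the build:
    static_assert(IsDirectEntrypoint(kSketchFmod), "Direct C stub not marked direct.");
    static_assert(!IsDirectEntrypoint(kSketchLockObject), "Non-direct C stub marked direct.");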
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index df2feb7..16f0e70 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1142,10 +1142,10 @@
addiu $sp, $sp, -24 # reserve arg slots
jal artQuickGenericJniEndTrampoline
s.d $f0, 16($sp) # pass result_f
- addiu $sp, $sp, 24 # remove arg slots
lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- bne $t0, $zero, 2f # check for pending exceptions
+ bne $t0, $zero, 1f # check for pending exceptions
+
move $sp, $s8 # tear down the alloca
# tear down the callee-save frame
@@ -1156,9 +1156,8 @@
nop
1:
- move $sp, $s8 # tear down the alloca
-2:
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ lw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
+ # This will create a new save-all frame, required by the runtime.
DELIVER_PENDING_EXCEPTION
END art_quick_generic_jni_trampoline
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 60e692b..6f1b826 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -849,7 +849,7 @@
dmfc1 $a2, $f0
ld $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- bne $t0, $zero, 2f # check for pending exceptions
+ bne $t0, $zero, 1f # check for pending exceptions
move $sp, $s8 # tear down the alloca
# tear down the callee-save frame
@@ -859,9 +859,8 @@
dmtc1 $v0, $f0 # place return value to FP return value
1:
- move $sp, $s8 # tear down the alloca
-2:
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ ld $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
+ # This will create a new save-all frame, required by the runtime.
DELIVER_PENDING_EXCEPTION
END art_quick_generic_jni_trampoline
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 6acc2a7..0d41a8f 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -278,6 +278,7 @@
"memory"); // clobber all
// TODO: Should we clobber the other registers?
#else
+ UNUSED(arg0, arg1, arg2, code, referrer);
LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
result = 0;
#endif
@@ -503,6 +504,7 @@
"memory"); // clobber all
// TODO: Should we clobber the other registers?
#else
+ UNUSED(arg0, arg1, arg2, code, referrer, hidden);
LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
result = 0;
#endif
@@ -792,6 +794,7 @@
// Test done.
#else
+ UNUSED(test);
LOG(INFO) << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
std::cout << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA << std::endl;
@@ -1326,6 +1329,7 @@
EXPECT_EQ(values[i], static_cast<uint8_t>(res)) << "Iteration " << i;
}
#else
+ UNUSED(f, self, referrer, test);
LOG(INFO) << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
std::cout << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA << std::endl;
@@ -1353,6 +1357,7 @@
EXPECT_EQ(values[i], static_cast<int8_t>(res)) << "Iteration " << i;
}
#else
+ UNUSED(f, self, referrer, test);
LOG(INFO) << "Skipping set_byte_static as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
std::cout << "Skipping set_byte_static as I don't know how to do that on " << kRuntimeISA << std::endl;
@@ -1388,6 +1393,7 @@
EXPECT_EQ(res, static_cast<uint8_t>(res2));
}
#else
+ UNUSED(obj, f, self, referrer, test);
LOG(INFO) << "Skipping set_boolean_instance as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
std::cout << "Skipping set_boolean_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
@@ -1420,6 +1426,7 @@
EXPECT_EQ(res, static_cast<int8_t>(res2));
}
#else
+ UNUSED(obj, f, self, referrer, test);
LOG(INFO) << "Skipping set_byte_instance as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
std::cout << "Skipping set_byte_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
@@ -1449,6 +1456,7 @@
EXPECT_EQ(values[i], static_cast<uint16_t>(res)) << "Iteration " << i;
}
#else
+ UNUSED(f, self, referrer, test);
LOG(INFO) << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
std::cout << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA << std::endl;
@@ -1477,6 +1485,7 @@
EXPECT_EQ(static_cast<int16_t>(res), values[i]) << "Iteration " << i;
}
#else
+ UNUSED(f, self, referrer, test);
LOG(INFO) << "Skipping set_short_static as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
std::cout << "Skipping set_short_static as I don't know how to do that on " << kRuntimeISA << std::endl;
@@ -1510,6 +1519,7 @@
EXPECT_EQ(res, static_cast<uint16_t>(res2));
}
#else
+ UNUSED(obj, f, self, referrer, test);
LOG(INFO) << "Skipping set_char_instance as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
std::cout << "Skipping set_char_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
@@ -1542,6 +1552,7 @@
EXPECT_EQ(res, static_cast<int16_t>(res2));
}
#else
+ UNUSED(obj, f, self, referrer, test);
LOG(INFO) << "Skipping set_short_instance as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
std::cout << "Skipping set_short_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
@@ -1571,6 +1582,7 @@
EXPECT_EQ(res, values[i]) << "Iteration " << i;
}
#else
+ UNUSED(f, self, referrer, test);
LOG(INFO) << "Skipping set32static as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
std::cout << "Skipping set32static as I don't know how to do that on " << kRuntimeISA << std::endl;
@@ -1607,6 +1619,7 @@
EXPECT_EQ(res, static_cast<int32_t>(res2));
}
#else
+ UNUSED(obj, f, self, referrer, test);
LOG(INFO) << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
std::cout << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA << std::endl;
@@ -1648,6 +1661,7 @@
set_and_check_static((*f)->GetDexFieldIndex(), nullptr, self, referrer, test);
#else
+ UNUSED(f, self, referrer, test);
LOG(INFO) << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
std::cout << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA << std::endl;
@@ -1692,6 +1706,7 @@
set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
#else
+ UNUSED(obj, f, self, referrer, test);
LOG(INFO) << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
std::cout << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA << std::endl;
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index c2acdd1..47bc5ea 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1349,7 +1349,7 @@
// Check for error = 0.
test %eax, %eax
- jz .Lentry_error
+ jz .Lexception_in_native
// Release part of the alloca.
movl %edx, %esp
@@ -1371,15 +1371,16 @@
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
call SYMBOL(artQuickGenericJniEndTrampoline)
- // Tear down the alloca.
- movl %ebp, %esp
- CFI_DEF_CFA_REGISTER(esp)
-
// Pending exceptions possible.
mov %fs:THREAD_EXCEPTION_OFFSET, %ebx
testl %ebx, %ebx
jnz .Lexception_in_native
+ // Tear down the alloca.
+ movl %ebp, %esp
+ CFI_DEF_CFA_REGISTER(esp)
+
// Tear down the callee-save frame.
// Remove space for FPR args and EAX
addl LITERAL(4 + 4 * 8), %esp
@@ -1397,11 +1398,11 @@
movd %edx, %xmm1
punpckldq %xmm1, %xmm0
ret
-.Lentry_error:
- movl %ebp, %esp
- CFI_DEF_CFA_REGISTER(esp)
.Lexception_in_native:
- RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ movl %fs:THREAD_TOP_QUICK_FRAME_OFFSET, %esp
+ // Do a call to push a new save-all frame required by the runtime.
+ call .Lexception_call
+.Lexception_call:
DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_generic_jni_trampoline
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index c865541..406126b 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1361,7 +1361,7 @@
// Check for error = 0.
test %rax, %rax
- jz .Lentry_error
+ jz .Lexception_in_native
// Release part of the alloca.
movq %rdx, %rsp
@@ -1398,16 +1398,16 @@
movq %xmm0, %rdx
call SYMBOL(artQuickGenericJniEndTrampoline)
- // Tear down the alloca.
- movq %rbp, %rsp
- CFI_DEF_CFA_REGISTER(rsp)
-
// Pending exceptions possible.
// TODO: use cmpq, needs direct encoding because of gas bug
movq %gs:THREAD_EXCEPTION_OFFSET, %rcx
test %rcx, %rcx
jnz .Lexception_in_native
+ // Tear down the alloca.
+ movq %rbp, %rsp
+ CFI_DEF_CFA_REGISTER(rsp)
+
// Tear down the callee-save frame.
// Load FPRs.
// movq %xmm0, 16(%rsp) // doesn't make sense!!!
@@ -1440,40 +1440,12 @@
// store into fpr, for when it's a fpr return...
movq %rax, %xmm0
ret
-.Lentry_error:
- movq %rbp, %rsp
- CFI_DEF_CFA_REGISTER(rsp)
.Lexception_in_native:
- // TODO: the handle scope contains the this pointer which is used by the debugger for exception
- // delivery.
- movq %xmm0, 16(%rsp) // doesn't make sense!!!
- movq 24(%rsp), %xmm1 // neither does this!!!
- movq 32(%rsp), %xmm2
- movq 40(%rsp), %xmm3
- movq 48(%rsp), %xmm4
- movq 56(%rsp), %xmm5
- movq 64(%rsp), %xmm6
- movq 72(%rsp), %xmm7
- movq 80(%rsp), %xmm12
- movq 88(%rsp), %xmm13
- movq 96(%rsp), %xmm14
- movq 104(%rsp), %xmm15
- // was 80 + 32 bytes
- addq LITERAL(80 + 4*8), %rsp
- CFI_ADJUST_CFA_OFFSET(-80 - 4*8)
- // Save callee and GPR args, mixed together to agree with core spills bitmap.
- POP rcx // Arg.
- POP rdx // Arg.
- POP rbx // Callee save.
- POP rbp // Callee save.
- POP rsi // Arg.
- POP r8 // Arg.
- POP r9 // Arg.
- POP r12 // Callee save.
- POP r13 // Callee save.
- POP r14 // Callee save.
- POP r15 // Callee save.
-
+ movq %gs:THREAD_TOP_QUICK_FRAME_OFFSET, %rsp
+ CFI_DEF_CFA_REGISTER(rsp)
+ // Do a call to push a new save-all frame required by the runtime.
+ call .Lexception_call
+.Lexception_call:
DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_generic_jni_trampoline
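All four trampolines (mips, mips64, x86, x86_64) receive the same fix: when artQuickGenericJniEndTrampoline returns with an exception pending, the stub no longer unwinds its own refs-and-args frame; it reloads the stack pointer from the thread's top quick frame and lets DELIVER_PENDING_EXCEPTION build the fresh save-all frame the runtime requires. The pseudo-C++ below only illustrates that control flow; every helper name is invented for the sketch.

    // Hypothetical helpers standing in for the assembly operations.
    struct Thread;
    const void* LoadThreadException(Thread* self);   // the THREAD_EXCEPTION_OFFSET load
    void* LoadTopQuickFrame(Thread* self);           // the THREAD_TOP_QUICK_FRAME_OFFSET load
    void SetStackPointer(void* sp);                  // the new "load ... into sp" instruction
    void DeliverPendingException(Thread* self);      // the DELIVER_PENDING_EXCEPTION macro
    void CallGenericJniEndTrampoline(Thread* self);  // artQuickGenericJniEndTrampoline call

    void GenericJniEpilogueSketch(Thread* self) {
      CallGenericJniEndTrampoline(self);          // may leave a pending exception
      if (LoadThreadException(self) != nullptr) {
        // Old code popped the refs-and-args frame here; new code abandons the
        // JNI stack entirely and rebuilds from the thread's top quick frame.
        SetStackPointer(LoadTopQuickFrame(self));
        DeliverPendingException(self);            // pushes the new save-all frame
      }
      // No exception: tear down the alloca and callee-save frame, then return.
    }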
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index b3f812e..e6380bf 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -129,7 +129,7 @@
next_(nullptr) {
if (kUseMemMap) {
std::string error_msg;
- map_ = MemMap::MapAnonymous("dalvik-arena", NULL, size, PROT_READ | PROT_WRITE, false,
+ map_ = MemMap::MapAnonymous("dalvik-arena", nullptr, size, PROT_READ | PROT_WRITE, false, false,
&error_msg);
CHECK(map_ != nullptr) << error_msg;
memory_ = map_->Begin();
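Every MemMap::MapAnonymous call in this patch grows one extra boolean before the error-message out-parameter. Judging from the `true /* reuse */` call added in the elf_file.cc hunk further down, the new argument appears to let a mapping be placed over an address range that is already reserved; the sketch below restates the arena call site under that assumption (the older boolean is likewise assumed to be a low-4GB flag).

    #include <string>

    // Mirrors the arena allocator call site above; MemMap and CHECK are ART's.
    MemMap* AllocateArenaMap(size_t size) {
      std::string error_msg;
      // name, requested address, byte count, protection, low_4gb(?), reuse (new), error out.
      MemMap* map = MemMap::MapAnonymous("dalvik-arena", nullptr, size,
                                         PROT_READ | PROT_WRITE,
                                         /* low_4gb */ false,
                                         /* reuse */ false,
                                         &error_msg);
      CHECK(map != nullptr) << error_msg;
      return map;
    }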
diff --git a/runtime/base/arena_containers.h b/runtime/base/arena_containers.h
index 162eb16..ceff6e8 100644
--- a/runtime/base/arena_containers.h
+++ b/runtime/base/arena_containers.h
@@ -66,8 +66,8 @@
class ArenaAllocatorAdapterKindImpl<false> {
public:
// Not tracking allocations, ignore the supplied kind and arbitrarily provide kArenaAllocSTL.
- explicit ArenaAllocatorAdapterKindImpl(ArenaAllocKind kind) { UNUSED(kind); }
- ArenaAllocatorAdapterKindImpl& operator=(const ArenaAllocatorAdapterKindImpl& other) = default;
+ explicit ArenaAllocatorAdapterKindImpl(ArenaAllocKind kind ATTRIBUTE_UNUSED) {}
+ ArenaAllocatorAdapterKindImpl& operator=(const ArenaAllocatorAdapterKindImpl&) = default;
ArenaAllocKind Kind() { return kArenaAllocSTL; }
};
@@ -75,7 +75,7 @@
class ArenaAllocatorAdapterKindImpl {
public:
explicit ArenaAllocatorAdapterKindImpl(ArenaAllocKind kind) : kind_(kind) { }
- ArenaAllocatorAdapterKindImpl& operator=(const ArenaAllocatorAdapterKindImpl& other) = default;
+ ArenaAllocatorAdapterKindImpl& operator=(const ArenaAllocatorAdapterKindImpl&) = default;
ArenaAllocKind Kind() { return kind_; }
private:
@@ -109,8 +109,8 @@
ArenaAllocatorAdapterKind(other),
arena_allocator_(other.arena_allocator_) {
}
- ArenaAllocatorAdapter(const ArenaAllocatorAdapter& other) = default;
- ArenaAllocatorAdapter& operator=(const ArenaAllocatorAdapter& other) = default;
+ ArenaAllocatorAdapter(const ArenaAllocatorAdapter&) = default;
+ ArenaAllocatorAdapter& operator=(const ArenaAllocatorAdapter&) = default;
~ArenaAllocatorAdapter() = default;
private:
@@ -147,8 +147,8 @@
ArenaAllocatorAdapterKind(other),
arena_allocator_(other.arena_allocator_) {
}
- ArenaAllocatorAdapter(const ArenaAllocatorAdapter& other) = default;
- ArenaAllocatorAdapter& operator=(const ArenaAllocatorAdapter& other) = default;
+ ArenaAllocatorAdapter(const ArenaAllocatorAdapter&) = default;
+ ArenaAllocatorAdapter& operator=(const ArenaAllocatorAdapter&) = default;
~ArenaAllocatorAdapter() = default;
size_type max_size() const {
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index cc1a4a1..3d007ba 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -42,6 +42,7 @@
bool gc;
bool heap;
bool jdwp;
+ bool jit;
bool jni;
bool monitor;
bool profiler;
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 745b209..45d2347 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -67,6 +67,7 @@
kReferenceQueueWeakReferencesLock,
kReferenceQueueClearedReferencesLock,
kReferenceProcessorLock,
+ kJitCodeCacheLock,
kRosAllocGlobalLock,
kRosAllocBracketLock,
kRosAllocBulkFreeLock,
diff --git a/runtime/base/scoped_arena_containers.h b/runtime/base/scoped_arena_containers.h
index 664a909..df79085 100644
--- a/runtime/base/scoped_arena_containers.h
+++ b/runtime/base/scoped_arena_containers.h
@@ -85,8 +85,8 @@
ArenaAllocatorAdapterKind(other),
arena_stack_(other.arena_stack_) {
}
- ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter& other) = default;
- ScopedArenaAllocatorAdapter& operator=(const ScopedArenaAllocatorAdapter& other) = default;
+ ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter&) = default;
+ ScopedArenaAllocatorAdapter& operator=(const ScopedArenaAllocatorAdapter&) = default;
~ScopedArenaAllocatorAdapter() = default;
private:
@@ -128,8 +128,8 @@
ArenaAllocatorAdapterKind(other),
arena_stack_(other.arena_stack_) {
}
- ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter& other) = default;
- ScopedArenaAllocatorAdapter& operator=(const ScopedArenaAllocatorAdapter& other) = default;
+ ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter&) = default;
+ ScopedArenaAllocatorAdapter& operator=(const ScopedArenaAllocatorAdapter&) = default;
~ScopedArenaAllocatorAdapter() = default;
size_type max_size() const {
diff --git a/runtime/base/variant_map.h b/runtime/base/variant_map.h
index c9718fc..8655a9e 100644
--- a/runtime/base/variant_map.h
+++ b/runtime/base/variant_map.h
@@ -120,8 +120,8 @@
protected:
// Avoid the object slicing problem; use Clone() instead.
- VariantMapKeyRaw(const VariantMapKeyRaw& other) = default;
- VariantMapKeyRaw(VariantMapKeyRaw&& other) = default;
+ VariantMapKeyRaw(const VariantMapKeyRaw&) = default;
+ VariantMapKeyRaw(VariantMapKeyRaw&&) = default;
private:
size_t key_counter_; // Runtime type ID. Unique each time a new type is reified.
@@ -174,8 +174,8 @@
deleter(reinterpret_cast<TValue*>(value));
}
- VariantMapKey(const VariantMapKey& other) = default;
- VariantMapKey(VariantMapKey&& other) = default;
+ VariantMapKey(const VariantMapKey&) = default;
+ VariantMapKey(VariantMapKey&&) = default;
template <typename Base, template <typename TV> class TKey> friend struct VariantMap;
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 3278751..2989b8c 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -43,6 +43,8 @@
#include "handle_scope.h"
#include "intern_table.h"
#include "interpreter/interpreter.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "leb128.h"
#include "oat.h"
#include "oat_file.h"
@@ -91,15 +93,14 @@
// a NoClassDefFoundError (v2 2.17.5). The exception to this rule is if we
// failed in verification, in which case v2 5.4.1 says we need to re-throw
// the previous error.
- Runtime* runtime = Runtime::Current();
- bool is_compiler = runtime->IsCompiler();
- if (!is_compiler) { // Give info if this occurs at runtime.
+ Runtime* const runtime = Runtime::Current();
+ if (!runtime->IsAotCompiler()) { // Give info if this occurs at runtime.
LOG(INFO) << "Rejecting re-init on previously-failed class " << PrettyClass(c);
}
CHECK(c->IsErroneous()) << PrettyClass(c) << " " << c->GetStatus();
Thread* self = Thread::Current();
- if (is_compiler) {
+ if (runtime->IsAotCompiler()) {
// At compile time, accurate errors and NCDFE are disabled to speed compilation.
mirror::Throwable* pre_allocated = runtime->GetPreAllocatedNoClassDefFoundError();
self->SetException(ThrowLocation(), pre_allocated);
@@ -428,7 +429,7 @@
// Set up GenericJNI entrypoint. That is mainly a hack for common_compiler_test.h so that
// we do not need friend classes or a publicly exposed setter.
quick_generic_jni_trampoline_ = GetQuickGenericJniStub();
- if (!runtime->IsCompiler()) {
+ if (!runtime->IsAotCompiler()) {
// We need to set up the generic trampolines since we don't have an image.
quick_resolution_trampoline_ = GetQuickResolutionStub();
quick_imt_conflict_trampoline_ = GetQuickImtConflictStub();
@@ -1037,8 +1038,7 @@
const char* oat_location,
std::string* error_msg) {
std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_location, oat_location, nullptr, nullptr,
- !Runtime::Current()->IsCompiler(),
- error_msg));
+ !Runtime::Current()->IsAotCompiler(), error_msg));
if (oat_file.get() == nullptr) {
*error_msg = StringPrintf("Failed to find existing oat file at %s: %s", oat_location,
error_msg->c_str());
@@ -1109,8 +1109,8 @@
return nullptr;
}
std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_location, oat_location, nullptr, nullptr,
- !Runtime::Current()->IsCompiler(),
- &error_msg));
+ !Runtime::Current()->IsAotCompiler(),
+ &error_msg));
if (oat_file.get() == nullptr) {
std::string compound_msg = StringPrintf("\nFailed to open generated oat file '%s': %s",
oat_location, error_msg.c_str());
@@ -1350,7 +1350,7 @@
*already_opened = false;
const Runtime* runtime = Runtime::Current();
CHECK(runtime != nullptr);
- bool executable = !runtime->IsCompiler();
+ bool executable = !runtime->IsAotCompiler();
std::string odex_error_msg;
bool should_patch_system = false;
@@ -1518,7 +1518,7 @@
bool success = Exec(argv, error_msg);
if (success) {
std::unique_ptr<OatFile> output(OatFile::Open(output_oat, output_oat, nullptr, nullptr,
- !runtime->IsCompiler(), error_msg));
+ !runtime->IsAotCompiler(), error_msg));
bool checksum_verified = false;
if (output.get() != nullptr && CheckOatFile(runtime, output.get(), isa, &checksum_verified,
error_msg)) {
@@ -1532,7 +1532,7 @@
"but was unable to open output file '%s': %s",
input_oat.c_str(), output_oat.c_str(), error_msg->c_str());
}
- } else if (!runtime->IsCompiler()) {
+ } else if (!runtime->IsAotCompiler()) {
// patchoat failed which means we probably don't have enough room to place the output oat file,
// instead of failing we should just run the interpreter from the dex files in the input oat.
LOG(WARNING) << "Patching of oat file '" << input_oat << "' failed. Attempting to use oat file "
@@ -1619,22 +1619,20 @@
if (oat_file != nullptr) {
return oat_file;
}
-
- return OatFile::Open(oat_location, oat_location, nullptr, nullptr, !Runtime::Current()->IsCompiler(),
- error_msg);
+ return OatFile::Open(oat_location, oat_location, nullptr, nullptr,
+ !Runtime::Current()->IsAotCompiler(), error_msg);
}
void ClassLinker::InitFromImageInterpretOnlyCallback(mirror::Object* obj, void* arg) {
ClassLinker* class_linker = reinterpret_cast<ClassLinker*>(arg);
DCHECK(obj != nullptr);
DCHECK(class_linker != nullptr);
- size_t pointer_size = class_linker->image_pointer_size_;
-
if (obj->IsArtMethod()) {
mirror::ArtMethod* method = obj->AsArtMethod();
if (!method->IsNative()) {
+ const size_t pointer_size = class_linker->image_pointer_size_;
method->SetEntryPointFromInterpreterPtrSize(artInterpreterToInterpreterBridge, pointer_size);
- if (method != Runtime::Current()->GetResolutionMethod()) {
+ if (!method->IsRuntimeMethod() && method != Runtime::Current()->GetResolutionMethod()) {
method->SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(),
pointer_size);
}
@@ -1703,8 +1701,8 @@
// bitmap walk.
mirror::ArtMethod::SetClass(GetClassRoot(kJavaLangReflectArtMethod));
size_t art_method_object_size = mirror::ArtMethod::GetJavaLangReflectArtMethod()->GetObjectSize();
- if (!Runtime::Current()->IsCompiler()) {
- // Compiler supports having an image with a different pointer size than the runtime. This
+ if (!Runtime::Current()->IsAotCompiler()) {
+ // The AOT compiler supports having an image with a different pointer size than the runtime. This
// happens on the host for compile 32 bit tests since we use a 64 bit libart compiler. We may
// also use 32 bit dex2oat on a system with 64 bit apps.
CHECK_EQ(art_method_object_size, mirror::ArtMethod::InstanceSize(sizeof(void*)))
@@ -1719,7 +1717,7 @@
// Set entry point to interpreter if in InterpretOnly mode.
Runtime* runtime = Runtime::Current();
- if (!runtime->IsCompiler() && runtime->GetInstrumentation()->InterpretOnly()) {
+ if (!runtime->IsAotCompiler() && runtime->GetInstrumentation()->InterpretOnly()) {
heap->VisitObjects(InitFromImageInterpretOnlyCallback, this);
}
@@ -2135,15 +2133,16 @@
}
mirror::Object* dex_file = dex_file_field->GetObject(element);
if (dex_file != nullptr) {
- const uint64_t cookie = cookie_field->GetLong(dex_file);
- auto* dex_files =
- reinterpret_cast<std::vector<const DexFile*>*>(static_cast<uintptr_t>(cookie));
- if (dex_files == nullptr) {
+ mirror::LongArray* long_array = cookie_field->GetObject(dex_file)->AsLongArray();
+ if (long_array == nullptr) {
// This should never happen so log a warning.
LOG(WARNING) << "Null DexFile::mCookie for " << descriptor;
break;
}
- for (const DexFile* cp_dex_file : *dex_files) {
+ int32_t long_array_size = long_array->GetLength();
+ for (int32_t j = 0; j < long_array_size; ++j) {
+ const DexFile* cp_dex_file = reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(
+ long_array->GetWithoutChecks(j)));
const DexFile::ClassDef* dex_class_def = cp_dex_file->FindClassDef(descriptor, hash);
if (dex_class_def != nullptr) {
RegisterDexFile(*cp_dex_file);
@@ -2523,21 +2522,24 @@
}
bool found;
OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
- const void* result = nullptr;
if (found) {
- result = oat_method.GetQuickCode();
- }
-
- if (result == nullptr) {
- if (method->IsNative()) {
- // No code and native? Use generic trampoline.
- result = GetQuickGenericJniStub();
- } else {
- // No code? You must mean to go into the interpreter.
- result = GetQuickToInterpreterBridge();
+ auto* code = oat_method.GetQuickCode();
+ if (code != nullptr) {
+ return code;
}
}
- return result;
+ jit::Jit* const jit = Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ auto* code = jit->GetCodeCache()->GetCodeFor(method);
+ if (code != nullptr) {
+ return code;
+ }
+ }
+ if (method->IsNative()) {
+ // No code and native? Use generic trampoline.
+ return GetQuickGenericJniStub();
+ }
+ return GetQuickToInterpreterBridge();
}
const void* ClassLinker::GetOatMethodQuickCodeFor(mirror::ArtMethod* method) {
@@ -2546,7 +2548,17 @@
}
bool found;
OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
- return found ? oat_method.GetQuickCode() : nullptr;
+ if (found) {
+ return oat_method.GetQuickCode();
+ }
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ auto* code = jit->GetCodeCache()->GetCodeFor(method);
+ if (code != nullptr) {
+ return code;
+ }
+ }
+ return nullptr;
}
const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx,
@@ -2582,7 +2594,7 @@
}
Runtime* runtime = Runtime::Current();
if (!runtime->IsStarted() || runtime->UseCompileTimeClassPath()) {
- if (runtime->IsCompiler() || runtime->GetHeap()->HasImageSpace()) {
+ if (runtime->IsAotCompiler() || runtime->GetHeap()->HasImageSpace()) {
return; // OAT file unavailable.
}
}
@@ -2635,7 +2647,7 @@
const OatFile::OatClass* oat_class,
uint32_t class_def_method_index) {
Runtime* runtime = Runtime::Current();
- if (runtime->IsCompiler()) {
+ if (runtime->IsAotCompiler()) {
// The following code only applies to a non-compiler runtime.
return;
}
@@ -3474,7 +3486,7 @@
EnsurePreverifiedMethods(klass);
return;
}
- if (klass->IsCompileTimeVerified() && Runtime::Current()->IsCompiler()) {
+ if (klass->IsCompileTimeVerified() && Runtime::Current()->IsAotCompiler()) {
return;
}
@@ -3490,7 +3502,7 @@
} else {
CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime)
<< PrettyClass(klass.Get());
- CHECK(!Runtime::Current()->IsCompiler());
+ CHECK(!Runtime::Current()->IsAotCompiler());
klass->SetStatus(mirror::Class::kStatusVerifyingAtRuntime, self);
}
@@ -3526,7 +3538,7 @@
self->GetException(nullptr)->SetCause(cause.Get());
}
ClassReference ref(klass->GetDexCache()->GetDexFile(), klass->GetDexClassDefIndex());
- if (Runtime::Current()->IsCompiler()) {
+ if (Runtime::Current()->IsAotCompiler()) {
Runtime::Current()->GetCompilerCallbacks()->ClassRejected(ref);
}
klass->SetStatus(mirror::Class::kStatusError, self);
@@ -3551,7 +3563,7 @@
std::string error_msg;
if (!preverified) {
verifier_failure = verifier::MethodVerifier::VerifyClass(self, klass.Get(),
- Runtime::Current()->IsCompiler(),
+ Runtime::Current()->IsAotCompiler(),
&error_msg);
}
if (preverified || verifier_failure != verifier::MethodVerifier::kHardFailure) {
@@ -3579,7 +3591,7 @@
// Soft failures at compile time should be retried at runtime. Soft
// failures at runtime will be handled by slow paths in the generated
// code. Set status accordingly.
- if (Runtime::Current()->IsCompiler()) {
+ if (Runtime::Current()->IsAotCompiler()) {
klass->SetStatus(mirror::Class::kStatusRetryVerificationAtRuntime, self);
} else {
klass->SetStatus(mirror::Class::kStatusVerified, self);
@@ -3620,7 +3632,7 @@
// we are not compiling the image or if the class we're verifying is not part of
// the app. In other words, we will only check for preverification of bootclasspath
// classes.
- if (Runtime::Current()->IsCompiler()) {
+ if (Runtime::Current()->IsAotCompiler()) {
// Are we compiling the bootclasspath?
if (!Runtime::Current()->UseCompileTimeClassPath()) {
return false;
@@ -3646,7 +3658,7 @@
// image (that we just failed loading), and the verifier can't be run on quickened opcodes when
// the runtime isn't started. On the other hand, app classes can be re-verified even if they are
// already pre-opted, as then the runtime is started.
- if (!Runtime::Current()->IsCompiler() &&
+ if (!Runtime::Current()->IsAotCompiler() &&
!Runtime::Current()->GetHeap()->HasImageSpace() &&
klass->GetClassLoader() != nullptr) {
return false;
@@ -4094,7 +4106,7 @@
CHECK(self->IsExceptionPending());
VlogClassInitializationFailure(klass);
} else {
- CHECK(Runtime::Current()->IsCompiler());
+ CHECK(Runtime::Current()->IsAotCompiler());
CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime);
}
return false;
@@ -4229,8 +4241,8 @@
} else if (Runtime::Current()->IsTransactionAborted()) {
// The exception thrown when the transaction aborted has been caught and cleared
// so we need to throw it again now.
- LOG(WARNING) << "Return from class initializer of " << PrettyDescriptor(klass.Get())
- << " without exception while transaction was aborted: re-throw it now.";
+ VLOG(compiler) << "Return from class initializer of " << PrettyDescriptor(klass.Get())
+ << " without exception while transaction was aborted: re-throw it now.";
Runtime::Current()->ThrowInternalErrorForAbortedTransaction(self);
klass->SetStatus(mirror::Class::kStatusError, self);
success = false;
@@ -4275,7 +4287,8 @@
if (klass->GetStatus() == mirror::Class::kStatusInitializing) {
continue;
}
- if (klass->GetStatus() == mirror::Class::kStatusVerified && Runtime::Current()->IsCompiler()) {
+ if (klass->GetStatus() == mirror::Class::kStatusVerified &&
+ Runtime::Current()->IsAotCompiler()) {
// Compile time initialization failed.
return false;
}
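The class_linker.cc rewrite turns the code lookup into an explicit four-step chain: AOT (oat) code, then the new JIT code cache, then the generic JNI stub for natives, and finally the interpreter bridge. A condensed restatement of the logic from the hunk above (not compilable outside ClassLinker, since it reuses its members):

    const void* GetQuickCodeForSketch(mirror::ArtMethod* method) {
      bool found;
      OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
      if (found) {
        if (const void* code = oat_method.GetQuickCode()) {
          return code;                        // 1. AOT-compiled code wins.
        }
      }
      if (jit::Jit* jit = Runtime::Current()->GetJit()) {
        if (const void* code = jit->GetCodeCache()->GetCodeFor(method)) {
          return code;                        // 2. JIT-compiled code (new in this change).
        }
      }
      return method->IsNative() ? GetQuickGenericJniStub()        // 3. native with no code
                                : GetQuickToInterpreterBridge();  // 4. interpret
    }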
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 13bbdeb..c0dd197 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -287,6 +287,13 @@
Dbg::PostException(throw_location, catch_method, catch_dex_pc, exception_object);
}
+ // Only the Jit cares about backward branches, so the debugger should never receive this event.
+ void BackwardBranch(Thread* /*thread*/, mirror::ArtMethod* method, int32_t dex_pc_offset)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ LOG(ERROR) << "Unexpected backward branch event in debugger " << PrettyMethod(method)
+ << " " << dex_pc_offset;
+ }
+
private:
DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener);
} gDebugInstrumentationListener;
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index e121a08..c8ede48 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -1028,6 +1028,13 @@
// from an oat file, e.g., directly from an apk.
const OatFile* oat_file_;
};
+
+struct DexFileReference {
+ DexFileReference(const DexFile* file, uint32_t idx) : dex_file(file), index(idx) { }
+ const DexFile* dex_file;
+ uint32_t index;
+};
+
std::ostream& operator<<(std::ostream& os, const DexFile& dex_file);
// Iterate over a dex file's ProtoId's parameters
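The new DexFileReference is a plain (dex file, index) pair with no behavior of its own; presumably it serves as a lightweight key for JIT-side method tables. A trivial usage sketch, given some mirror::ArtMethod* method (the accessors are standard ArtMethod ones, assumed available):

    // Key a method by its defining dex file and method index.
    DexFileReference ref(method->GetDexFile(), method->GetDexMethodIndex());
    DCHECK_EQ(ref.dex_file, method->GetDexFile());
    DCHECK_EQ(ref.index, method->GetDexMethodIndex());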
diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc
index a802759..69fe874 100644
--- a/runtime/dex_instruction.cc
+++ b/runtime/dex_instruction.cc
@@ -134,6 +134,23 @@
return os.str();
}
+std::string Instruction::DumpHexLE(size_t instr_code_units) const {
+ size_t inst_length = SizeInCodeUnits();
+ if (inst_length > instr_code_units) {
+ inst_length = instr_code_units;
+ }
+ std::ostringstream os;
+ const uint16_t* insn = reinterpret_cast<const uint16_t*>(this);
+ for (size_t i = 0; i < inst_length; i++) {
+ os << StringPrintf("%02x%02x", static_cast<uint8_t>(insn[i] & 0x00FF),
+ static_cast<uint8_t>((insn[i] & 0xFF00) >> 8)) << " ";
+ }
+ for (size_t i = inst_length; i < instr_code_units; i++) {
+ os << " ";
+ }
+ return os.str();
+}
+
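Unlike DumpHex, which prints each 16-bit code unit as a single value, DumpHexLE splits the unit into its low and high bytes and prints the low byte first, matching the byte order of the dex file on disk. A worked example with an invented code unit:

    uint16_t unit = 0x2B01;
    uint8_t low  = static_cast<uint8_t>(unit & 0x00FF);         // 0x01 (the opcode byte)
    uint8_t high = static_cast<uint8_t>((unit & 0xFF00) >> 8);  // 0x2b
    // StringPrintf("%02x%02x", low, high) yields "012b"; DumpHexLE(2) would
    // print "012b " followed by one code unit's worth of space padding.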
std::string Instruction::DumpString(const DexFile* file) const {
std::ostringstream os;
const char* opcode = kInstructionNames[Opcode()];
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index af5d9d0..d3b9eb4 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -525,6 +525,10 @@
// Dump code_units worth of this instruction, padding to code_units for shorter instructions
std::string DumpHex(size_t code_units) const;
+ // Dump instr_code_units worth of this instruction in little-endian byte order, padding to
+ // instr_code_units for shorter instructions.
+ std::string DumpHexLE(size_t instr_code_units) const;
+
uint16_t Fetch16(size_t offset) const {
const uint16_t* insns = reinterpret_cast<const uint16_t*>(this);
return insns[offset];
diff --git a/compiler/utils/dex_instruction_utils.h b/runtime/dex_instruction_utils.h
similarity index 82%
rename from compiler/utils/dex_instruction_utils.h
rename to runtime/dex_instruction_utils.h
index bb2c592..1a671c5 100644
--- a/compiler/utils/dex_instruction_utils.h
+++ b/runtime/dex_instruction_utils.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_UTILS_DEX_INSTRUCTION_UTILS_H_
-#define ART_COMPILER_UTILS_DEX_INSTRUCTION_UTILS_H_
+#ifndef ART_RUNTIME_DEX_INSTRUCTION_UTILS_H_
+#define ART_RUNTIME_DEX_INSTRUCTION_UTILS_H_
#include "dex_instruction.h"
@@ -58,6 +58,11 @@
opcode != Instruction::RETURN_VOID_BARRIER;
}
+constexpr bool IsInstructionQuickInvoke(Instruction::Code opcode) {
+ return opcode == Instruction::INVOKE_VIRTUAL_QUICK ||
+ opcode == Instruction::INVOKE_VIRTUAL_RANGE_QUICK;
+}
+
constexpr bool IsInstructionInvokeStatic(Instruction::Code opcode) {
return opcode == Instruction::INVOKE_STATIC || opcode == Instruction::INVOKE_STATIC_RANGE;
}
@@ -102,6 +107,11 @@
return Instruction::IGET <= code && code <= Instruction::IPUT_SHORT;
}
+constexpr bool IsInstructionIGetQuickOrIPutQuick(Instruction::Code code) {
+ return (code >= Instruction::IGET_QUICK && code <= Instruction::IPUT_OBJECT_QUICK) ||
+ (code >= Instruction::IPUT_BOOLEAN_QUICK && code <= Instruction::IGET_SHORT_QUICK);
+}
+
constexpr bool IsInstructionSGetOrSPut(Instruction::Code code) {
return Instruction::SGET <= code && code <= Instruction::SPUT_SHORT;
}
@@ -181,6 +191,29 @@
return (code >= Instruction::IPUT) ? IPutMemAccessType(code) : IGetMemAccessType(code);
}
+static inline DexMemAccessType IGetQuickOrIPutQuickMemAccessType(Instruction::Code code) {
+ DCHECK(IsInstructionIGetQuickOrIPutQuick(code));
+ switch (code) {
+ case Instruction::IGET_QUICK: case Instruction::IPUT_QUICK:
+ return kDexMemAccessWord;
+ case Instruction::IGET_WIDE_QUICK: case Instruction::IPUT_WIDE_QUICK:
+ return kDexMemAccessWide;
+ case Instruction::IGET_OBJECT_QUICK: case Instruction::IPUT_OBJECT_QUICK:
+ return kDexMemAccessObject;
+ case Instruction::IGET_BOOLEAN_QUICK: case Instruction::IPUT_BOOLEAN_QUICK:
+ return kDexMemAccessBoolean;
+ case Instruction::IGET_BYTE_QUICK: case Instruction::IPUT_BYTE_QUICK:
+ return kDexMemAccessByte;
+ case Instruction::IGET_CHAR_QUICK: case Instruction::IPUT_CHAR_QUICK:
+ return kDexMemAccessChar;
+ case Instruction::IGET_SHORT_QUICK: case Instruction::IPUT_SHORT_QUICK:
+ return kDexMemAccessShort;
+ default:
+ LOG(FATAL) << code;
+ UNREACHABLE();
+ }
+}
+
constexpr DexMemAccessType SGetOrSPutMemAccessType(Instruction::Code code) {
#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionSGetOrSPut(code));
@@ -197,4 +230,4 @@
} // namespace art
-#endif // ART_COMPILER_UTILS_DEX_INSTRUCTION_UTILS_H_
+#endif // ART_RUNTIME_DEX_INSTRUCTION_UTILS_H_
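Moving this header out of compiler/utils into the runtime makes the quickened-opcode predicates available to runtime code, which presumably needs them now that the JIT sees quickened dex. A small usage sketch, assuming the usual ART includes and namespace:

    #include "dex_instruction.h"
    #include "dex_instruction_utils.h"  // its new runtime location after this move

    // Returns true and sets *out_type for quickened field accesses.
    bool ClassifyQuickenedAccess(art::Instruction::Code opcode, art::DexMemAccessType* out_type) {
      if (art::IsInstructionIGetQuickOrIPutQuick(opcode)) {
        *out_type = art::IGetQuickOrIPutQuickMemAccessType(opcode);
        return true;  // e.g. IGET_WIDE_QUICK and IPUT_WIDE_QUICK yield kDexMemAccessWide
      }
      // IsInstructionQuickInvoke(opcode) identifies INVOKE_VIRTUAL_QUICK and
      // INVOKE_VIRTUAL_RANGE_QUICK, which carry a vtable index instead.
      return false;
    }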
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index a22e274..3490bcf 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -1370,7 +1370,7 @@
reservation_name += file_->GetPath();
std::unique_ptr<MemMap> reserve(MemMap::MapAnonymous(reservation_name.c_str(),
reserve_base_override,
- GetLoadedSize(), PROT_NONE, false,
+ GetLoadedSize(), PROT_NONE, false, false,
error_msg));
if (reserve.get() == nullptr) {
*error_msg = StringPrintf("Failed to allocate %s: %s",
@@ -1411,32 +1411,72 @@
} else {
flags |= MAP_PRIVATE;
}
- if (file_length < (program_header->p_offset + program_header->p_memsz)) {
- *error_msg = StringPrintf("File size of %zd bytes not large enough to contain ELF segment "
- "%d of %" PRIu64 " bytes: '%s'", file_length, i,
- static_cast<uint64_t>(program_header->p_offset + program_header->p_memsz),
+ if (program_header->p_filesz > program_header->p_memsz) {
+ *error_msg = StringPrintf("Invalid p_filesz > p_memsz (%" PRIu64 " > %" PRIu64 "): %s",
+ static_cast<uint64_t>(program_header->p_filesz),
+ static_cast<uint64_t>(program_header->p_memsz),
file_->GetPath().c_str());
return false;
}
- std::unique_ptr<MemMap> segment(MemMap::MapFileAtAddress(p_vaddr,
- program_header->p_memsz,
- prot, flags, file_->Fd(),
- program_header->p_offset,
- true, // implies MAP_FIXED
- file_->GetPath().c_str(),
- error_msg));
- if (segment.get() == nullptr) {
- *error_msg = StringPrintf("Failed to map ELF file segment %d from %s: %s",
- i, file_->GetPath().c_str(), error_msg->c_str());
+ if (program_header->p_filesz < program_header->p_memsz &&
+ !IsAligned<kPageSize>(program_header->p_filesz)) {
+ *error_msg = StringPrintf("Unsupported unaligned p_filesz < p_memsz (%" PRIu64
+ " < %" PRIu64 "): %s",
+ static_cast<uint64_t>(program_header->p_filesz),
+ static_cast<uint64_t>(program_header->p_memsz),
+ file_->GetPath().c_str());
return false;
}
- if (segment->Begin() != p_vaddr) {
- *error_msg = StringPrintf("Failed to map ELF file segment %d from %s at expected address %p, "
- "instead mapped to %p",
- i, file_->GetPath().c_str(), p_vaddr, segment->Begin());
+ if (file_length < (program_header->p_offset + program_header->p_filesz)) {
+ *error_msg = StringPrintf("File size of %zd bytes not large enough to contain ELF segment "
+ "%d of %" PRIu64 " bytes: '%s'", file_length, i,
+ static_cast<uint64_t>(program_header->p_offset + program_header->p_filesz),
+ file_->GetPath().c_str());
return false;
}
- segments_.push_back(segment.release());
+ if (program_header->p_filesz != 0u) {
+ std::unique_ptr<MemMap> segment(
+ MemMap::MapFileAtAddress(p_vaddr,
+ program_header->p_filesz,
+ prot, flags, file_->Fd(),
+ program_header->p_offset,
+ true, // implies MAP_FIXED
+ file_->GetPath().c_str(),
+ error_msg));
+ if (segment.get() == nullptr) {
+ *error_msg = StringPrintf("Failed to map ELF file segment %d from %s: %s",
+ i, file_->GetPath().c_str(), error_msg->c_str());
+ return false;
+ }
+ if (segment->Begin() != p_vaddr) {
+ *error_msg = StringPrintf("Failed to map ELF file segment %d from %s at expected address %p, "
+ "instead mapped to %p",
+ i, file_->GetPath().c_str(), p_vaddr, segment->Begin());
+ return false;
+ }
+ segments_.push_back(segment.release());
+ }
+ if (program_header->p_filesz < program_header->p_memsz) {
+ std::string name = StringPrintf("Zero-initialized segment %" PRIu64 " of ELF file %s",
+ static_cast<uint64_t>(i), file_->GetPath().c_str());
+ std::unique_ptr<MemMap> segment(
+ MemMap::MapAnonymous(name.c_str(),
+ p_vaddr + program_header->p_filesz,
+ program_header->p_memsz - program_header->p_filesz,
+ prot, false, true /* reuse */, error_msg));
+ if (segment == nullptr) {
+ *error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s: %s",
+ i, file_->GetPath().c_str(), error_msg->c_str());
+ return false;
+ }
+ if (segment->Begin() != p_vaddr) {
+ *error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s "
+ "at expected address %p, instead mapped to %p",
+ i, file_->GetPath().c_str(), p_vaddr, segment->Begin());
+ return false;
+ }
+ segments_.push_back(segment.release());
+ }
}
// Now that we are done loading, .dynamic should be in memory to find .dynstr, .dynsym, .hash
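The rewritten loader replaces the single p_memsz-sized file mapping with up to two mappings per PT_LOAD segment: a file-backed one for the first p_filesz bytes and an anonymous zero-filled one for the p_memsz - p_filesz tail (the .bss portion). That split is why an unaligned p_filesz smaller than p_memsz is now rejected. A worked example of the arithmetic, with invented header values:

    // Example PT_LOAD header values (4 KiB pages assumed):
    uint64_t p_vaddr  = 0x10000;  // required load address
    uint64_t p_filesz = 0x3000;   // bytes backed by the file
    uint64_t p_memsz  = 0x5000;   // total bytes in memory
    // Mapping 1: file-backed  [0x10000, 0x13000), from file offset p_offset.
    // Mapping 2: anonymous    [0x13000, 0x15000), zero-initialized.
    // The boundary p_vaddr + p_filesz = 0x13000 must be page-aligned so the two
    // mappings can abut exactly, hence the new IsAligned<kPageSize> check.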
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 72734e9..5224d64 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -236,8 +236,8 @@
// Size in number of elements.
void Init() {
std::string error_msg;
- mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), NULL, capacity_ * sizeof(begin_[0]),
- PROT_READ | PROT_WRITE, false, &error_msg));
+ mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), nullptr, capacity_ * sizeof(begin_[0]),
+ PROT_READ | PROT_WRITE, false, false, &error_msg));
CHECK(mem_map_.get() != NULL) << "couldn't allocate mark stack.\n" << error_msg;
uint8_t* addr = mem_map_->Begin();
CHECK(addr != NULL);
diff --git a/runtime/gc/accounting/bitmap-inl.h b/runtime/gc/accounting/bitmap-inl.h
new file mode 100644
index 0000000..e87a0c0
--- /dev/null
+++ b/runtime/gc/accounting/bitmap-inl.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_ACCOUNTING_BITMAP_INL_H_
+#define ART_RUNTIME_GC_ACCOUNTING_BITMAP_INL_H_
+
+#include "bitmap.h"
+
+#include <memory>
+
+#include "atomic.h"
+#include "base/logging.h"
+#include "utils.h"
+
+namespace art {
+namespace gc {
+namespace accounting {
+
+inline bool Bitmap::AtomicTestAndSetBit(uintptr_t bit_index) {
+ CheckValidBitIndex(bit_index);
+ const size_t word_index = BitIndexToWordIndex(bit_index);
+ const uintptr_t word_mask = BitIndexToMask(bit_index);
+ auto* atomic_entry = reinterpret_cast<Atomic<uintptr_t>*>(&bitmap_begin_[word_index]);
+ uintptr_t old_word;
+ do {
+ old_word = atomic_entry->LoadRelaxed();
+ // Fast path: The bit is already set.
+ if ((old_word & word_mask) != 0) {
+ DCHECK(TestBit(bit_index));
+ return true;
+ }
+ } while (!atomic_entry->CompareExchangeWeakSequentiallyConsistent(old_word,
+ old_word | word_mask));
+ DCHECK(TestBit(bit_index));
+ return false;
+}
+
+inline bool Bitmap::TestBit(uintptr_t bit_index) const {
+ CheckValidBitIndex(bit_index);
+ return (bitmap_begin_[BitIndexToWordIndex(bit_index)] & BitIndexToMask(bit_index)) != 0;
+}
+
+template<typename Visitor>
+inline void Bitmap::VisitSetBits(uintptr_t bit_start, uintptr_t bit_end, const Visitor& visitor)
+ const {
+ DCHECK_LE(bit_start, bit_end);
+ CheckValidBitIndex(bit_start);
+ const uintptr_t index_start = BitIndexToWordIndex(bit_start);
+ const uintptr_t index_end = BitIndexToWordIndex(bit_end);
+ if (bit_start != bit_end) {
+ CheckValidBitIndex(bit_end - 1);
+ }
+
+ // Index(begin) ... Index(end)
+ // [xxxxx???][........][????yyyy]
+ // ^ ^
+ // | #---- Bit of visit_end
+ // #---- Bit of visit_begin
+ //
+
+ // Left edge.
+ uintptr_t left_edge = bitmap_begin_[index_start];
+ // Clear the lower bits that are not in range.
+ left_edge &= ~((static_cast<uintptr_t>(1) << (bit_start % kBitsPerBitmapWord)) - 1);
+
+ // Right edge. Either unique, or left_edge.
+ uintptr_t right_edge;
+
+ if (index_start < index_end) {
+ // Left edge != right edge.
+
+ // Traverse left edge.
+ if (left_edge != 0) {
+ const uintptr_t ptr_base = WordIndexToBitIndex(index_start);
+ do {
+ const size_t shift = CTZ(left_edge);
+ visitor(ptr_base + shift);
+ left_edge ^= static_cast<uintptr_t>(1) << shift;
+ } while (left_edge != 0);
+ }
+
+ // Traverse the middle, full part.
+ for (size_t i = index_start + 1; i < index_end; ++i) {
+ uintptr_t w = bitmap_begin_[i];
+ if (w != 0) {
+ const uintptr_t ptr_base = WordIndexToBitIndex(i);
+ do {
+ const size_t shift = CTZ(w);
+ visitor(ptr_base + shift);
+ w ^= static_cast<uintptr_t>(1) << shift;
+ } while (w != 0);
+ }
+ }
+
+ // Right edge is unique.
+ // But maybe we don't have anything to do: visit_end starts in a new word...
+ if (bit_end == 0) {
+ // Do not read memory, as it could be after the end of the bitmap.
+ right_edge = 0;
+ } else {
+ right_edge = bitmap_begin_[index_end];
+ }
+ } else {
+ right_edge = left_edge;
+ }
+
+ // Right edge handling.
+ right_edge &= ((static_cast<uintptr_t>(1) << (bit_end % kBitsPerBitmapWord)) - 1);
+ if (right_edge != 0) {
+ const uintptr_t ptr_base = WordIndexToBitIndex(index_end);
+ do {
+ const size_t shift = CTZ(right_edge);
+ visitor(ptr_base + shift);
+ right_edge ^= (static_cast<uintptr_t>(1)) << shift;
+ } while (right_edge != 0);
+ }
+}
+
+template<bool kSetBit>
+inline bool Bitmap::ModifyBit(uintptr_t bit_index) {
+ CheckValidBitIndex(bit_index);
+ const size_t word_index = BitIndexToWordIndex(bit_index);
+ const uintptr_t word_mask = BitIndexToMask(bit_index);
+ uintptr_t* address = &bitmap_begin_[word_index];
+ uintptr_t old_word = *address;
+ if (kSetBit) {
+ *address = old_word | word_mask;
+ } else {
+ *address = old_word & ~word_mask;
+ }
+ DCHECK_EQ(TestBit(bit_index), kSetBit);
+ return (old_word & word_mask) != 0;
+}
+
+} // namespace accounting
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_ACCOUNTING_BITMAP_INL_H_
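A short usage sketch of the new Bitmap API (Create is defined in bitmap.cc below); the bit range passed to VisitSetBits exercises the left-edge, middle, and right-edge paths of the visitor above. LOG and CHECK are ART's, and the sketch assumes a 64-bit host for the word-boundary comments.

    std::unique_ptr<Bitmap> bitmap(Bitmap::Create("sketch", 256));  // 256 bits, 4 words
    bitmap->SetBit(3);
    bitmap->SetBit(130);                                 // lands in a middle word
    CHECK(!bitmap->AtomicTestAndSetBit(7));              // was clear, so returns false
    CHECK(bitmap->AtomicTestAndSetBit(7));               // already set, so returns true
    bitmap->VisitSetBits(0, 200, [](size_t bit_index) {  // visits bits 3, 7, and 130
      LOG(INFO) << "set bit " << bit_index;
    });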
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
new file mode 100644
index 0000000..20984fd
--- /dev/null
+++ b/runtime/gc/accounting/bitmap.cc
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bitmap-inl.h"
+
+#include "card_table.h"
+#include "mem_map.h"
+
+namespace art {
+namespace gc {
+namespace accounting {
+
+Bitmap* Bitmap::CreateFromMemMap(MemMap* mem_map, size_t num_bits) {
+ CHECK(mem_map != nullptr);
+ return new Bitmap(mem_map, num_bits);
+}
+
+Bitmap::Bitmap(MemMap* mem_map, size_t bitmap_size)
+ : mem_map_(mem_map), bitmap_begin_(reinterpret_cast<uintptr_t*>(mem_map->Begin())),
+ bitmap_size_(bitmap_size) {
+ CHECK(bitmap_begin_ != nullptr);
+ CHECK_NE(bitmap_size, 0U);
+}
+
+MemMap* Bitmap::AllocateMemMap(const std::string& name, size_t num_bits) {
+ const size_t bitmap_size = RoundUp(
+ RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), kPageSize);
+ std::string error_msg;
+ std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), nullptr, bitmap_size,
+ PROT_READ | PROT_WRITE, false, false,
+ &error_msg));
+ if (UNLIKELY(mem_map.get() == nullptr)) {
+ LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
+ return nullptr;
+ }
+ return mem_map.release();
+}
+
+Bitmap* Bitmap::Create(const std::string& name, size_t num_bits) {
+ auto* const mem_map = AllocateMemMap(name, num_bits);
+ if (mem_map == nullptr) {
+ return nullptr;
+ }
+ return CreateFromMemMap(mem_map, num_bits);
+}
+
+void Bitmap::Clear() {
+ if (bitmap_begin_ != nullptr) {
+ mem_map_->MadviseDontNeedAndZero();
+ }
+}
+
+void Bitmap::CopyFrom(Bitmap* source_bitmap) {
+ DCHECK_EQ(BitmapSize(), source_bitmap->BitmapSize());
+ std::copy(source_bitmap->Begin(),
+ source_bitmap->Begin() + BitmapSize() / kBitsPerBitmapWord, Begin());
+}
+
+template<size_t kAlignment>
+MemoryRangeBitmap<kAlignment>* MemoryRangeBitmap<kAlignment>::Create(
+ const std::string& name, uintptr_t cover_begin, uintptr_t cover_end) {
+ CHECK_ALIGNED(cover_begin, kAlignment);
+ CHECK_ALIGNED(cover_end, kAlignment);
+ const size_t num_bits = (cover_end - cover_begin) / kAlignment;
+ auto* const mem_map = Bitmap::AllocateMemMap(name, num_bits);
+ return CreateFromMemMap(mem_map, cover_begin, num_bits);
+}
+
+template<size_t kAlignment>
+MemoryRangeBitmap<kAlignment>* MemoryRangeBitmap<kAlignment>::CreateFromMemMap(
+ MemMap* mem_map, uintptr_t begin, size_t num_bits) {
+ return new MemoryRangeBitmap(mem_map, begin, num_bits);
+}
+
+template class MemoryRangeBitmap<CardTable::kCardSize>;
+
+} // namespace accounting
+} // namespace gc
+} // namespace art
+
diff --git a/runtime/gc/accounting/bitmap.h b/runtime/gc/accounting/bitmap.h
new file mode 100644
index 0000000..cf2c293
--- /dev/null
+++ b/runtime/gc/accounting/bitmap.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_ACCOUNTING_BITMAP_H_
+#define ART_RUNTIME_GC_ACCOUNTING_BITMAP_H_
+
+#include <limits.h>
+#include <stdint.h>
+#include <memory>
+#include <set>
+#include <vector>
+
+#include "base/mutex.h"
+#include "globals.h"
+#include "object_callbacks.h"
+
+namespace art {
+
+class MemMap;
+
+namespace gc {
+namespace accounting {
+
+// TODO: Use this code to implement SpaceBitmap.
+class Bitmap {
+ public:
+ // Create and initialize a bitmap with size num_bits. Storage is allocated with a MemMap.
+ static Bitmap* Create(const std::string& name, size_t num_bits);
+
+ // Initialize a bitmap using the provided mem_map as backing storage. Takes ownership of the
+ // mem map. The bitmap covers num_bits bits.
+ static Bitmap* CreateFromMemMap(MemMap* mem_map, size_t num_bits);
+
+ // Convert a bit index (an offset from the base) to the index of the word containing it.
+ static ALWAYS_INLINE constexpr size_t BitIndexToWordIndex(uintptr_t offset) {
+ return offset / kBitsPerBitmapWord;
+ }
+
+ template<typename T>
+ static ALWAYS_INLINE constexpr T WordIndexToBitIndex(T word_index) {
+ return static_cast<T>(word_index * kBitsPerBitmapWord);
+ }
+
+ static ALWAYS_INLINE constexpr uintptr_t BitIndexToMask(uintptr_t bit_index) {
+ return static_cast<uintptr_t>(1) << (bit_index % kBitsPerBitmapWord);
+ }
+
+ ALWAYS_INLINE bool SetBit(size_t bit_index) {
+ return ModifyBit<true>(bit_index);
+ }
+
+ ALWAYS_INLINE bool ClearBit(size_t bit_index) {
+ return ModifyBit<false>(bit_index);
+ }
+
+ ALWAYS_INLINE bool TestBit(size_t bit_index) const;
+
+ // Returns true if the bit_index was previously set.
+ ALWAYS_INLINE bool AtomicTestAndSetBit(size_t bit_index);
+
+ // Fill the bitmap with zeroes. Returns the bitmap's memory to the system as a side-effect.
+ void Clear();
+
+ // Visit the set bits in the range [visit_begin, visit_end), where visit_begin and visit_end are
+ // bit indices. The visitor is called with the index of each set bit.
+ template <typename Visitor>
+ void VisitSetBits(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const;
+
+ void CopyFrom(Bitmap* source_bitmap);
+
+ // Starting address of our internal storage.
+ uintptr_t* Begin() {
+ return bitmap_begin_;
+ }
+
+ // Size of our bitmap in bits.
+ size_t BitmapSize() const {
+ return bitmap_size_;
+ }
+
+ // Check that a bit index is valid with a DCHECK.
+ ALWAYS_INLINE void CheckValidBitIndex(size_t bit_index) const {
+ DCHECK_LT(bit_index, BitmapSize());
+ }
+
+ std::string Dump() const;
+
+ protected:
+ static constexpr size_t kBitsPerBitmapWord = sizeof(uintptr_t) * kBitsPerByte;
+
+ Bitmap(MemMap* mem_map, size_t bitmap_size);
+
+ // Allocate the mem-map for a bitmap based on how many bits are required.
+ static MemMap* AllocateMemMap(const std::string& name, size_t num_bits);
+
+ template<bool kSetBit>
+ ALWAYS_INLINE bool ModifyBit(uintptr_t bit_index);
+
+ // Backing storage for bitmap.
+ std::unique_ptr<MemMap> mem_map_;
+
+ // This bitmap itself, word sized for efficiency in scanning.
+ uintptr_t* const bitmap_begin_;
+
+ // Number of bits in the bitmap.
+ const size_t bitmap_size_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Bitmap);
+};
+
+// One bit per kAlignment in the range [start, end)
+template<size_t kAlignment>
+class MemoryRangeBitmap : public Bitmap {
+ public:
+ static MemoryRangeBitmap* Create(const std::string& name, uintptr_t cover_begin,
+ uintptr_t cover_end);
+ static MemoryRangeBitmap* CreateFromMemMap(MemMap* mem_map, uintptr_t cover_begin,
+ size_t num_bits);
+
+ // Beginning of the memory range that the bitmap covers.
+ ALWAYS_INLINE uintptr_t CoverBegin() const {
+ return cover_begin_;
+ }
+
+ // End of the memory range that the bitmap covers.
+ ALWAYS_INLINE uintptr_t CoverEnd() const {
+ return cover_end_;
+ }
+
+ // Return the address associated with a bit index.
+ ALWAYS_INLINE uintptr_t AddrFromBitIndex(size_t bit_index) const {
+ const uintptr_t addr = CoverBegin() + bit_index * kAlignment;
+ DCHECK_EQ(BitIndexFromAddr(addr), bit_index);
+ return addr;
+ }
+
+ // Return the bit index associated with an address.
+ ALWAYS_INLINE uintptr_t BitIndexFromAddr(uintptr_t addr) const {
+ DCHECK(HasAddress(addr)) << CoverBegin() << " <= " << addr << " < " << CoverEnd();
+ return (addr - CoverBegin()) / kAlignment;
+ }
+
+ ALWAYS_INLINE bool HasAddress(const uintptr_t addr) const {
+ return cover_begin_ <= addr && addr < cover_end_;
+ }
+
+ ALWAYS_INLINE bool Set(uintptr_t addr) {
+ return SetBit(BitIndexFromAddr(addr));
+ }
+
+ ALWAYS_INLINE bool Clear(size_t addr) {
+ return ClearBit(BitIndexFromAddr(addr));
+ }
+
+ ALWAYS_INLINE bool Test(size_t addr) const {
+ return TestBit(BitIndexFromAddr(addr));
+ }
+
+ // Returns true if the bit for the given address was previously set.
+ ALWAYS_INLINE bool AtomicTestAndSet(size_t addr) {
+ return AtomicTestAndSetBit(BitIndexFromAddr(addr));
+ }
+
+ private:
+ MemoryRangeBitmap(MemMap* mem_map, uintptr_t begin, size_t num_bits)
+ : Bitmap(mem_map, num_bits), cover_begin_(begin), cover_end_(begin + kAlignment * num_bits) {
+ }
+
+ uintptr_t const cover_begin_;
+ uintptr_t const cover_end_;
+};
+
+} // namespace accounting
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_ACCOUNTING_BITMAP_H_
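MemoryRangeBitmap keys the same bit operations by address instead of index, one bit per kAlignment bytes of the covered range. A sketch using the card-table instantiation from bitmap.cc (the addresses are invented; MB comes from ART's globals.h):

    using CardBitmap = MemoryRangeBitmap<CardTable::kCardSize>;
    const uintptr_t begin = 0x20000000;
    const uintptr_t end = begin + 1 * MB;
    std::unique_ptr<CardBitmap> cards(CardBitmap::Create("card sketch", begin, end));
    cards->Set(begin + CardTable::kCardSize);  // sets bit 1
    CHECK(cards->Test(begin + CardTable::kCardSize));
    CHECK(!cards->Test(begin));                // bit 0 was never set
    CHECK_EQ(cards->BitIndexFromAddr(begin + CardTable::kCardSize), 1u);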
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index ca1e7c1..ad1f192 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -62,7 +62,7 @@
std::string error_msg;
std::unique_ptr<MemMap> mem_map(
MemMap::MapAnonymous("card table", nullptr, capacity + 256, PROT_READ | PROT_WRITE,
- false, &error_msg));
+ false, false, &error_msg));
CHECK(mem_map.get() != NULL) << "couldn't allocate card table: " << error_msg;
// All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
// don't clear the card table to avoid unnecessary pages being allocated
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index b1ccc0b..a3fac58 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -19,6 +19,7 @@
#include <memory>
#include "base/stl_util.h"
+#include "bitmap-inl.h"
#include "card_table-inl.h"
#include "heap_bitmap.h"
#include "gc/accounting/space_bitmap-inl.h"
@@ -40,14 +41,14 @@
namespace gc {
namespace accounting {
-class ModUnionClearCardSetVisitor {
+class ModUnionAddToCardSetVisitor {
public:
- explicit ModUnionClearCardSetVisitor(ModUnionTable::CardSet* const cleared_cards)
- : cleared_cards_(cleared_cards) {
+ explicit ModUnionAddToCardSetVisitor(ModUnionTable::CardSet* const cleared_cards)
+ : cleared_cards_(cleared_cards) {
}
- inline void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
- UNUSED(new_value);
+ inline void operator()(uint8_t* card, uint8_t expected_value,
+ uint8_t new_value ATTRIBUTE_UNUSED) const {
if (expected_value == CardTable::kCardDirty) {
cleared_cards_->insert(card);
}
@@ -57,18 +58,38 @@
ModUnionTable::CardSet* const cleared_cards_;
};
-class ModUnionClearCardVisitor {
+class ModUnionAddToCardBitmapVisitor {
public:
- explicit ModUnionClearCardVisitor(std::vector<uint8_t*>* cleared_cards)
- : cleared_cards_(cleared_cards) {
+ explicit ModUnionAddToCardBitmapVisitor(ModUnionTable::CardBitmap* bitmap,
+ CardTable* card_table)
+ : bitmap_(bitmap), card_table_(card_table) {
}
- void operator()(uint8_t* card, uint8_t expected_card, uint8_t new_card) const {
- UNUSED(new_card);
+ inline void operator()(uint8_t* card, uint8_t expected_value,
+ uint8_t new_value ATTRIBUTE_UNUSED) const {
+ if (expected_value == CardTable::kCardDirty) {
+ // We want the address the card represents, not the address of the card.
+ bitmap_->Set(reinterpret_cast<uintptr_t>(card_table_->AddrFromCard(card)));
+ }
+ }
+
+ private:
+ ModUnionTable::CardBitmap* const bitmap_;
+ CardTable* const card_table_;
+};
+
+class ModUnionAddToCardVectorVisitor {
+ public:
+ explicit ModUnionAddToCardVectorVisitor(std::vector<uint8_t*>* cleared_cards)
+ : cleared_cards_(cleared_cards) {
+ }
+
+ void operator()(uint8_t* card, uint8_t expected_card, uint8_t new_card ATTRIBUTE_UNUSED) const {
if (expected_card == CardTable::kCardDirty) {
cleared_cards_->push_back(card);
}
}
+
private:
std::vector<uint8_t*>* const cleared_cards_;
};
@@ -77,19 +98,19 @@
public:
ModUnionUpdateObjectReferencesVisitor(MarkHeapReferenceCallback* callback, void* arg,
space::ContinuousSpace* from_space,
- space::ImageSpace* image_space,
+ space::ContinuousSpace* immune_space,
bool* contains_reference_to_other_space)
- : callback_(callback), arg_(arg), from_space_(from_space), image_space_(image_space),
+ : callback_(callback), arg_(arg), from_space_(from_space), immune_space_(immune_space),
contains_reference_to_other_space_(contains_reference_to_other_space) {
}
// Extra parameters are required since we use this same visitor signature for checking objects.
- void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
+ void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Only add the reference if it is non null and fits our criteria.
- mirror::HeapReference<Object>* obj_ptr = obj->GetFieldObjectReferenceAddr(offset);
+ mirror::HeapReference<Object>* const obj_ptr = obj->GetFieldObjectReferenceAddr(offset);
mirror::Object* ref = obj_ptr->AsMirrorPtr();
- if (ref != nullptr && !from_space_->HasAddress(ref) && !image_space_->HasAddress(ref)) {
+ if (ref != nullptr && !from_space_->HasAddress(ref) && !immune_space_->HasAddress(ref)) {
*contains_reference_to_other_space_ = true;
callback_(obj_ptr, arg_);
}
@@ -97,27 +118,30 @@
private:
MarkHeapReferenceCallback* const callback_;
- void* arg_;
+ void* const arg_;
// Space which we are scanning
space::ContinuousSpace* const from_space_;
- space::ImageSpace* const image_space_;
+ space::ContinuousSpace* const immune_space_;
// Set if we have any references to another space.
bool* const contains_reference_to_other_space_;
};
class ModUnionScanImageRootVisitor {
public:
+ // The immune space is any other space whose references we don't care about. Currently this is
+ // the image space in the case of the zygote mod union table.
ModUnionScanImageRootVisitor(MarkHeapReferenceCallback* callback, void* arg,
- space::ContinuousSpace* from_space, space::ImageSpace* image_space,
+ space::ContinuousSpace* from_space,
+ space::ContinuousSpace* immune_space,
bool* contains_reference_to_other_space)
- : callback_(callback), arg_(arg), from_space_(from_space), image_space_(image_space),
+ : callback_(callback), arg_(arg), from_space_(from_space), immune_space_(immune_space),
contains_reference_to_other_space_(contains_reference_to_other_space) {}
void operator()(Object* root) const
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(root != NULL);
- ModUnionUpdateObjectReferencesVisitor ref_visitor(callback_, arg_, from_space_, image_space_,
+ DCHECK(root != nullptr);
+ ModUnionUpdateObjectReferencesVisitor ref_visitor(callback_, arg_, from_space_, immune_space_,
contains_reference_to_other_space_);
root->VisitReferences<kMovingClasses>(ref_visitor, VoidFunctor());
}
@@ -127,14 +151,14 @@
void* const arg_;
// Space which we are scanning
space::ContinuousSpace* const from_space_;
- space::ImageSpace* const image_space_;
+ space::ContinuousSpace* const immune_space_;
// Set if we have any references to another space.
bool* const contains_reference_to_other_space_;
};
void ModUnionTableReferenceCache::ClearCards() {
CardTable* card_table = GetHeap()->GetCardTable();
- ModUnionClearCardSetVisitor visitor(&cleared_cards_);
+ ModUnionAddToCardSetVisitor visitor(&cleared_cards_);
// Clear dirty cards in this space and update the corresponding mod-union bits.
card_table->ModifyCardsAtomic(space_->Begin(), space_->End(), AgeCardVisitor(), visitor);
}
@@ -324,9 +348,54 @@
}
}
+ModUnionTableCardCache::ModUnionTableCardCache(const std::string& name, Heap* heap,
+ space::ContinuousSpace* space)
+ : ModUnionTable(name, heap, space) {
+ // Normally we could use End() instead of Limit() here, but for testing we may want to have a
+ // mod-union table for a space which can still grow.
+ if (!space->IsImageSpace()) {
+ CHECK_ALIGNED(reinterpret_cast<uintptr_t>(space->Limit()), CardTable::kCardSize);
+ }
+ card_bitmap_.reset(CardBitmap::Create(
+ "mod union bitmap", reinterpret_cast<uintptr_t>(space->Begin()),
+ RoundUp(reinterpret_cast<uintptr_t>(space->Limit()), CardTable::kCardSize)));
+}
+
+class CardBitVisitor {
+ public:
+ CardBitVisitor(MarkHeapReferenceCallback* callback, void* arg, space::ContinuousSpace* space,
+ space::ContinuousSpace* immune_space, ModUnionTable::CardBitmap* card_bitmap)
+ : callback_(callback), arg_(arg), space_(space), immune_space_(immune_space),
+ bitmap_(space->GetLiveBitmap()), card_bitmap_(card_bitmap) {
+ DCHECK(immune_space_ != nullptr);
+ }
+
+ void operator()(size_t bit_index) const {
+ const uintptr_t start = card_bitmap_->AddrFromBitIndex(bit_index);
+ DCHECK(space_->HasAddress(reinterpret_cast<mirror::Object*>(start)))
+ << start << " " << *space_;
+ bool reference_to_other_space = false;
+ ModUnionScanImageRootVisitor scan_visitor(callback_, arg_, space_, immune_space_,
+ &reference_to_other_space);
+ bitmap_->VisitMarkedRange(start, start + CardTable::kCardSize, scan_visitor);
+ if (!reference_to_other_space) {
+ // No non-null reference to another space, so clear the bit.
+ card_bitmap_->ClearBit(bit_index);
+ }
+ }
+
+ private:
+ MarkHeapReferenceCallback* const callback_;
+ void* const arg_;
+ space::ContinuousSpace* const space_;
+ space::ContinuousSpace* const immune_space_;
+ ContinuousSpaceBitmap* const bitmap_;
+ ModUnionTable::CardBitmap* const card_bitmap_;
+};
+
void ModUnionTableCardCache::ClearCards() {
- CardTable* card_table = GetHeap()->GetCardTable();
- ModUnionClearCardSetVisitor visitor(&cleared_cards_);
+ CardTable* const card_table = GetHeap()->GetCardTable();
+ ModUnionAddToCardBitmapVisitor visitor(card_bitmap_.get(), card_table);
// Clear dirty cards in this space and update the corresponding mod-union bits.
card_table->ModifyCardsAtomic(space_->Begin(), space_->End(), AgeCardVisitor(), visitor);
}
@@ -334,46 +403,51 @@
// Mark all references to the alloc space(s).
void ModUnionTableCardCache::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
void* arg) {
- CardTable* card_table = heap_->GetCardTable();
- space::ImageSpace* image_space = heap_->GetImageSpace();
- ContinuousSpaceBitmap* bitmap = space_->GetLiveBitmap();
- bool reference_to_other_space = false;
- ModUnionScanImageRootVisitor scan_visitor(callback, arg, space_, image_space,
- &reference_to_other_space);
- for (auto it = cleared_cards_.begin(), end = cleared_cards_.end(); it != end; ) {
- uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(*it));
- DCHECK(space_->HasAddress(reinterpret_cast<Object*>(start)));
- reference_to_other_space = false;
- bitmap->VisitMarkedRange(start, start + CardTable::kCardSize, scan_visitor);
- if (!reference_to_other_space) {
- // No non null reference to another space, remove the card.
- it = cleared_cards_.erase(it);
- } else {
- ++it;
- }
- }
+ auto* image_space = heap_->GetImageSpace();
+ // If we don't have an image space, pass in space_ itself as the immune space; this avoids a
+ // null check in ModUnionUpdateObjectReferencesVisitor.
+ CardBitVisitor visitor(callback, arg, space_, image_space != nullptr ? image_space : space_,
+ card_bitmap_.get());
+ card_bitmap_->VisitSetBits(
+ 0, RoundUp(space_->Size(), CardTable::kCardSize) / CardTable::kCardSize, visitor);
}
void ModUnionTableCardCache::Dump(std::ostream& os) {
- CardTable* card_table = heap_->GetCardTable();
os << "ModUnionTable dirty cards: [";
- for (const uint8_t* card_addr : cleared_cards_) {
- auto start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
- auto end = start + CardTable::kCardSize;
- os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << "\n";
+ // TODO: Find a cleaner way of doing this.
+ for (uint8_t* addr = space_->Begin(); addr < AlignUp(space_->End(), CardTable::kCardSize);
+ addr += CardTable::kCardSize) {
+ if (card_bitmap_->Test(reinterpret_cast<uintptr_t>(addr))) {
+ os << reinterpret_cast<void*>(addr) << "-"
+ << reinterpret_cast<void*>(addr + CardTable::kCardSize) << "\n";
+ }
}
os << "]";
}
void ModUnionTableCardCache::SetCards() {
- CardTable* card_table = heap_->GetCardTable();
+ // Only set cards up to the aligned End() since there cannot be any objects past the space's end.
for (uint8_t* addr = space_->Begin(); addr < AlignUp(space_->End(), CardTable::kCardSize);
addr += CardTable::kCardSize) {
- cleared_cards_.insert(card_table->CardFromAddr(addr));
+ card_bitmap_->Set(reinterpret_cast<uintptr_t>(addr));
}
}
+bool ModUnionTableCardCache::ContainsCardFor(uintptr_t addr) {
+ return card_bitmap_->Test(addr);
+}
+
void ModUnionTableReferenceCache::SetCards() {
+ for (uint8_t* addr = space_->Begin(); addr < AlignUp(space_->End(), CardTable::kCardSize);
+ addr += CardTable::kCardSize) {
+ cleared_cards_.insert(heap_->GetCardTable()->CardFromAddr(reinterpret_cast<void*>(addr)));
+ }
+}
+
+bool ModUnionTableReferenceCache::ContainsCardFor(uintptr_t addr) {
+ auto* card_ptr = heap_->GetCardTable()->CardFromAddr(reinterpret_cast<void*>(addr));
+ return cleared_cards_.find(card_ptr) != cleared_cards_.end() ||
+ references_.find(card_ptr) != references_.end();
}
} // namespace accounting
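To make the new card-bitmap flow concrete: ClearCards() ages dirty cards and records them as bits, and UpdateAndMarkReferences() then visits exactly those bits. A minimal sketch of such a visit loop follows (an assumed shape; the real work happens inside Bitmap::VisitSetBits), using count-trailing-zeros to enumerate set bits word by word.

#include <cstdint>
#include <cstdio>

// Visit the indices of all set bits in one bitmap word, lowest first.
template <typename Visitor>
void VisitSetBitsInWord(uint64_t word, size_t base_index, Visitor&& visit) {
  while (word != 0) {
    visit(base_index + static_cast<size_t>(__builtin_ctzll(word)));
    word &= word - 1;  // clear the lowest set bit
  }
}

int main() {
  // Bits 3 and 40 dirty: the visitor fires for card indices 3 and 40 only.
  const uint64_t word = (uint64_t{1} << 3) | (uint64_t{1} << 40);
  VisitSetBitsInWord(word, /*base_index=*/0,
                     [](size_t bit) { std::printf("dirty card bit %zu\n", bit); });
  return 0;
}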
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index d6342cf..2e232ca 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -17,7 +17,9 @@
#ifndef ART_RUNTIME_GC_ACCOUNTING_MOD_UNION_TABLE_H_
#define ART_RUNTIME_GC_ACCOUNTING_MOD_UNION_TABLE_H_
+#include "bitmap.h"
#include "base/allocator.h"
+#include "card_table.h"
#include "globals.h"
#include "object_callbacks.h"
#include "safe_map.h"
@@ -44,6 +46,7 @@
namespace accounting {
+class Bitmap;
class HeapBitmap;
// The mod-union table is the union of modified cards. It is used to allow the card table to be
@@ -52,6 +55,7 @@
public:
typedef std::set<uint8_t*, std::less<uint8_t*>,
TrackingAllocator<uint8_t*, kAllocatorTagModUnionCardSet>> CardSet;
+ typedef MemoryRangeBitmap<CardTable::kCardSize> CardBitmap;
explicit ModUnionTable(const std::string& name, Heap* heap, space::ContinuousSpace* space)
: name_(name),
@@ -80,6 +84,10 @@
// bitmap or not.
virtual void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) = 0;
+ // Returns true if a card is marked inside the mod union table. Used for testing. The address
+ // doesn't need to be aligned.
+ virtual bool ContainsCardFor(uintptr_t addr) = 0;
+
virtual void Dump(std::ostream& os) = 0;
space::ContinuousSpace* GetSpace() {
return space_;
@@ -106,25 +114,27 @@
virtual ~ModUnionTableReferenceCache() {}
// Clear and store cards for a space.
- void ClearCards();
+ void ClearCards() OVERRIDE;
// Update table based on cleared cards and mark all references to the other spaces.
- void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, void* arg)
+ void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, void* arg) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
// VisitMarkedRange can't know if the callback will modify the bitmap or not.
- void Verify()
+ void Verify() OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Function that tells whether or not to add a reference to the table.
virtual bool ShouldAddReference(const mirror::Object* ref) const = 0;
- void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE;
- void SetCards() OVERRIDE;
+ virtual void Dump(std::ostream& os) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ virtual void SetCards() OVERRIDE;
protected:
// Cleared card array, used to update the mod-union table.
@@ -138,28 +148,32 @@
// Card caching implementation. Keeps track of which cards we cleared and only this information.
class ModUnionTableCardCache : public ModUnionTable {
public:
- explicit ModUnionTableCardCache(const std::string& name, Heap* heap, space::ContinuousSpace* space)
- : ModUnionTable(name, heap, space) {}
+ // Note: There is an assumption that the space's End() doesn't change.
+ explicit ModUnionTableCardCache(const std::string& name, Heap* heap,
+ space::ContinuousSpace* space);
virtual ~ModUnionTableCardCache() {}
// Clear and store cards for a space.
- void ClearCards();
+ virtual void ClearCards() OVERRIDE;
// Mark all references to the alloc space(s).
- void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, void* arg)
+ virtual void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, void* arg) OVERRIDE
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Nothing to verify.
- void Verify() {}
+ virtual void Verify() OVERRIDE {}
- void Dump(std::ostream& os);
+ virtual void Dump(std::ostream& os) OVERRIDE;
- void SetCards() OVERRIDE;
+ virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE;
+
+ // Marks all the cards in the mod union table.
+ virtual void SetCards() OVERRIDE;
protected:
- // Cleared card array, used to update the mod-union table.
- CardSet cleared_cards_;
+ // Cleared card bitmap, used to update the mod-union table.
+ std::unique_ptr<CardBitmap> card_bitmap_;
};
} // namespace accounting
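A rough way to see the trade-off between the old CardSet and the new CardBitmap (the figures below are illustrative assumptions, not measurements): the bitmap's cost is fixed and small, while the set's cost grows with the number of dirty cards.

#include <cstddef>

int main() {
  constexpr size_t kSpaceSize = 64 * 1024 * 1024;        // hypothetical 64 MiB space
  constexpr size_t kCardSize = 128;                      // CardTable::kCardSize
  constexpr size_t kNumCards = kSpaceSize / kCardSize;   // 512 Ki cards
  constexpr size_t kBitmapBytes = kNumCards / 8;         // 64 KiB, paid once
  constexpr size_t kSetNodeBytes = 48;  // rough per-entry std::set cost (assumed)
  // Past roughly this many dirty cards, the bitmap is the cheaper representation.
  static_assert(kBitmapBytes / kSetNodeBytes == 1365, "break-even point");
  return 0;
}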
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
new file mode 100644
index 0000000..87ce166
--- /dev/null
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mod_union_table-inl.h"
+
+#include "common_runtime_test.h"
+#include "gc/space/space-inl.h"
+#include "mirror/array-inl.h"
+#include "space_bitmap-inl.h"
+#include "thread-inl.h"
+
+namespace art {
+namespace gc {
+namespace accounting {
+
+class ModUnionTableFactory {
+ public:
+ enum TableType {
+ kTableTypeCardCache,
+ kTableTypeReferenceCache,
+ kTableTypeCount, // Number of values in the enum.
+ };
+
+ // Target space is ignored for the card cache implementation.
+ static ModUnionTable* Create(
+ TableType type, space::ContinuousSpace* space, space::ContinuousSpace* target_space);
+};
+
+class ModUnionTableTest : public CommonRuntimeTest {
+ public:
+ ModUnionTableTest() : java_lang_object_array_(nullptr) {
+ }
+ mirror::ObjectArray<mirror::Object>* AllocObjectArray(
+ Thread* self, space::ContinuousMemMapAllocSpace* space, size_t component_count)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ auto* klass = GetObjectArrayClass(self, space);
+ const size_t size = ComputeArraySize(self, klass, component_count, 2);
+ size_t bytes_allocated = 0;
+ auto* obj = down_cast<mirror::ObjectArray<mirror::Object>*>(
+ space->Alloc(self, size, &bytes_allocated, nullptr));
+ if (obj != nullptr) {
+ obj->SetClass(klass);
+ obj->SetLength(static_cast<int32_t>(component_count));
+ space->GetLiveBitmap()->Set(obj);
+ EXPECT_GE(bytes_allocated, size);
+ }
+ return obj;
+ }
+ void ResetClass() {
+ java_lang_object_array_ = nullptr;
+ }
+ void RunTest(ModUnionTableFactory::TableType type);
+
+ private:
+ mirror::Class* GetObjectArrayClass(Thread* self, space::ContinuousMemMapAllocSpace* space)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (java_lang_object_array_ == nullptr) {
+ java_lang_object_array_ =
+ Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kObjectArrayClass);
+ // Since the test doesn't have an image, the class of the object array keeps cards live
+ // inside the card cache mod-union table and causes the check
+ // ASSERT_FALSE(table->ContainsCardFor(reinterpret_cast<uintptr_t>(obj3)));
+ // to fail since the class ends up keeping the card dirty. To get around this, we make a fake
+ // copy of the class in the same space that we are allocating in.
+ DCHECK(java_lang_object_array_ != nullptr);
+ const size_t class_size = java_lang_object_array_->GetClassSize();
+ size_t bytes_allocated = 0;
+ auto* klass = down_cast<mirror::Class*>(space->Alloc(self, class_size, &bytes_allocated,
+ nullptr));
+ DCHECK(klass != nullptr);
+ memcpy(klass, java_lang_object_array_, class_size);
+ Runtime::Current()->GetHeap()->GetCardTable()->MarkCard(klass);
+ java_lang_object_array_ = klass;
+ }
+ return java_lang_object_array_;
+ }
+ mirror::Class* java_lang_object_array_;
+};
+
+// Collect visited objects into container.
+static void CollectVisitedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(ref != nullptr);
+ DCHECK(arg != nullptr);
+ reinterpret_cast<std::set<mirror::Object*>*>(arg)->insert(ref->AsMirrorPtr());
+}
+
+// A mod union table that only holds references to a specified target space.
+class ModUnionTableRefCacheToSpace : public ModUnionTableReferenceCache {
+ public:
+ explicit ModUnionTableRefCacheToSpace(
+ const std::string& name, Heap* heap, space::ContinuousSpace* space,
+ space::ContinuousSpace* target_space)
+ : ModUnionTableReferenceCache(name, heap, space), target_space_(target_space) {}
+
+ bool ShouldAddReference(const mirror::Object* ref) const OVERRIDE {
+ return target_space_->HasAddress(ref);
+ }
+
+ private:
+ space::ContinuousSpace* const target_space_;
+};
+
+std::ostream& operator<<(std::ostream& oss, ModUnionTableFactory::TableType type) {
+ switch (type) {
+ case ModUnionTableFactory::kTableTypeCardCache: {
+ oss << "CardCache";
+ break;
+ }
+ case ModUnionTableFactory::kTableTypeReferenceCache: {
+ oss << "ReferenceCache";
+ break;
+ }
+ default: {
+ UNIMPLEMENTED(FATAL) << static_cast<size_t>(type);
+ }
+ }
+ return oss;
+}
+
+ModUnionTable* ModUnionTableFactory::Create(
+ TableType type, space::ContinuousSpace* space, space::ContinuousSpace* target_space) {
+ std::ostringstream name;
+ name << "Mod union table: " << type;
+ switch (type) {
+ case kTableTypeCardCache: {
+ return new ModUnionTableCardCache(name.str(), Runtime::Current()->GetHeap(), space);
+ }
+ case kTableTypeReferenceCache: {
+ return new ModUnionTableRefCacheToSpace(name.str(), Runtime::Current()->GetHeap(), space,
+ target_space);
+ }
+ default: {
+ UNIMPLEMENTED(FATAL) << "Invalid type " << type;
+ }
+ }
+ return nullptr;
+}
+
+TEST_F(ModUnionTableTest, TestCardCache) {
+ RunTest(ModUnionTableFactory::kTableTypeCardCache);
+}
+
+TEST_F(ModUnionTableTest, TestReferenceCache) {
+ RunTest(ModUnionTableFactory::kTableTypeReferenceCache);
+}
+
+void ModUnionTableTest::RunTest(ModUnionTableFactory::TableType type) {
+ Thread* const self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ Runtime* const runtime = Runtime::Current();
+ gc::Heap* const heap = runtime->GetHeap();
+ // Use the non-moving space since moving GCs don't necessarily have a primary free list space.
+ auto* space = heap->GetNonMovingSpace();
+ ResetClass();
+ // Create another space that we can put references in.
+ std::unique_ptr<space::DlMallocSpace> other_space(space::DlMallocSpace::Create(
+ "other space", 128 * KB, 4 * MB, 4 * MB, nullptr, false));
+ ASSERT_TRUE(other_space.get() != nullptr);
+ heap->AddSpace(other_space.get());
+ std::unique_ptr<ModUnionTable> table(ModUnionTableFactory::Create(
+ type, space, other_space.get()));
+ ASSERT_TRUE(table.get() != nullptr);
+ // Create some fake objects in the non-moving space and dirty some of their cards.
+ auto* obj1 = AllocObjectArray(self, space, CardTable::kCardSize);
+ ASSERT_TRUE(obj1 != nullptr);
+ auto* obj2 = AllocObjectArray(self, space, CardTable::kCardSize);
+ ASSERT_TRUE(obj2 != nullptr);
+ auto* obj3 = AllocObjectArray(self, space, CardTable::kCardSize);
+ ASSERT_TRUE(obj3 != nullptr);
+ auto* obj4 = AllocObjectArray(self, space, CardTable::kCardSize);
+ ASSERT_TRUE(obj4 != nullptr);
+ // Dirty some cards.
+ obj1->Set(0, obj2);
+ obj2->Set(0, obj3);
+ obj3->Set(0, obj4);
+ obj4->Set(0, obj1);
+ // Dirty some more cards to objects in another space.
+ auto* other_space_ref1 = AllocObjectArray(self, other_space.get(), CardTable::kCardSize);
+ ASSERT_TRUE(other_space_ref1 != nullptr);
+ auto* other_space_ref2 = AllocObjectArray(self, other_space.get(), CardTable::kCardSize);
+ ASSERT_TRUE(other_space_ref2 != nullptr);
+ obj1->Set(1, other_space_ref1);
+ obj2->Set(3, other_space_ref2);
+ table->ClearCards();
+ std::set<mirror::Object*> visited;
+ table->UpdateAndMarkReferences(&CollectVisitedCallback, &visited);
+ // Check that we visited all the references in other spaces only.
+ ASSERT_GE(visited.size(), 2u);
+ ASSERT_TRUE(visited.find(other_space_ref1) != visited.end());
+ ASSERT_TRUE(visited.find(other_space_ref2) != visited.end());
+ // Verify that all the other references were visited.
+ // obj1, obj2 cards should still be in mod union table since they have references to other
+ // spaces.
+ ASSERT_TRUE(table->ContainsCardFor(reinterpret_cast<uintptr_t>(obj1)));
+ ASSERT_TRUE(table->ContainsCardFor(reinterpret_cast<uintptr_t>(obj2)));
+ // obj3, obj4 don't have a reference to any object in the other space, their cards should have
+ // been removed from the mod union table during UpdateAndMarkReferences.
+ ASSERT_FALSE(table->ContainsCardFor(reinterpret_cast<uintptr_t>(obj3)));
+ ASSERT_FALSE(table->ContainsCardFor(reinterpret_cast<uintptr_t>(obj4)));
+ {
+ // Verify() is currently a no-op; make sure calling it still works.
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ table->Verify();
+ }
+ // Verify that dump doesn't crash.
+ std::ostringstream oss;
+ table->Dump(oss);
+ // Set all the cards, then verify.
+ table->SetCards();
+ // TODO: Check that the cards are actually set.
+ for (auto* ptr = space->Begin(); ptr < AlignUp(space->End(), CardTable::kCardSize);
+ ptr += CardTable::kCardSize) {
+ ASSERT_TRUE(table->ContainsCardFor(reinterpret_cast<uintptr_t>(ptr)));
+ }
+ // Visit again and make sure the cards get cleared back to their previous state.
+ visited.clear();
+ table->UpdateAndMarkReferences(&CollectVisitedCallback, &visited);
+ // Verify that the dump matches what we saw earlier.
+ std::ostringstream oss2;
+ table->Dump(oss2);
+ ASSERT_EQ(oss.str(), oss2.str());
+ // Remove the space we added so it doesn't persist to the next test.
+ heap->RemoveSpace(other_space.get());
+}
+
+} // namespace accounting
+} // namespace gc
+} // namespace art
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index 84d5da3..bb9aae7 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -37,7 +37,7 @@
static_cast<uint64_t>(static_cast<size_t>(kHeapCapacity / kRegionSize)));
std::string error_msg;
MemMap* mem_map = MemMap::MapAnonymous("read barrier table", nullptr, capacity,
- PROT_READ | PROT_WRITE, false, &error_msg);
+ PROT_READ | PROT_WRITE, false, false, &error_msg);
CHECK(mem_map != nullptr && mem_map->Begin() != nullptr)
<< "couldn't allocate read barrier table: " << error_msg;
mem_map_.reset(mem_map);
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index f5d3b47..ad8d988 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -63,7 +63,8 @@
const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
std::string error_msg;
std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), nullptr, bitmap_size,
- PROT_READ | PROT_WRITE, false, &error_msg));
+ PROT_READ | PROT_WRITE, false, false,
+ &error_msg));
if (UNLIKELY(mem_map.get() == nullptr)) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
return nullptr;
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 7bc83ef..d6b3ed4 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -188,13 +188,6 @@
std::string Dump() const;
- const void* GetObjectWordAddress(const mirror::Object* obj) const {
- uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
- const uintptr_t offset = addr - heap_begin_;
- const size_t index = OffsetToIndex(offset);
- return &bitmap_begin_[index];
- }
-
private:
// TODO: heap_end_ is initialized so that the heap bitmap is empty; this doesn't require the -1.
// However, we document that this is expected on heap_end_
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 72aacf5..f51093a 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -80,8 +80,9 @@
size_t num_of_pages = footprint_ / kPageSize;
size_t max_num_of_pages = max_capacity_ / kPageSize;
std::string error_msg;
- page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", NULL, RoundUp(max_num_of_pages, kPageSize),
- PROT_READ | PROT_WRITE, false, &error_msg));
+ page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", nullptr,
+ RoundUp(max_num_of_pages, kPageSize),
+ PROT_READ | PROT_WRITE, false, false, &error_msg));
CHECK(page_map_mem_map_.get() != nullptr) << "Couldn't allocate the page map : " << error_msg;
page_map_ = page_map_mem_map_->Begin();
page_map_size_ = num_of_pages;
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index cd63d26..8aac484 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -106,7 +106,7 @@
MemMap* mem_map = MemMap::MapAnonymous(
"mark sweep sweep array free buffer", nullptr,
RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
- PROT_READ | PROT_WRITE, false, &error_msg);
+ PROT_READ | PROT_WRITE, false, false, &error_msg);
CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
sweep_array_free_buffer_mem_map_.reset(mem_map);
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 419d555..9343622 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -284,7 +284,8 @@
// address.
non_moving_space_mem_map.reset(
MemMap::MapAnonymous(space_name, requested_alloc_space_begin,
- non_moving_space_capacity, PROT_READ | PROT_WRITE, true, &error_str));
+ non_moving_space_capacity, PROT_READ | PROT_WRITE, true, false,
+ &error_str));
CHECK(non_moving_space_mem_map != nullptr) << error_str;
// Try to reserve virtual memory at a lower address if we have a separate non moving space.
request_begin = reinterpret_cast<uint8_t*>(300 * MB);
@@ -398,14 +399,14 @@
rb_table_.reset(new accounting::ReadBarrierTable());
DCHECK(rb_table_->IsAllCleared());
}
-
- // Card cache for now since it makes it easier for us to update the references to the copying
- // spaces.
- accounting::ModUnionTable* mod_union_table =
- new accounting::ModUnionTableToZygoteAllocspace("Image mod-union table", this,
- GetImageSpace());
- CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
- AddModUnionTable(mod_union_table);
+ if (GetImageSpace() != nullptr) {
+ // Don't add the image mod union table if we are running without an image; doing so can crash
+ // if we use the CardCache implementation.
+ accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
+ "Image mod-union table", this, GetImageSpace());
+ CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
+ AddModUnionTable(mod_union_table);
+ }
if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
accounting::RememberedSet* non_moving_space_rem_set =
new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
@@ -476,7 +477,7 @@
size_t capacity, std::string* out_error_str) {
while (true) {
MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
- PROT_READ | PROT_WRITE, true, out_error_str);
+ PROT_READ | PROT_WRITE, true, false, out_error_str);
if (map != nullptr || request_begin == nullptr) {
return map;
}
@@ -655,7 +656,7 @@
}
bool Heap::IsCompilingBoot() const {
- if (!Runtime::Current()->IsCompiler()) {
+ if (!Runtime::Current()->IsAotCompiler()) {
return false;
}
for (const auto& space : continuous_spaces_) {
@@ -1675,7 +1676,8 @@
AddSpace(to_space);
// Make sure that we will have enough room to copy.
CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
- Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
+ collector::GarbageCollector* collector = Compact(to_space, from_space,
+ kGcCauseHomogeneousSpaceCompact);
const uint64_t space_size_after_compaction = to_space->Size();
main_space_ = to_space;
main_space_backup_.reset(from_space);
@@ -1694,6 +1696,7 @@
// Finish GC.
reference_processor_.EnqueueClearedReferences(self);
GrowForUtilization(semi_space_collector_);
+ LogGC(kGcCauseHomogeneousSpaceCompact, collector);
FinishGC(self, collector::kGcTypeFull);
return HomogeneousSpaceCompactResult::kSuccess;
}
@@ -1744,6 +1747,7 @@
FinishGC(self, collector::kGcTypeNone);
return;
}
+ collector::GarbageCollector* collector = nullptr;
tl->SuspendAll();
switch (collector_type) {
case kCollectorTypeSS: {
@@ -1758,7 +1762,7 @@
bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
mem_map.release());
AddSpace(bump_pointer_space_);
- Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
+ collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
// Use the now empty main space mem map for the bump pointer temp space.
mem_map.reset(main_space_->ReleaseMemMap());
// Unset the pointers just in case.
@@ -1795,7 +1799,7 @@
mem_map.release();
// Compact to the main space from the bump pointer space, don't need to swap semispaces.
AddSpace(main_space_);
- Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
+ collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
mem_map.reset(bump_pointer_space_->ReleaseMemMap());
RemoveSpace(bump_pointer_space_);
bump_pointer_space_ = nullptr;
@@ -1826,6 +1830,8 @@
reference_processor_.EnqueueClearedReferences(self);
uint64_t duration = NanoTime() - start_time;
GrowForUtilization(semi_space_collector_);
+ DCHECK(collector != nullptr);
+ LogGC(kGcCauseCollectorTransition, collector);
FinishGC(self, collector::kGcTypeFull);
int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
int32_t delta_allocated = before_allocated - after_allocated;
@@ -2166,9 +2172,9 @@
std::swap(bump_pointer_space_, temp_space_);
}
-void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
- space::ContinuousMemMapAllocSpace* source_space,
- GcCause gc_cause) {
+collector::GarbageCollector* Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
+ space::ContinuousMemMapAllocSpace* source_space,
+ GcCause gc_cause) {
CHECK(kMovingCollector);
if (target_space != source_space) {
// Don't swap spaces since this isn't a typical semi space collection.
@@ -2176,11 +2182,13 @@
semi_space_collector_->SetFromSpace(source_space);
semi_space_collector_->SetToSpace(target_space);
semi_space_collector_->Run(gc_cause, false);
+ return semi_space_collector_;
} else {
CHECK(target_space->IsBumpPointerSpace())
<< "In-place compaction is only supported for bump pointer spaces";
mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace());
mark_compact_collector_->Run(kGcCauseCollectorTransition, false);
+ return mark_compact_collector_;
}
}
@@ -2291,6 +2299,14 @@
reference_processor_.EnqueueClearedReferences(self);
// Grow the heap so that we know when to perform the next GC.
GrowForUtilization(collector, bytes_allocated_before_gc);
+ LogGC(gc_cause, collector);
+ FinishGC(self, gc_type);
+ // Inform DDMS that a GC completed.
+ Dbg::GcDidFinish();
+ return gc_type;
+}
+
+void Heap::LogGC(GcCause gc_cause, collector::GarbageCollector* collector) {
const size_t duration = GetCurrentGcIteration()->GetDurationNs();
const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
// Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
@@ -2310,8 +2326,8 @@
const size_t total_memory = GetTotalMemory();
std::ostringstream pause_string;
for (size_t i = 0; i < pause_times.size(); ++i) {
- pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
- << ((i != pause_times.size() - 1) ? "," : "");
+ pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
+ << ((i != pause_times.size() - 1) ? "," : "");
}
LOG(INFO) << gc_cause << " " << collector->GetName()
<< " GC freed " << current_gc_iteration_.GetFreedObjects() << "("
@@ -2323,10 +2339,6 @@
<< " total " << PrettyDuration((duration / 1000) * 1000);
VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
}
- FinishGC(self, gc_type);
- // Inform DDMS that a GC completed.
- Dbg::GcDidFinish();
- return gc_type;
}
void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 57c1460..b2478e6 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -666,12 +666,13 @@
class CollectorTransitionTask;
class HeapTrimTask;
- // Compact source space to target space.
- void Compact(space::ContinuousMemMapAllocSpace* target_space,
- space::ContinuousMemMapAllocSpace* source_space,
- GcCause gc_cause)
+ // Compact source space to target space. Returns the collector used.
+ collector::GarbageCollector* Compact(space::ContinuousMemMapAllocSpace* target_space,
+ space::ContinuousMemMapAllocSpace* source_space,
+ GcCause gc_cause)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void LogGC(GcCause gc_cause, collector::GarbageCollector* collector);
void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);
// Create a mem map with a preferred base address.
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 9675ba6..fbfc449 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -29,7 +29,8 @@
capacity = RoundUp(capacity, kPageSize);
std::string error_msg;
std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
- PROT_READ | PROT_WRITE, true, &error_msg));
+ PROT_READ | PROT_WRITE, true, false,
+ &error_msg));
if (mem_map.get() == nullptr) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
<< PrettySize(capacity) << " with message " << error_msg;
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index d873e6d..14f770d 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -788,7 +788,7 @@
OatFile* oat_file = OatFile::Open(oat_filename, oat_filename, image_header.GetOatDataBegin(),
image_header.GetOatFileBegin(),
- !Runtime::Current()->IsCompiler(), error_msg);
+ !Runtime::Current()->IsAotCompiler(), error_msg);
if (oat_file == NULL) {
*error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
oat_filename.c_str(), GetName(), error_msg->c_str());
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index c0c6444..7523de5 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -110,8 +110,8 @@
mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
size_t* bytes_allocated, size_t* usable_size) {
std::string error_msg;
- MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", NULL, num_bytes,
- PROT_READ | PROT_WRITE, true, &error_msg);
+ MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
+ PROT_READ | PROT_WRITE, true, false, &error_msg);
if (UNLIKELY(mem_map == NULL)) {
LOG(WARNING) << "Large object allocation failed: " << error_msg;
return NULL;
@@ -291,7 +291,7 @@
CHECK_EQ(size % kAlignment, 0U);
std::string error_msg;
MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
- PROT_READ | PROT_WRITE, true, &error_msg);
+ PROT_READ | PROT_WRITE, true, false, &error_msg);
CHECK(mem_map != NULL) << "Failed to allocate large object space mem map: " << error_msg;
return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}
@@ -305,9 +305,10 @@
CHECK_ALIGNED(space_capacity, kAlignment);
const size_t alloc_info_size = sizeof(AllocationInfo) * (space_capacity / kAlignment);
std::string error_msg;
- allocation_info_map_.reset(MemMap::MapAnonymous("large object free list space allocation info map",
- nullptr, alloc_info_size, PROT_READ | PROT_WRITE,
- false, &error_msg));
+ allocation_info_map_.reset(
+ MemMap::MapAnonymous("large object free list space allocation info map",
+ nullptr, alloc_info_size, PROT_READ | PROT_WRITE,
+ false, false, &error_msg));
CHECK(allocation_info_map_.get() != nullptr) << "Failed to allocate allocation info map"
<< error_msg;
allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_->Begin());
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 9bbbb3c..67e8847 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -90,7 +90,7 @@
std::string error_msg;
MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, *capacity,
- PROT_READ | PROT_WRITE, true, &error_msg);
+ PROT_READ | PROT_WRITE, true, false, &error_msg);
if (mem_map == nullptr) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
<< PrettySize(*capacity) << ": " << error_msg;
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 2c556d9..8bb73d6 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -33,7 +33,8 @@
capacity = RoundUp(capacity, kRegionSize);
std::string error_msg;
std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
- PROT_READ | PROT_WRITE, true, &error_msg));
+ PROT_READ | PROT_WRITE, true, false,
+ &error_msg));
if (mem_map.get() == nullptr) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
<< PrettySize(capacity) << " with message " << error_msg;
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index aa2a6b5..1a3f107 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -74,7 +74,7 @@
std::string error_str;
const size_t table_bytes = maxCount * sizeof(IrtEntry);
table_mem_map_.reset(MemMap::MapAnonymous("indirect ref table", nullptr, table_bytes,
- PROT_READ | PROT_WRITE, false, &error_str));
+ PROT_READ | PROT_WRITE, false, false, &error_str));
CHECK(table_mem_map_.get() != nullptr) << error_str;
CHECK_EQ(table_mem_map_->Size(), table_bytes);
table_ = reinterpret_cast<IrtEntry*>(table_mem_map_->Begin());
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 90115c3..a054462 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -31,6 +31,8 @@
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc_root-inl.h"
#include "interpreter/interpreter.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
@@ -92,6 +94,16 @@
static void UpdateEntrypoints(mirror::ArtMethod* method, const void* quick_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Runtime* const runtime = Runtime::Current();
+ jit::Jit* jit = runtime->GetJit();
+ if (jit != nullptr) {
+ const void* old_code_ptr = method->GetEntryPointFromQuickCompiledCode();
+ jit::JitCodeCache* code_cache = jit->GetCodeCache();
+ if (code_cache->ContainsCodePtr(old_code_ptr)) {
+ // Save the old compiled code since we need it to implement ClassLinker::GetQuickOatCodeFor.
+ code_cache->SaveCompiledCode(method, old_code_ptr);
+ }
+ }
method->SetEntryPointFromQuickCompiledCode(quick_code);
if (!method->IsResolutionMethod()) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -120,7 +132,8 @@
}
const void* new_quick_code;
bool uninstall = !entry_exit_stubs_installed_ && !interpreter_stubs_installed_;
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Runtime* const runtime = Runtime::Current();
+ ClassLinker* const class_linker = runtime->GetClassLinker();
bool is_class_initialized = method->GetDeclaringClass()->IsInitialized();
if (uninstall) {
if ((forced_interpret_only_ || IsDeoptimized(method)) && !method->IsNative()) {
@@ -143,7 +156,6 @@
new_quick_code = GetQuickInstrumentationEntryPoint();
} else {
new_quick_code = class_linker->GetQuickOatCodeFor(method);
- DCHECK(!class_linker->IsQuickToInterpreterBridge(new_quick_code));
}
} else {
new_quick_code = GetQuickResolutionStub();
@@ -397,6 +409,10 @@
method_unwind_listeners_.push_back(listener);
have_method_unwind_listeners_ = true;
}
+ if ((events & kBackwardBranch) != 0) {
+ backward_branch_listeners_.push_back(listener);
+ have_backward_branch_listeners_ = true;
+ }
if ((events & kDexPcMoved) != 0) {
std::list<InstrumentationListener*>* modified;
if (have_dex_pc_listeners_) {
@@ -904,6 +920,13 @@
}
}
+void Instrumentation::BackwardBranchImpl(Thread* thread, mirror::ArtMethod* method,
+ int32_t offset) const {
+ for (InstrumentationListener* listener : backward_branch_listeners_) {
+ listener->BackwardBranch(thread, method, offset);
+ }
+}
+
void Instrumentation::FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
mirror::ArtField* field) const {
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index cea0388..b667a40 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -94,6 +94,10 @@
mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
mirror::Throwable* exception_object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+
+ // Call-back for when we get a backward branch.
+ virtual void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t dex_pc_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
};
// Instrumentation is a catch-all for when extra information is required from the runtime. The
@@ -103,13 +107,14 @@
class Instrumentation {
public:
enum InstrumentationEvent {
- kMethodEntered = 1, // 1 << 0
- kMethodExited = 2, // 1 << 1
- kMethodUnwind = 4, // 1 << 2
- kDexPcMoved = 8, // 1 << 3
- kFieldRead = 16, // 1 << 4,
- kFieldWritten = 32, // 1 << 5
- kExceptionCaught = 64, // 1 << 6
+ kMethodEntered = 0x1,
+ kMethodExited = 0x2,
+ kMethodUnwind = 0x4,
+ kDexPcMoved = 0x8,
+ kFieldRead = 0x10,
+ kFieldWritten = 0x20,
+ kExceptionCaught = 0x40,
+ kBackwardBranch = 0x80,
};
Instrumentation();
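Since the constants are now written as hex bit flags, it is easier to see that events compose as a mask. A standalone sketch (mirroring a subset of the enum above) of subscribing to several events at once:

#include <cstdint>

enum InstrumentationEvent : uint32_t {  // mirrors the enum above
  kMethodEntered = 0x1,
  kMethodExited = 0x2,
  kBackwardBranch = 0x80,
};

int main() {
  // Each bit is independent, so a listener can register for any combination.
  const uint32_t events = kMethodEntered | kMethodExited | kBackwardBranch;
  return events == 0x83 ? 0 : 1;
}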
@@ -244,6 +249,10 @@
return have_exception_caught_listeners_;
}
+ bool HasBackwardBranchListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return have_backward_branch_listeners_;
+ }
+
bool IsActive() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return have_dex_pc_listeners_ || have_method_entry_listeners_ || have_method_exit_listeners_ ||
have_field_read_listeners_ || have_field_write_listeners_ ||
@@ -284,6 +293,14 @@
}
}
+ // Inform listeners that a backward branch has been taken (only supported by the interpreter).
+ void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t offset) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (UNLIKELY(HasBackwardBranchListeners())) {
+ BackwardBranchImpl(thread, method, offset);
+ }
+ }
+
// Inform listeners that we read a field (only supported by the interpreter).
void FieldReadEvent(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
@@ -361,6 +378,8 @@
void DexPcMovedEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void BackwardBranchImpl(Thread* thread, mirror::ArtMethod* method, int32_t offset) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
mirror::ArtField* field) const
@@ -429,10 +448,14 @@
// Do we have any exception caught listeners? Short-cut to avoid taking the instrumentation_lock_.
bool have_exception_caught_listeners_ GUARDED_BY(Locks::mutator_lock_);
+ // Do we have any backward branch listeners? Short-cut to avoid taking the instrumentation_lock_.
+ bool have_backward_branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
+
// The event listeners, written to with the mutator_lock_ exclusively held.
std::list<InstrumentationListener*> method_entry_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> method_exit_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);
+ std::list<InstrumentationListener*> backward_branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::shared_ptr<std::list<InstrumentationListener*>> dex_pc_listeners_
GUARDED_BY(Locks::mutator_lock_);
std::shared_ptr<std::list<InstrumentationListener*>> field_read_listeners_
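One motivating use of the new kBackwardBranch event is hotness detection for the JIT. Below is a standalone analogue of such a listener (an assumed shape, not part of the patch; Method and the threshold are stand-ins for mirror::ArtMethod and whatever tuning the JIT uses):

#include <cstdint>
#include <unordered_map>

struct Method;  // stand-in for mirror::ArtMethod

class BackwardBranchCounter {
 public:
  // Called with a non-positive dex pc offset on every backward branch.
  void BackwardBranch(Method* method, int32_t dex_pc_offset) {
    if (dex_pc_offset <= 0 && ++counts_[method] == kHotThreshold) {
      // A real listener would enqueue `method` for JIT compilation here.
    }
  }

 private:
  static constexpr uint32_t kHotThreshold = 10000;  // assumed value
  std::unordered_map<Method*, uint32_t> counts_;
};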
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index a29558e..3ab7f30 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -16,6 +16,8 @@
#include "interpreter_common.h"
+#include <cmath>
+
#include "mirror/array-inl.h"
namespace art {
@@ -839,6 +841,23 @@
result->SetL(found);
}
+// Common helper for class-loading cutouts in an unstarted runtime. We call Runtime methods that
+// rely on Java code to wrap errors in the correct exception class (e.g., NoClassDefFoundError into
+// ClassNotFoundException), so we need to do the same. The only exception is InternalError, which
+// must not be wrapped, as it signals an initialization abort.
+static void CheckExceptionGenerateClassNotFound(Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (self->IsExceptionPending()) {
+ // If it is not an InternalError, wrap it.
+ std::string type(PrettyTypeOf(self->GetException(nullptr)));
+ if (type != "java.lang.InternalError") {
+ self->ThrowNewWrappedException(self->GetCurrentLocationForThrow(),
+ "Ljava/lang/ClassNotFoundException;",
+ "ClassNotFoundException");
+ }
+ }
+}
+
static void UnstartedRuntimeInvoke(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame,
JValue* result, size_t arg_offset) {
@@ -846,18 +865,34 @@
// problems in core libraries.
std::string name(PrettyMethod(shadow_frame->GetMethod()));
if (name == "java.lang.Class java.lang.Class.forName(java.lang.String)") {
- // TODO: Support for the other variants that take more arguments should also be added.
mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
StackHandleScope<1> hs(self);
Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
UnstartedRuntimeFindClass(self, h_class_name, NullHandle<mirror::ClassLoader>(), result, name,
- true, true);
- } else if (name == "java.lang.Class java.lang.VMClassLoader.loadClass(java.lang.String, boolean)") {
+ true, false);
+ CheckExceptionGenerateClassNotFound(self);
+ } else if (name == "java.lang.Class java.lang.Class.forName(java.lang.String, boolean, java.lang.ClassLoader)") {
mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
- StackHandleScope<1> hs(self);
+ bool initialize_class = shadow_frame->GetVReg(arg_offset + 1) != 0;
+ mirror::ClassLoader* class_loader =
+ down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset + 2));
+ StackHandleScope<2> hs(self);
Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
- UnstartedRuntimeFindClass(self, h_class_name, NullHandle<mirror::ClassLoader>(), result, name,
- false, true);
+ Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
+ UnstartedRuntimeFindClass(self, h_class_name, h_class_loader, result, name, initialize_class,
+ false);
+ CheckExceptionGenerateClassNotFound(self);
+ } else if (name == "java.lang.Class java.lang.Class.classForName(java.lang.String, boolean, java.lang.ClassLoader)") {
+ mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
+ bool initialize_class = shadow_frame->GetVReg(arg_offset + 1) != 0;
+ mirror::ClassLoader* class_loader =
+ down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset + 2));
+ StackHandleScope<2> hs(self);
+ Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
+ Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
+ UnstartedRuntimeFindClass(self, h_class_name, h_class_loader, result, name, initialize_class,
+ false);
+ CheckExceptionGenerateClassNotFound(self);
} else if (name == "java.lang.Class java.lang.VMClassLoader.findLoadedClass(java.lang.ClassLoader, java.lang.String)") {
mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
mirror::ClassLoader* class_loader =
@@ -866,17 +901,47 @@
Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
UnstartedRuntimeFindClass(self, h_class_name, h_class_loader, result, name, false, false);
+ // This might have an exception pending, but the semantics are to just return null.
+ if (self->IsExceptionPending()) {
+ // If it is an InternalError, keep it. See CheckExceptionGenerateClassNotFound.
+ std::string type(PrettyTypeOf(self->GetException(nullptr)));
+ if (type != "java.lang.InternalError") {
+ self->ClearException();
+ }
+ }
} else if (name == "java.lang.Class java.lang.Void.lookupType()") {
result->SetL(Runtime::Current()->GetClassLinker()->FindPrimitiveClass('V'));
} else if (name == "java.lang.Object java.lang.Class.newInstance()") {
+ StackHandleScope<2> hs(self);
Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
- ArtMethod* c = klass->FindDeclaredDirectMethod("<init>", "()V");
- CHECK(c != NULL);
- StackHandleScope<1> hs(self);
- Handle<Object> obj(hs.NewHandle(klass->AllocObject(self)));
- CHECK(obj.Get() != NULL);
- EnterInterpreterFromInvoke(self, c, obj.Get(), NULL, NULL);
- result->SetL(obj.Get());
+ Handle<Class> h_klass(hs.NewHandle(klass));
+ // There are two situations in which we'll abort this run.
+ // 1) If the class isn't yet initialized and initialization fails.
+ // 2) If we can't find the default constructor. We'll postpone the exception to runtime.
+ // Note that 2) could likely be handled here, but for safety abort the transaction.
+ bool ok = false;
+ if (Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_klass, true, true)) {
+ ArtMethod* c = h_klass->FindDeclaredDirectMethod("<init>", "()V");
+ if (c != nullptr) {
+ Handle<Object> obj(hs.NewHandle(klass->AllocObject(self)));
+ CHECK(obj.Get() != nullptr); // We don't expect OOM at compile-time.
+ EnterInterpreterFromInvoke(self, c, obj.Get(), nullptr, nullptr);
+ result->SetL(obj.Get());
+ ok = true;
+ } else {
+ self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(), "Ljava/lang/InternalError;",
+ "Could not find default constructor for '%s'",
+ PrettyClass(h_klass.Get()).c_str());
+ }
+ }
+ if (!ok) {
+ std::string error_msg = StringPrintf("Failed in Class.newInstance for '%s' with %s",
+ PrettyClass(h_klass.Get()).c_str(),
+ PrettyTypeOf(self->GetException(nullptr)).c_str());
+ self->ThrowNewWrappedException(self->GetCurrentLocationForThrow(),
+ "Ljava/lang/InternalError;",
+ error_msg.c_str());
+ }
} else if (name == "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)") {
// Special managed code cut-out to allow field lookup in a un-started runtime that'd fail
// going the reflective Dex way.
@@ -949,12 +1014,67 @@
"Unimplemented System.arraycopy for type '%s'",
PrettyDescriptor(ctype).c_str());
}
- } else if (name == "java.lang.Object java.lang.ThreadLocal.get()") {
+ } else if (name == "long java.lang.Double.doubleToRawLongBits(double)") {
+ double in = shadow_frame->GetVRegDouble(arg_offset);
+ result->SetJ(bit_cast<int64_t>(in));
+ } else if (name == "double java.lang.Math.ceil(double)") {
+ double in = shadow_frame->GetVRegDouble(arg_offset);
+ double out;
+ // Special cases:
+ // 1) NaN, infinity, +0, -0 -> out := in. All are guaranteed by cmath.
+ // 2) -1 < in < 0 -> out := -0 (see the standalone check after this hunk).
+ if (-1.0 < in && in < 0) {
+ out = -0.0;
+ } else {
+ out = ceil(in);
+ }
+ result->SetD(out);
+ } else if (name == "java.lang.Object java.lang.ThreadLocal.get()") {
std::string caller(PrettyMethod(shadow_frame->GetLink()->GetMethod()));
+ bool ok = false;
if (caller == "java.lang.String java.lang.IntegralToString.convertInt(java.lang.AbstractStringBuilder, int)") {
// Allocate non-threadlocal buffer.
result->SetL(mirror::CharArray::Alloc(self, 11));
- } else {
+ ok = true;
+ } else if (caller == "java.lang.RealToString java.lang.RealToString.getInstance()") {
+ // Note: RealToString is implemented and used in a different fashion than IntegralToString.
+ // Conversion is done on an actual RealToString object (the conversion method is an
+ // instance method). This means it is not as clear whether it is correct to return a new
+ // object each time. The caller needs to be inspected by hand to see whether it (incorrectly)
+ // stores the object for later use.
+ // See also b/19548084 for a possible rewrite and bringing it in line with IntegralToString.
+ if (shadow_frame->GetLink()->GetLink() != nullptr) {
+ std::string caller2(PrettyMethod(shadow_frame->GetLink()->GetLink()->GetMethod()));
+ if (caller2 == "java.lang.String java.lang.Double.toString(double)") {
+ // Allocate new object.
+ mirror::Class* real_to_string_class =
+ shadow_frame->GetLink()->GetMethod()->GetDeclaringClass();
+ mirror::Object* real_to_string_obj = real_to_string_class->AllocObject(self);
+ if (real_to_string_obj != nullptr) {
+ mirror::ArtMethod* init_method =
+ real_to_string_class->FindDirectMethod("<init>", "()V");
+ if (init_method == nullptr) {
+ real_to_string_class->DumpClass(LOG(FATAL), mirror::Class::kDumpClassFullDetail);
+ }
+ JValue invoke_result;
+ // One arg, this.
+ uint32_t args = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(real_to_string_obj));
+ init_method->Invoke(self, &args, 4, &invoke_result, init_method->GetShorty());
+ if (!self->IsExceptionPending()) {
+ result->SetL(real_to_string_obj);
+ ok = true;
+ }
+ }
+
+ if (!ok) {
+ // We'll abort, so clear exception.
+ self->ClearException();
+ }
+ }
+ }
+ }
+
+ if (!ok) {
self->ThrowNewException(self->GetCurrentLocationForThrow(), "Ljava/lang/InternalError;",
"Unimplemented ThreadLocal.get");
}
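
Among the cut-outs above, the Math.ceil one is the subtle case: Java requires Math.ceil to return -0.0 for any input strictly between -1 and 0, and the explicit branch enforces that instead of trusting the platform's libm. A minimal standalone sketch of that rule; the JavaCeil name and the assert harness are illustrative only, not part of this change:

#include <cassert>
#include <cmath>

static double JavaCeil(double in) {
  // Java's Math.ceil requires -0.0 for -1 < in < 0; enforce it explicitly
  // rather than relying on the platform's ceil().
  if (-1.0 < in && in < 0.0) {
    return -0.0;
  }
  return std::ceil(in);
}

int main() {
  assert(JavaCeil(-0.5) == 0.0);         // Compares equal to +0.0...
  assert(std::signbit(JavaCeil(-0.5)));  // ...but carries the negative sign.
  assert(JavaCeil(1.2) == 2.0);          // Ordinary inputs go through ceil().
  return 0;
}
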
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index e4b3247..37324ea 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -54,6 +54,12 @@
#define UPDATE_HANDLER_TABLE() \
currentHandlersTable = handlersTable[Runtime::Current()->GetInstrumentation()->GetInterpreterHandlerTable()]
+#define BACKWARD_BRANCH_INSTRUMENTATION(offset) \
+ do { \
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); \
+ instrumentation->BackwardBranch(self, shadow_frame.GetMethod(), offset); \
+ } while (false)
+
#define UNREACHABLE_CODE_CHECK() \
do { \
if (kIsDebugBuild) { \
@@ -135,7 +141,7 @@
}
};
- const bool do_assignability_check = do_access_check;
+ constexpr bool do_assignability_check = do_access_check;
if (UNLIKELY(!shadow_frame.HasReferenceArray())) {
LOG(FATAL) << "Invalid shadow frame for interpreter use";
return JValue();
@@ -608,6 +614,7 @@
HANDLE_INSTRUCTION_START(GOTO) {
int8_t offset = inst->VRegA_10t(inst_data);
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -620,6 +627,7 @@
HANDLE_INSTRUCTION_START(GOTO_16) {
int16_t offset = inst->VRegA_20t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -632,6 +640,7 @@
HANDLE_INSTRUCTION_START(GOTO_32) {
int32_t offset = inst->VRegA_30t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -644,6 +653,7 @@
HANDLE_INSTRUCTION_START(PACKED_SWITCH) {
int32_t offset = DoPackedSwitch(inst, shadow_frame, inst_data);
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -656,6 +666,7 @@
HANDLE_INSTRUCTION_START(SPARSE_SWITCH) {
int32_t offset = DoSparseSwitch(inst, shadow_frame, inst_data);
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -758,6 +769,7 @@
if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) == shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -774,6 +786,7 @@
if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) != shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -790,6 +803,7 @@
if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) < shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -806,6 +820,7 @@
if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -822,6 +837,7 @@
if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) > shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -838,6 +854,7 @@
if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -854,6 +871,7 @@
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) == 0) {
int16_t offset = inst->VRegB_21t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -870,6 +888,7 @@
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) != 0) {
int16_t offset = inst->VRegB_21t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -886,6 +905,7 @@
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) < 0) {
int16_t offset = inst->VRegB_21t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -902,6 +922,7 @@
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) >= 0) {
int16_t offset = inst->VRegB_21t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -918,6 +939,7 @@
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) > 0) {
int16_t offset = inst->VRegB_21t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -934,6 +956,7 @@
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) <= 0) {
int16_t offset = inst->VRegB_21t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
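
Every taken backward branch -- and only backward branches, since IsBackwardBranch() tests for a negative offset -- now flows through BACKWARD_BRANCH_INSTRUMENTATION. That is how loop iterations become hotness samples for the JIT while forward branches pay nothing. A rough sketch of the counting idea, with hypothetical names and a flat counter standing in for ART's listener machinery:

#include <cstddef>
#include <cstdint>
#include <unordered_map>

// Hypothetical stand-ins for ART's method identity and hotness threshold.
using MethodId = const void*;
constexpr size_t kHotThreshold = 1000;

std::unordered_map<MethodId, size_t> gSamples;

// Called from the interpreter on each taken branch; only loop edges
// (negative offsets) are counted, mirroring IsBackwardBranch().
bool OnBackwardBranch(MethodId method, int32_t offset) {
  if (offset >= 0) {
    return false;  // Forward branches are never sampled.
  }
  size_t count = ++gSamples[method];
  return count == kHotThreshold;  // Signal "hot" exactly once.
}
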
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index ea7c192..08332d3 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -337,7 +337,8 @@
thread_group = args->group;
}
- if (!runtime->AttachCurrentThread(thread_name, as_daemon, thread_group, !runtime->IsCompiler())) {
+ if (!runtime->AttachCurrentThread(thread_name, as_daemon, thread_group,
+ !runtime->IsAotCompiler())) {
*p_env = nullptr;
return JNI_ERR;
} else {
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index b71f6cd..fc08d23 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -633,7 +633,11 @@
AcquireJdwpTokenForEvent(threadId);
}
EventFinish(pReq);
- SuspendByPolicy(suspend_policy, thread_self_id);
+ {
+ // Before suspending, we change our state to kSuspended so the debugger sees us as RUNNING.
+ ScopedThreadStateChange stsc(self, kSuspended);
+ SuspendByPolicy(suspend_policy, thread_self_id);
+ }
self->TransitionFromSuspendedToRunnable();
}
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
new file mode 100644
index 0000000..539c181
--- /dev/null
+++ b/runtime/jit/jit.cc
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jit.h"
+
+#include <dlfcn.h>
+
+#include "entrypoints/runtime_asm_entrypoints.h"
+#include "interpreter/interpreter.h"
+#include "jit_code_cache.h"
+#include "jit_instrumentation.h"
+#include "mirror/art_method-inl.h"
+#include "runtime.h"
+#include "runtime_options.h"
+#include "thread_list.h"
+#include "utils.h"
+
+namespace art {
+namespace jit {
+
+JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
+ if (!options.GetOrDefault(RuntimeArgumentMap::UseJIT)) {
+ return nullptr;
+ }
+ auto* jit_options = new JitOptions;
+ jit_options->code_cache_capacity_ =
+ options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheCapacity);
+ jit_options->compile_threshold_ =
+ options.GetOrDefault(RuntimeArgumentMap::JITCompileThreshold);
+ return jit_options;
+}
+
+Jit::Jit()
+ : jit_library_handle_(nullptr), jit_compiler_handle_(nullptr), jit_load_(nullptr),
+ jit_compile_method_(nullptr) {
+}
+
+Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
+ std::unique_ptr<Jit> jit(new Jit);
+ if (!jit->LoadCompiler(error_msg)) {
+ return nullptr;
+ }
+ jit->code_cache_.reset(JitCodeCache::Create(options->GetCodeCacheCapacity(), error_msg));
+ if (jit->GetCodeCache() == nullptr) {
+ return nullptr;
+ }
+ LOG(INFO) << "JIT created with code_cache_capacity="
+ << PrettySize(options->GetCodeCacheCapacity())
+ << " compile_threshold=" << options->GetCompileThreshold();
+ return jit.release();
+}
+
+bool Jit::LoadCompiler(std::string* error_msg) {
+ jit_library_handle_ = dlopen(
+ kIsDebugBuild ? "libartd-compiler.so" : "libart-compiler.so", RTLD_NOW);
+ if (jit_library_handle_ == nullptr) {
+ std::ostringstream oss;
+ oss << "JIT could not load libart-compiler.so: " << dlerror();
+ *error_msg = oss.str();
+ return false;
+ }
+ jit_load_ = reinterpret_cast<void* (*)(CompilerCallbacks**)>(
+ dlsym(jit_library_handle_, "jit_load"));
+ if (jit_load_ == nullptr) {
+ dlclose(jit_library_handle_);
+ *error_msg = "JIT couldn't find jit_load entry point";
+ return false;
+ }
+ jit_unload_ = reinterpret_cast<void (*)(void*)>(
+ dlsym(jit_library_handle_, "jit_unload"));
+ if (jit_unload_ == nullptr) {
+ dlclose(jit_library_handle_);
+ *error_msg = "JIT couldn't find jit_unload entry point";
+ return false;
+ }
+ jit_compile_method_ = reinterpret_cast<bool (*)(void*, mirror::ArtMethod*, Thread*)>(
+ dlsym(jit_library_handle_, "jit_compile_method"));
+ if (jit_compile_method_ == nullptr) {
+ dlclose(jit_library_handle_);
+ *error_msg = "JIT couldn't find jit_compile_method entry point";
+ return false;
+ }
+ CompilerCallbacks* callbacks = nullptr;
+ VLOG(jit) << "Calling JitLoad interpreter_only="
+ << Runtime::Current()->GetInstrumentation()->InterpretOnly();
+ jit_compiler_handle_ = (jit_load_)(&callbacks);
+ if (jit_compiler_handle_ == nullptr) {
+ dlclose(jit_library_handle_);
+ *error_msg = "JIT couldn't load compiler";
+ return false;
+ }
+ if (callbacks == nullptr) {
+ dlclose(jit_library_handle_);
+ *error_msg = "JIT compiler callbacks were not set";
+ jit_compiler_handle_ = nullptr;
+ return false;
+ }
+ compiler_callbacks_ = callbacks;
+ return true;
+}
+
+bool Jit::CompileMethod(mirror::ArtMethod* method, Thread* self) {
+ DCHECK(!method->IsRuntimeMethod());
+ const bool result = jit_compile_method_(jit_compiler_handle_, method, self);
+ if (result) {
+ method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
+ }
+ return result;
+}
+
+void Jit::CreateThreadPool() {
+ CHECK(instrumentation_cache_.get() != nullptr);
+ instrumentation_cache_->CreateThreadPool();
+}
+
+void Jit::DeleteThreadPool() {
+ if (instrumentation_cache_.get() != nullptr) {
+ instrumentation_cache_->DeleteThreadPool();
+ }
+}
+
+Jit::~Jit() {
+ DeleteThreadPool();
+ if (jit_compiler_handle_ != nullptr) {
+ jit_unload_(jit_compiler_handle_);
+ }
+ if (jit_library_handle_ != nullptr) {
+ dlclose(jit_library_handle_);
+ }
+}
+
+void Jit::CreateInstrumentationCache(size_t compile_threshold) {
+ CHECK_GT(compile_threshold, 0U);
+ Runtime* const runtime = Runtime::Current();
+ runtime->GetThreadList()->SuspendAll();
+ // Add JIT interpreter instrumentation, which tells the interpreter when to notify the JIT to
+ // compile something.
+ instrumentation_cache_.reset(new jit::JitInstrumentationCache(compile_threshold));
+ runtime->GetInstrumentation()->AddListener(
+ new jit::JitInstrumentationListener(instrumentation_cache_.get()),
+ instrumentation::Instrumentation::kMethodEntered |
+ instrumentation::Instrumentation::kBackwardBranch);
+ runtime->GetThreadList()->ResumeAll();
+}
+
+} // namespace jit
+} // namespace art
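
LoadCompiler() above keeps libart decoupled from libart-compiler by resolving three C entry points (jit_load, jit_unload, jit_compile_method) at runtime, unwinding with dlclose() on any partial failure. The same plugin-loading pattern in isolation; libplugin.so and plugin_entry are invented names, and the error handling mirrors the dlclose-on-failure discipline used above:

#include <dlfcn.h>
#include <string>

// Resolve one symbol from a plugin, closing the handle on any failure path.
bool LoadPlugin(std::string* error_msg) {
  void* handle = dlopen("libplugin.so", RTLD_NOW);
  if (handle == nullptr) {
    *error_msg = std::string("dlopen failed: ") + dlerror();
    return false;
  }
  auto entry = reinterpret_cast<bool (*)()>(dlsym(handle, "plugin_entry"));
  if (entry == nullptr) {
    dlclose(handle);  // Never leak the handle on a partial load.
    *error_msg = "plugin_entry not found";
    return false;
  }
  return entry();
}
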
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
new file mode 100644
index 0000000..b80015f
--- /dev/null
+++ b/runtime/jit/jit.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JIT_JIT_H_
+#define ART_RUNTIME_JIT_JIT_H_
+
+#include <unordered_map>
+
+#include "instrumentation.h"
+
+#include "atomic.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "gc_root.h"
+#include "jni.h"
+#include "object_callbacks.h"
+#include "thread_pool.h"
+
+namespace art {
+
+class CompilerCallbacks;
+struct RuntimeArgumentMap;
+
+namespace jit {
+
+class JitCodeCache;
+class JitInstrumentationCache;
+class JitOptions;
+
+class Jit {
+ public:
+ static constexpr bool kStressMode = kIsDebugBuild;
+ static constexpr size_t kDefaultCompileThreshold = kStressMode ? 1 : 1000;
+
+ virtual ~Jit();
+ static Jit* Create(JitOptions* options, std::string* error_msg);
+ bool CompileMethod(mirror::ArtMethod* method, Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CreateInstrumentationCache(size_t compile_threshold);
+ void CreateThreadPool();
+ CompilerCallbacks* GetCompilerCallbacks() {
+ return compiler_callbacks_;
+ }
+ const JitCodeCache* GetCodeCache() const {
+ return code_cache_.get();
+ }
+ JitCodeCache* GetCodeCache() {
+ return code_cache_.get();
+ }
+ void DeleteThreadPool();
+
+ private:
+ Jit();
+ bool LoadCompiler(std::string* error_msg);
+
+ // JIT compiler
+ void* jit_library_handle_;
+ void* jit_compiler_handle_;
+ void* (*jit_load_)(CompilerCallbacks**);
+ void (*jit_unload_)(void*);
+ bool (*jit_compile_method_)(void*, mirror::ArtMethod*, Thread*);
+
+ std::unique_ptr<jit::JitInstrumentationCache> instrumentation_cache_;
+ std::unique_ptr<jit::JitCodeCache> code_cache_;
+ CompilerCallbacks* compiler_callbacks_; // Owned by the jit compiler.
+};
+
+class JitOptions {
+ public:
+ static JitOptions* CreateFromRuntimeArguments(const RuntimeArgumentMap& options);
+ size_t GetCompileThreshold() const {
+ return compile_threshold_;
+ }
+ size_t GetCodeCacheCapacity() const {
+ return code_cache_capacity_;
+ }
+
+ private:
+ size_t code_cache_capacity_;
+ size_t compile_threshold_;
+
+ JitOptions() : code_cache_capacity_(0), compile_threshold_(0) {
+ }
+};
+
+} // namespace jit
+} // namespace art
+
+#endif // ART_RUNTIME_JIT_JIT_H_
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
new file mode 100644
index 0000000..4ae4d57
--- /dev/null
+++ b/runtime/jit/jit_code_cache.cc
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jit_code_cache.h"
+
+#include <sstream>
+
+#include "mem_map.h"
+#include "mirror/art_method-inl.h"
+#include "oat_file-inl.h"
+
+namespace art {
+namespace jit {
+
+JitCodeCache* JitCodeCache::Create(size_t capacity, std::string* error_msg) {
+ CHECK_GT(capacity, 0U);
+ CHECK_LT(capacity, kMaxCapacity);
+ std::string error_str;
+ // The map name is chosen so that android_os_Debug.cpp memory accounting recognizes it.
+ MemMap* map = MemMap::MapAnonymous("jit-code-cache", nullptr, capacity,
+ PROT_READ | PROT_WRITE | PROT_EXEC, false, false, &error_str);
+ if (map == nullptr) {
+ std::ostringstream oss;
+ oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
+ *error_msg = oss.str();
+ return nullptr;
+ }
+ return new JitCodeCache(map);
+}
+
+JitCodeCache::JitCodeCache(MemMap* mem_map)
+ : lock_("Jit code cache", kJitCodeCacheLock), num_methods_(0) {
+ VLOG(jit) << "Created jit code cache size=" << PrettySize(mem_map->Size());
+ mem_map_.reset(mem_map);
+ uint8_t* divider = mem_map->Begin() + RoundUp(mem_map->Size() / 4, kPageSize);
+ // Data cache is 1 / 4 of the map. TODO: Make this variable?
+ // Put data at the start.
+ data_cache_ptr_ = mem_map->Begin();
+ data_cache_end_ = divider;
+ data_cache_begin_ = data_cache_ptr_;
+ mprotect(data_cache_ptr_, data_cache_end_ - data_cache_begin_, PROT_READ | PROT_WRITE);
+ // Code cache after.
+ code_cache_begin_ = divider;
+ code_cache_ptr_ = divider;
+ code_cache_end_ = mem_map->End();
+}
+
+bool JitCodeCache::ContainsMethod(mirror::ArtMethod* method) const {
+ return ContainsCodePtr(method->GetEntryPointFromQuickCompiledCode());
+}
+
+bool JitCodeCache::ContainsCodePtr(const void* ptr) const {
+ return ptr >= code_cache_begin_ && ptr < code_cache_end_;
+}
+
+void JitCodeCache::FlushInstructionCache() {
+ UNIMPLEMENTED(FATAL);
+ // TODO: Investigate if we need to do this.
+ // __clear_cache(reinterpret_cast<char*>(code_cache_begin_), static_cast<int>(CodeCacheSize()));
+}
+
+uint8_t* JitCodeCache::ReserveCode(Thread* self, size_t size) {
+ MutexLock mu(self, lock_);
+ if (size > CodeCacheRemain()) {
+ return nullptr;
+ }
+ code_cache_ptr_ += size;
+ return code_cache_ptr_ - size;
+}
+
+uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
+ MutexLock mu(self, lock_);
+ const size_t size = end - begin;
+ if (size > DataCacheRemain()) {
+ return nullptr; // Out of space in the data cache.
+ }
+ std::copy(begin, end, data_cache_ptr_);
+ data_cache_ptr_ += size;
+ return data_cache_ptr_ - size;
+}
+
+const void* JitCodeCache::GetCodeFor(mirror::ArtMethod* method) {
+ const void* code = method->GetEntryPointFromQuickCompiledCode();
+ if (ContainsCodePtr(code)) {
+ return code;
+ }
+ MutexLock mu(Thread::Current(), lock_);
+ auto it = method_code_map_.find(method);
+ if (it != method_code_map_.end()) {
+ return it->second;
+ }
+ return nullptr;
+}
+
+void JitCodeCache::SaveCompiledCode(mirror::ArtMethod* method, const void* old_code_ptr) {
+ DCHECK_EQ(method->GetEntryPointFromQuickCompiledCode(), old_code_ptr);
+ DCHECK(ContainsCodePtr(old_code_ptr)) << PrettyMethod(method) << " old_code_ptr="
+ << old_code_ptr;
+ MutexLock mu(Thread::Current(), lock_);
+ auto it = method_code_map_.find(method);
+ if (it != method_code_map_.end()) {
+ return;
+ }
+ method_code_map_.Put(method, old_code_ptr);
+}
+
+} // namespace jit
+} // namespace art
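
ReserveCode() and AddDataArray() are bump-pointer allocations over the two halves of the shared map: the cursor only ever moves forward, nothing is freed, and the remaining capacity is simply end minus cursor. The allocator reduced to its core, independent of ART's MemMap (Region is an illustrative name):

#include <cstddef>
#include <cstdint>

// Bump-pointer region: allocation advances a cursor, nothing is ever freed.
struct Region {
  uint8_t* ptr;        // Next free byte.
  uint8_t* const end;  // One past the last usable byte.

  uint8_t* Reserve(size_t size) {
    if (size > static_cast<size_t>(end - ptr)) {
      return nullptr;  // Out of space; the caller must cope.
    }
    uint8_t* result = ptr;  // Same as "ptr += size; return ptr - size;" above.
    ptr += size;
    return result;
  }
};
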
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
new file mode 100644
index 0000000..aa8c717
--- /dev/null
+++ b/runtime/jit/jit_code_cache.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
+#define ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
+
+#include "instrumentation.h"
+
+#include "atomic.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "gc_root.h"
+#include "jni.h"
+#include "oat_file.h"
+#include "object_callbacks.h"
+#include "safe_map.h"
+#include "thread_pool.h"
+
+namespace art {
+
+class CompiledMethod;
+class CompilerCallbacks;
+
+namespace mirror {
+class ArtMethod;
+} // namespace mirror
+
+namespace jit {
+
+class JitInstrumentationCache;
+
+class JitCodeCache {
+ public:
+ static constexpr size_t kMaxCapacity = 1 * GB;
+ static constexpr size_t kDefaultCapacity = 2 * MB;
+
+ static JitCodeCache* Create(size_t capacity, std::string* error_msg);
+
+ const uint8_t* CodeCachePtr() const {
+ return code_cache_ptr_;
+ }
+ size_t CodeCacheSize() const {
+ return code_cache_ptr_ - code_cache_begin_;
+ }
+ size_t CodeCacheRemain() const {
+ return code_cache_end_ - code_cache_ptr_;
+ }
+ size_t DataCacheSize() const {
+ return data_cache_ptr_ - data_cache_begin_;
+ }
+ size_t DataCacheRemain() const {
+ return data_cache_end_ - data_cache_ptr_;
+ }
+ size_t NumMethods() const {
+ return num_methods_;
+ }
+
+ bool ContainsMethod(mirror::ArtMethod* method) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool ContainsCodePtr(const void* ptr) const;
+
+ uint8_t* ReserveCode(Thread* self, size_t size) LOCKS_EXCLUDED(lock_);
+
+ uint8_t* AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end)
+ LOCKS_EXCLUDED(lock_);
+
+ // Get code for a method; returns null if it is not in the JIT cache.
+ const void* GetCodeFor(mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+
+ void SaveCompiledCode(mirror::ArtMethod* method, const void* old_code_ptr)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+
+ private:
+ // Takes ownership of code_mem_map.
+ explicit JitCodeCache(MemMap* code_mem_map);
+ void FlushInstructionCache();
+
+ Mutex lock_;
+ // Mem map which holds both code and data. A single map is used because method headers in the
+ // code cache hold 32 bit offsets to entries in the data cache; with separate maps more than
+ // 4GB apart, such offsets could not reach.
+ std::unique_ptr<MemMap> mem_map_;
+ // Code cache section.
+ uint8_t* code_cache_ptr_;
+ const uint8_t* code_cache_begin_;
+ const uint8_t* code_cache_end_;
+ // Data cache section.
+ uint8_t* data_cache_ptr_;
+ const uint8_t* data_cache_begin_;
+ const uint8_t* data_cache_end_;
+ size_t num_methods_;
+ // TODO: This relies on methods not moving.
+ // This map holds code for methods if they were deoptimized by the instrumentation stubs. This is
+ // required since we have to implement ClassLinker::GetQuickOatCodeFor for walking stacks.
+ SafeMap<mirror::ArtMethod*, const void*> method_code_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(JitCodeCache);
+};
+
+
+} // namespace jit
+} // namespace art
+
+#endif // ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
new file mode 100644
index 0000000..160e678
--- /dev/null
+++ b/runtime/jit/jit_instrumentation.cc
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jit_instrumentation.h"
+
+#include "jit.h"
+#include "jit_code_cache.h"
+#include "mirror/art_method-inl.h"
+#include "scoped_thread_state_change.h"
+
+namespace art {
+namespace jit {
+
+class JitCompileTask : public Task {
+ public:
+ explicit JitCompileTask(mirror::ArtMethod* method, JitInstrumentationCache* cache)
+ : method_(method), cache_(cache) {
+ }
+
+ virtual void Run(Thread* self) OVERRIDE {
+ ScopedObjectAccess soa(self);
+ VLOG(jit) << "JitCompileTask compiling method " << PrettyMethod(method_);
+ if (Runtime::Current()->GetJit()->CompileMethod(method_, self)) {
+ cache_->SignalCompiled(self, method_);
+ } else {
+ VLOG(jit) << "Failed to compile method " << PrettyMethod(method_);
+ }
+ }
+
+ virtual void Finalize() OVERRIDE {
+ delete this;
+ }
+
+ private:
+ mirror::ArtMethod* const method_;
+ JitInstrumentationCache* const cache_;
+};
+
+JitInstrumentationCache::JitInstrumentationCache(size_t hot_method_threshold)
+ : lock_("jit instrumentation lock"), hot_method_threshold_(hot_method_threshold) {
+}
+
+void JitInstrumentationCache::CreateThreadPool() {
+ thread_pool_.reset(new ThreadPool("Jit thread pool", 1));
+}
+
+void JitInstrumentationCache::DeleteThreadPool() {
+ thread_pool_.reset();
+}
+
+void JitInstrumentationCache::SignalCompiled(Thread* self, mirror::ArtMethod* method) {
+ ScopedObjectAccessUnchecked soa(self);
+ jmethodID method_id = soa.EncodeMethod(method);
+ MutexLock mu(self, lock_);
+ auto it = samples_.find(method_id);
+ if (it != samples_.end()) {
+ samples_.erase(it);
+ }
+}
+
+void JitInstrumentationCache::AddSamples(Thread* self, mirror::ArtMethod* method, size_t count) {
+ ScopedObjectAccessUnchecked soa(self);
+ // Since we don't have on-stack replacement, some methods can remain in the interpreter longer
+ // than we want, resulting in samples even after the method is compiled.
+ if (method->IsClassInitializer() ||
+ Runtime::Current()->GetJit()->GetCodeCache()->ContainsMethod(method)) {
+ return;
+ }
+ jmethodID method_id = soa.EncodeMethod(method);
+ bool is_hot = false;
+ {
+ MutexLock mu(self, lock_);
+ size_t sample_count = 0;
+ auto it = samples_.find(method_id);
+ if (it != samples_.end()) {
+ it->second += count;
+ sample_count = it->second;
+ } else {
+ sample_count = count;
+ samples_.insert(std::make_pair(method_id, count));
+ }
+ // If we have enough samples, mark as hot and request Jit compilation.
+ if (sample_count >= hot_method_threshold_ && sample_count - count < hot_method_threshold_) {
+ is_hot = true;
+ }
+ }
+ if (is_hot) {
+ if (thread_pool_.get() != nullptr) {
+ thread_pool_->AddTask(self, new JitCompileTask(method->GetInterfaceMethodIfProxy(), this));
+ thread_pool_->StartWorkers(self);
+ } else {
+ VLOG(jit) << "Compiling hot method " << PrettyMethod(method);
+ Runtime::Current()->GetJit()->CompileMethod(method->GetInterfaceMethodIfProxy(), self);
+ }
+ }
+}
+
+JitInstrumentationListener::JitInstrumentationListener(JitInstrumentationCache* cache)
+ : instrumentation_cache_(cache) {
+ CHECK(instrumentation_cache_ != nullptr);
+}
+
+} // namespace jit
+} // namespace art
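
The hotness test in AddSamples() -- sample_count >= threshold && sample_count - count < threshold -- is true exactly for the update that crosses the threshold, so each method is submitted for compilation once even though samples keep arriving afterwards. A worked check of that arithmetic with made-up numbers:

#include <cassert>
#include <cstddef>

// True only for the update whose additions cross the threshold.
bool CrossesThreshold(size_t total, size_t added, size_t threshold) {
  return total >= threshold && total - added < threshold;
}

int main() {
  constexpr size_t kThreshold = 1000;
  assert(!CrossesThreshold(999, 1, kThreshold));   // Still cold.
  assert(CrossesThreshold(1000, 1, kThreshold));   // 999 -> 1000: fires.
  assert(!CrossesThreshold(1001, 1, kThreshold));  // Already hot: stays silent.
  assert(CrossesThreshold(1004, 10, kThreshold));  // 994 -> 1004: fires once.
  return 0;
}
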
diff --git a/runtime/jit/jit_instrumentation.h b/runtime/jit/jit_instrumentation.h
new file mode 100644
index 0000000..9576f4b
--- /dev/null
+++ b/runtime/jit/jit_instrumentation.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JIT_JIT_INSTRUMENTATION_H_
+#define ART_RUNTIME_JIT_JIT_INSTRUMENTATION_H_
+
+#include <unordered_map>
+
+#include "instrumentation.h"
+
+#include "atomic.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "gc_root.h"
+#include "jni.h"
+#include "object_callbacks.h"
+#include "thread_pool.h"
+
+namespace art {
+namespace mirror {
+ class ArtField;
+ class ArtMethod;
+ class Class;
+ class Object;
+ class Throwable;
+} // namespace mirror
+union JValue;
+class Thread;
+class ThrowLocation;
+
+namespace jit {
+
+// Keeps track of which methods are hot.
+class JitInstrumentationCache {
+ public:
+ explicit JitInstrumentationCache(size_t hot_method_threshold);
+ void AddSamples(Thread* self, mirror::ArtMethod* method, size_t samples)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SignalCompiled(Thread* self, mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CreateThreadPool();
+ void DeleteThreadPool();
+
+ private:
+ Mutex lock_;
+ std::unordered_map<jmethodID, size_t> samples_;
+ size_t hot_method_threshold_;
+ std::unique_ptr<ThreadPool> thread_pool_;
+};
+
+class JitInstrumentationListener : public instrumentation::InstrumentationListener {
+ public:
+ explicit JitInstrumentationListener(JitInstrumentationCache* cache);
+
+ virtual void MethodEntered(Thread* thread, mirror::Object* /*this_object*/,
+ mirror::ArtMethod* method, uint32_t /*dex_pc*/)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ instrumentation_cache_->AddSamples(thread, method, 1);
+ }
+ virtual void MethodExited(Thread* /*thread*/, mirror::Object* /*this_object*/,
+ mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/,
+ const JValue& /*return_value*/)
+ OVERRIDE { }
+ virtual void MethodUnwind(Thread* /*thread*/, mirror::Object* /*this_object*/,
+ mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/) OVERRIDE { }
+ virtual void FieldRead(Thread* /*thread*/, mirror::Object* /*this_object*/,
+ mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/,
+ mirror::ArtField* /*field*/) OVERRIDE { }
+ virtual void FieldWritten(Thread* /*thread*/, mirror::Object* /*this_object*/,
+ mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/,
+ mirror::ArtField* /*field*/, const JValue& /*field_value*/)
+ OVERRIDE { }
+ virtual void ExceptionCaught(Thread* /*thread*/, const ThrowLocation& /*throw_location*/,
+ mirror::ArtMethod* /*catch_method*/, uint32_t /*catch_dex_pc*/,
+ mirror::Throwable* /*exception_object*/) OVERRIDE { }
+
+ virtual void DexPcMoved(Thread* /*self*/, mirror::Object* /*this_object*/,
+ mirror::ArtMethod* /*method*/, uint32_t /*new_dex_pc*/) OVERRIDE { }
+
+ // We only care about how many dex instructions were executed in the Jit.
+ virtual void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t dex_pc_offset)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CHECK_LE(dex_pc_offset, 0);
+ instrumentation_cache_->AddSamples(thread, method, 1);
+ }
+
+ private:
+ JitInstrumentationCache* const instrumentation_cache_;
+};
+
+} // namespace jit
+} // namespace art
+
+#endif // ART_RUNTIME_JIT_JIT_INSTRUMENTATION_H_
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index a722813..588615f 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -138,9 +138,10 @@
#endif
// Return true if the address range is contained in a single /proc/self/map entry.
-static bool ContainedWithinExistingMap(uintptr_t begin,
- uintptr_t end,
+static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size,
std::string* error_msg) {
+ uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
+ uintptr_t end = begin + size;
std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
if (map.get() == nullptr) {
*error_msg = StringPrintf("Failed to build process map");
@@ -152,11 +153,9 @@
return true;
}
}
- std::string maps;
- ReadFileToString("/proc/self/maps", &maps);
+ PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
*error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
- "any existing map:\n%s\n",
- begin, end, maps.c_str());
+ "any existing map. See process maps in the log.", begin, end);
return false;
}
@@ -242,7 +241,7 @@
}
MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byte_count, int prot,
- bool low_4gb, std::string* error_msg) {
+ bool low_4gb, bool reuse, std::string* error_msg) {
#ifndef __LP64__
UNUSED(low_4gb);
#endif
@@ -252,6 +251,15 @@
size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+ if (reuse) {
+ // reuse means it is okay that it overlaps an existing page mapping.
+ // Only use this if you actually made the page reservation yourself.
+ CHECK(expected_ptr != nullptr);
+
+ DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << error_msg;
+ flags |= MAP_FIXED;
+ }
+
ScopedFd fd(-1);
#ifdef USE_ASHMEM
@@ -275,7 +283,7 @@
*error_msg = StringPrintf("ashmem_create_region failed for '%s': %s", name, strerror(errno));
return nullptr;
}
- flags = MAP_PRIVATE;
+ flags &= ~MAP_ANONYMOUS;
}
#endif
@@ -375,12 +383,11 @@
#endif
if (actual == MAP_FAILED) {
- std::string maps;
- ReadFileToString("/proc/self/maps", &maps);
+ PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
- *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s\n%s",
- expected_ptr, page_aligned_byte_count, prot, flags, fd.get(),
- strerror(saved_errno), maps.c_str());
+ *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. See process "
+ "maps in the log.", expected_ptr, page_aligned_byte_count, prot,
+ flags, fd.get(), strerror(saved_errno));
return nullptr;
}
std::ostringstream check_map_request_error_msg;
@@ -396,8 +403,6 @@
std::string* error_msg) {
CHECK_NE(0, prot);
CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
- uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
- uintptr_t limit = expected + byte_count;
// Note that we do not allow MAP_FIXED unless reuse == true, i.e. we
// expect this mapping to be contained within an existing map.
@@ -406,7 +411,7 @@
// Only use this if you actually made the page reservation yourself.
CHECK(expected_ptr != nullptr);
- DCHECK(ContainedWithinExistingMap(expected, limit, error_msg));
+ DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << error_msg;
flags |= MAP_FIXED;
} else {
CHECK_EQ(0, flags & MAP_FIXED);
@@ -435,14 +440,13 @@
if (actual == MAP_FAILED) {
auto saved_errno = errno;
- std::string maps;
- ReadFileToString("/proc/self/maps", &maps);
+ PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
*error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
- ") of file '%s' failed: %s\n%s",
+ ") of file '%s' failed: %s. See process maps in the log.",
page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
static_cast<int64_t>(page_aligned_offset), filename,
- strerror(saved_errno), maps.c_str());
+ strerror(saved_errno));
return nullptr;
}
std::ostringstream check_map_request_error_msg;
@@ -544,11 +548,9 @@
// Unmap/map the tail region.
int result = munmap(tail_base_begin, tail_base_size);
if (result == -1) {
- std::string maps;
- ReadFileToString("/proc/self/maps", &maps);
- *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'\n%s",
- tail_base_begin, tail_base_size, name_.c_str(),
- maps.c_str());
+ PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
+ *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'. See process maps in the log.",
+ tail_base_begin, tail_base_size, name_.c_str());
return nullptr;
}
// Don't cause memory allocation between the munmap and the mmap
@@ -558,11 +560,10 @@
uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(tail_base_begin, tail_base_size, tail_prot,
flags, fd.get(), 0));
if (actual == MAP_FAILED) {
- std::string maps;
- ReadFileToString("/proc/self/maps", &maps);
- *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed\n%s",
- tail_base_begin, tail_base_size, tail_prot, flags, fd.get(),
- maps.c_str());
+ PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
+ *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
+ "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
+ fd.get());
return nullptr;
}
return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
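
The new reuse flag turns MapAnonymous() into a MAP_FIXED request, and MAP_FIXED silently replaces whatever pages already live at that address -- hence the insistence, backed by the ContainedWithinExistingMap debug check, that the caller already owns a reservation covering the range. The underlying mmap idiom stripped to its essentials, assuming 4 KiB pages:

#include <sys/mman.h>
#include <cassert>
#include <cstddef>

int main() {
  // Assumes 4 KiB pages; real code would query sysconf(_SC_PAGESIZE).
  const size_t kPage = 4096;
  // 1) Reserve address space with a plain anonymous mapping.
  void* reservation = mmap(nullptr, 8 * kPage, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(reservation != MAP_FAILED);
  // 2) Re-map part of the reservation in place. MAP_FIXED replaces existing
  //    pages without complaint, so it is only safe over a reservation we own.
  void* reused = mmap(reservation, kPage, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  assert(reused == reservation);
  munmap(reservation, 8 * kPage);
  return 0;
}
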
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index dc337e0..11b2569 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -54,6 +54,7 @@
public:
// Request an anonymous region of length 'byte_count' and a requested base address.
// Use NULL as the requested base address if you don't care.
+ // "reuse" allows re-mapping an address range from an existing mapping.
//
// The word "anonymous" in this context means "not backed by a file". The supplied
// 'ashmem_name' will be used -- on systems that support it -- to give the mapping
@@ -61,7 +62,7 @@
//
// On success, returns a MemMap instance. On failure, returns null.
static MemMap* MapAnonymous(const char* ashmem_name, uint8_t* addr, size_t byte_count, int prot,
- bool low_4gb, std::string* error_msg);
+ bool low_4gb, bool reuse, std::string* error_msg);
// Map part of a file, taking care of non-page aligned offsets. The
// "start" offset is absolute, not relative.
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index 14a72b9..f635b5d 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -43,6 +43,7 @@
2 * page_size,
PROT_READ | PROT_WRITE,
low_4gb,
+ false,
&error_msg);
// Check its state and write to it.
uint8_t* base0 = m0->Begin();
@@ -129,11 +130,12 @@
CommonInit();
std::string error_msg;
std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
- nullptr,
- 0,
- PROT_READ,
- false,
- &error_msg));
+ nullptr,
+ 0,
+ PROT_READ,
+ false,
+ false,
+ &error_msg));
ASSERT_TRUE(map.get() != nullptr) << error_msg;
ASSERT_TRUE(error_msg.empty());
map.reset(MemMap::MapAnonymous("MapAnonymousEmpty",
@@ -141,6 +143,7 @@
kPageSize,
PROT_READ | PROT_WRITE,
false,
+ false,
&error_msg));
ASSERT_TRUE(map.get() != nullptr) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -151,11 +154,12 @@
CommonInit();
std::string error_msg;
std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
- nullptr,
- kPageSize,
- PROT_READ | PROT_WRITE,
- true,
- &error_msg));
+ nullptr,
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ true,
+ false,
+ &error_msg));
ASSERT_TRUE(map.get() != nullptr) << error_msg;
ASSERT_TRUE(error_msg.empty());
ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
@@ -167,31 +171,34 @@
std::string error_msg;
// Map at an address that should work, which should succeed.
std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
- reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS),
- kPageSize,
- PROT_READ | PROT_WRITE,
- false,
- &error_msg));
+ reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS),
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ false,
+ false,
+ &error_msg));
ASSERT_TRUE(map0.get() != nullptr) << error_msg;
ASSERT_TRUE(error_msg.empty());
ASSERT_TRUE(map0->BaseBegin() == reinterpret_cast<void*>(ART_BASE_ADDRESS));
// Map at an unspecified address, which should succeed.
std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
- nullptr,
- kPageSize,
- PROT_READ | PROT_WRITE,
- false,
- &error_msg));
+ nullptr,
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ false,
+ false,
+ &error_msg));
ASSERT_TRUE(map1.get() != nullptr) << error_msg;
ASSERT_TRUE(error_msg.empty());
ASSERT_TRUE(map1->BaseBegin() != nullptr);
// Attempt to map at the same address, which should fail.
std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
- reinterpret_cast<uint8_t*>(map1->BaseBegin()),
- kPageSize,
- PROT_READ | PROT_WRITE,
- false,
- &error_msg));
+ reinterpret_cast<uint8_t*>(map1->BaseBegin()),
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ false,
+ false,
+ &error_msg));
ASSERT_TRUE(map2.get() == nullptr) << error_msg;
ASSERT_TRUE(!error_msg.empty());
}
@@ -217,6 +224,7 @@
0x21000000,
PROT_READ | PROT_WRITE,
true,
+ false,
&error_msg));
ASSERT_TRUE(map.get() != nullptr) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -230,11 +238,12 @@
uintptr_t ptr = 0;
ptr -= kPageSize; // Now it's close to the top.
std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
- reinterpret_cast<uint8_t*>(ptr),
- 2 * kPageSize, // brings it over the top.
- PROT_READ | PROT_WRITE,
- false,
- &error_msg));
+ reinterpret_cast<uint8_t*>(ptr),
+ 2 * kPageSize, // brings it over the top.
+ PROT_READ | PROT_WRITE,
+ false,
+ false,
+ &error_msg));
ASSERT_EQ(nullptr, map.get());
ASSERT_FALSE(error_msg.empty());
}
@@ -243,12 +252,14 @@
TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
CommonInit();
std::string error_msg;
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
- reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
- kPageSize,
- PROT_READ | PROT_WRITE,
- true,
- &error_msg));
+ std::unique_ptr<MemMap> map(
+ MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
+ reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ true,
+ false,
+ &error_msg));
ASSERT_EQ(nullptr, map.get());
ASSERT_FALSE(error_msg.empty());
}
@@ -257,16 +268,40 @@
CommonInit();
std::string error_msg;
std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
- reinterpret_cast<uint8_t*>(0xF0000000),
- 0x20000000,
- PROT_READ | PROT_WRITE,
- true,
- &error_msg));
+ reinterpret_cast<uint8_t*>(0xF0000000),
+ 0x20000000,
+ PROT_READ | PROT_WRITE,
+ true,
+ false,
+ &error_msg));
ASSERT_EQ(nullptr, map.get());
ASSERT_FALSE(error_msg.empty());
}
#endif
+TEST_F(MemMapTest, MapAnonymousReuse) {
+ CommonInit();
+ std::string error_msg;
+ std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousReserve",
+ nullptr,
+ 0x20000,
+ PROT_READ | PROT_WRITE,
+ false,
+ false,
+ &error_msg));
+ ASSERT_NE(nullptr, map.get());
+ ASSERT_TRUE(error_msg.empty());
+ std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymousReused",
+ reinterpret_cast<uint8_t*>(map->BaseBegin()),
+ 0x10000,
+ PROT_READ | PROT_WRITE,
+ false,
+ true,
+ &error_msg));
+ ASSERT_NE(nullptr, map2.get());
+ ASSERT_TRUE(error_msg.empty());
+}
+
TEST_F(MemMapTest, CheckNoGaps) {
CommonInit();
std::string error_msg;
@@ -277,6 +312,7 @@
kPageSize * kNumPages,
PROT_READ | PROT_WRITE,
false,
+ false,
&error_msg));
ASSERT_TRUE(map.get() != nullptr) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -292,6 +328,7 @@
kPageSize,
PROT_READ | PROT_WRITE,
false,
+ false,
&error_msg));
ASSERT_TRUE(map0.get() != nullptr) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -300,6 +337,7 @@
kPageSize,
PROT_READ | PROT_WRITE,
false,
+ false,
&error_msg));
ASSERT_TRUE(map1.get() != nullptr) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -308,6 +346,7 @@
kPageSize,
PROT_READ | PROT_WRITE,
false,
+ false,
&error_msg));
ASSERT_TRUE(map2.get() != nullptr) << error_msg;
ASSERT_TRUE(error_msg.empty());
diff --git a/runtime/mirror/art_field.cc b/runtime/mirror/art_field.cc
index 5a4ebd1..3cea4a1 100644
--- a/runtime/mirror/art_field.cc
+++ b/runtime/mirror/art_field.cc
@@ -44,7 +44,7 @@
void ArtField::SetOffset(MemberOffset num_bytes) {
DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
- if (kIsDebugBuild && Runtime::Current()->IsCompiler() &&
+ if (kIsDebugBuild && Runtime::Current()->IsAotCompiler() &&
!Runtime::Current()->UseCompileTimeClassPath()) {
Primitive::Type type = GetTypeAsPrimitiveType();
if (type == Primitive::kPrimDouble || type == Primitive::kPrimLong) {
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 7d31148..c27c6e9 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -147,7 +147,10 @@
inline uint32_t ArtMethod::GetCodeSize() {
DCHECK(!IsRuntimeMethod() && !IsProxyMethod()) << PrettyMethod(this);
- const void* code = EntryPointToCodePointer(GetEntryPointFromQuickCompiledCode());
+ return GetCodeSize(EntryPointToCodePointer(GetEntryPointFromQuickCompiledCode()));
+}
+
+inline uint32_t ArtMethod::GetCodeSize(const void* code) {
if (code == nullptr) {
return 0u;
}
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index b2016dc..26f6f34 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -27,6 +27,8 @@
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "interpreter/interpreter.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "jni_internal.h"
#include "mapping_table.h"
#include "object_array-inl.h"
@@ -229,6 +231,7 @@
if (abort_on_failure) {
LOG(FATAL) << "Failed to find Dex offset for PC offset " << reinterpret_cast<void*>(sought_offset)
<< "(PC " << reinterpret_cast<void*>(pc) << ", entry_point=" << entry_point
+ << " current entry_point=" << GetQuickOatEntryPoint(sizeof(void*))
<< ") in " << PrettyMethod(this);
}
return DexFile::kDexNoIndex;
@@ -329,6 +332,13 @@
class_linker->IsQuickResolutionStub(code)) {
return;
}
+ // If we are the JIT then we may have just compiled the method after the
+ // IsQuickToInterpreterBridge check.
+ jit::Jit* const jit = Runtime::Current()->GetJit();
+ if (jit != nullptr &&
+ jit->GetCodeCache()->ContainsCodePtr(reinterpret_cast<const void*>(code))) {
+ return;
+ }
/*
* During a stack walk, a return PC may point past-the-end of the code
* in the case that the last instruction is a call that isn't expected to
@@ -336,11 +346,12 @@
*
* NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
*/
- CHECK(PcIsWithinQuickCode(pc))
+ CHECK(PcIsWithinQuickCode(reinterpret_cast<uintptr_t>(code), pc))
<< PrettyMethod(this)
<< " pc=" << std::hex << pc
<< " code=" << code
- << " size=" << GetCodeSize();
+ << " size=" << GetCodeSize(
+ EntryPointToCodePointer(reinterpret_cast<const void*>(code)));
}
bool ArtMethod::IsEntrypointInterpreter() {
@@ -410,7 +421,8 @@
}
// Ensure that we won't be accidentally calling quick compiled code when -Xint.
- if (kIsDebugBuild && Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly()) {
+ if (kIsDebugBuild && runtime->GetInstrumentation()->IsForcedInterpretOnly()) {
+ DCHECK(!runtime->UseJit());
CHECK(IsEntrypointInterpreter())
<< "Don't call compiled code when -Xint " << PrettyMethod(this);
}
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index f33ca94..d878f25 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -305,18 +305,8 @@
// quick entrypoint. This code isn't robust for instrumentation, etc. and is only used for
// debug purposes.
bool PcIsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- uintptr_t code = reinterpret_cast<uintptr_t>(GetEntryPointFromQuickCompiledCode());
- if (code == 0) {
- return pc == 0;
- }
- /*
- * During a stack walk, a return PC may point past-the-end of the code
- * in the case that the last instruction is a call that isn't expected to
- * return. Thus, we check <= code + GetCodeSize().
- *
- * NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
- */
- return code <= pc && pc <= code + GetCodeSize();
+ return PcIsWithinQuickCode(
+ reinterpret_cast<uintptr_t>(GetEntryPointFromQuickCompiledCode()), pc);
}
void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -622,6 +612,24 @@
return offset;
}
+ // Code points to the start of the quick code.
+ static uint32_t GetCodeSize(const void* code);
+
+ static bool PcIsWithinQuickCode(uintptr_t code, uintptr_t pc) {
+ if (code == 0) {
+ return pc == 0;
+ }
+ /*
+ * During a stack walk, a return PC may point past-the-end of the code
+ * in the case that the last instruction is a call that isn't expected to
+ * return. Thus, we check <= code + GetCodeSize().
+ *
+ * NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
+ */
+ return code <= pc && pc <= code + GetCodeSize(
+ EntryPointToCodePointer(reinterpret_cast<const void*>(code)));
+ }
+
friend struct art::ArtMethodOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(ArtMethod);
};
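
The inclusive upper bound in the static PcIsWithinQuickCode() above (pc <= code + size rather than a strict <) is deliberate: during a stack walk a return PC may point one past the end of a method whose final instruction is a call that never returns. The check in standalone form, with the code size passed in directly as an assumption:

#include <cstdint>

// Return PCs may legitimately point one past the end of the code region,
// so the upper bound is inclusive.
bool PcInRange(uintptr_t code, uintptr_t size, uintptr_t pc) {
  if (code == 0) {
    return pc == 0;  // No code: only a null PC matches.
  }
  return code <= pc && pc <= code + size;
}
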
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 037072d..e1fe3eb 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -58,6 +58,70 @@
namespace art {
+static std::unique_ptr<std::vector<const DexFile*>>
+ConvertJavaArrayToNative(JNIEnv* env, jobject arrayObject) {
+ jarray array = reinterpret_cast<jarray>(arrayObject);
+
+ jsize array_size = env->GetArrayLength(array);
+ if (env->ExceptionCheck() == JNI_TRUE) {
+ return std::unique_ptr<std::vector<const DexFile*>>();
+ }
+
+ // TODO: Optimize. On 32bit we can use an int array.
+ jboolean is_long_data_copied;
+ jlong* long_data = env->GetLongArrayElements(reinterpret_cast<jlongArray>(array),
+ &is_long_data_copied);
+ if (env->ExceptionCheck() == JNI_TRUE) {
+ return std::unique_ptr<std::vector<const DexFile*>>();
+ }
+
+ std::unique_ptr<std::vector<const DexFile*>> ret(new std::vector<const DexFile*>());
+ ret->reserve(array_size);
+ for (jsize i = 0; i < array_size; ++i) {
+ ret->push_back(reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(*(long_data + i))));
+ }
+
+ env->ReleaseLongArrayElements(reinterpret_cast<jlongArray>(array), long_data, JNI_ABORT);
+ if (env->ExceptionCheck() == JNI_TRUE) {
+ return std::unique_ptr<std::vector<const DexFile*>>();
+ }
+
+ return ret;
+}
+
+static jlongArray ConvertNativeToJavaArray(JNIEnv* env,
+ std::vector<std::unique_ptr<const DexFile>>& vec) {
+ size_t vec_size = vec.size();
+ jlongArray long_array = env->NewLongArray(static_cast<jsize>(vec_size));
+ if (env->ExceptionCheck() == JNI_TRUE) {
+ return nullptr;
+ }
+
+ jboolean is_long_data_copied;
+ jlong* long_data = env->GetLongArrayElements(long_array, &is_long_data_copied);
+ if (env->ExceptionCheck() == JNI_TRUE) {
+ return nullptr;
+ }
+
+ jlong* tmp = long_data;
+ for (auto& dex_file : vec) {
+ *tmp = reinterpret_cast<uintptr_t>(dex_file.get());
+ tmp++;
+ }
+
+ env->ReleaseLongArrayElements(long_array, long_data, 0);
+ if (env->ExceptionCheck() == JNI_TRUE) {
+ return nullptr;
+ }
+
+ // Now release all the unique_ptrs.
+ for (auto& dex_file : vec) {
+ dex_file.release();
+ }
+
+ return long_array;
+}
+
// A smart pointer that provides read-only access to a Java string's UTF chars.
// Unlike libcore's NullableScopedUtfChars, this will *not* throw NullPointerException if
// passed a null jstring. The correct idiom is:
@@ -104,7 +168,7 @@
void operator=(const NullableScopedUtfChars&);
};
-static jlong DexFile_openDexFileNative(JNIEnv* env, jclass, jstring javaSourceName, jstring javaOutputName, jint) {
+static jobject DexFile_openDexFileNative(JNIEnv* env, jclass, jstring javaSourceName, jstring javaOutputName, jint) {
ScopedUtfChars sourceName(env, javaSourceName);
if (sourceName.c_str() == NULL) {
return 0;
@@ -115,20 +179,26 @@
}
ClassLinker* linker = Runtime::Current()->GetClassLinker();
- std::unique_ptr<std::vector<std::unique_ptr<const DexFile>>> dex_files(
- new std::vector<std::unique_ptr<const DexFile>>());
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
std::vector<std::string> error_msgs;
bool success = linker->OpenDexFilesFromOat(sourceName.c_str(), outputName.c_str(), &error_msgs,
- dex_files.get());
+ &dex_files);
- if (success || !dex_files->empty()) {
- // In the case of non-success, we have not found or could not generate the oat file.
- // But we may still have found a dex file that we can use.
- return static_cast<jlong>(reinterpret_cast<uintptr_t>(dex_files.release()));
+ if (success || !dex_files.empty()) {
+ jlongArray array = ConvertNativeToJavaArray(env, dex_files);
+ if (array == nullptr) {
+ ScopedObjectAccess soa(env);
+ for (auto& dex_file : dex_files) {
+ if (Runtime::Current()->GetClassLinker()->IsDexFileRegistered(*dex_file)) {
+ dex_file.release();
+ }
+ }
+ }
+ return array;
} else {
// The vector should be empty after a failed loading attempt.
- DCHECK_EQ(0U, dex_files->size());
+ DCHECK_EQ(0U, dex_files.size());
ScopedObjectAccess soa(env);
CHECK(!error_msgs.empty());
@@ -140,27 +210,17 @@
ThrowWrappedIOException("%s", it->c_str());
}
- return 0;
+ return nullptr;
}
}
-static std::vector<std::unique_ptr<const DexFile>>*
-toDexFiles(jlong dex_file_address, JNIEnv* env) {
- std::vector<std::unique_ptr<const DexFile>>* dex_files
- = reinterpret_cast<std::vector<std::unique_ptr<const DexFile>>*>(
- static_cast<uintptr_t>(dex_file_address));
- if (UNLIKELY(dex_files == nullptr)) {
- ScopedObjectAccess soa(env);
- ThrowNullPointerException(NULL, "dex_file == null");
- }
- return dex_files;
-}
-
-static void DexFile_closeDexFile(JNIEnv* env, jclass, jlong cookie) {
- std::unique_ptr<std::vector<std::unique_ptr<const DexFile>>> dex_files(toDexFiles(cookie, env));
+static void DexFile_closeDexFile(JNIEnv* env, jclass, jobject cookie) {
+ std::unique_ptr<std::vector<const DexFile*>> dex_files = ConvertJavaArrayToNative(env, cookie);
if (dex_files.get() == nullptr) {
+ DCHECK(env->ExceptionCheck());
return;
}
+
ScopedObjectAccess soa(env);
// The Runtime currently never unloads classes, which means any registered
@@ -171,19 +231,21 @@
// TODO: The Runtime should support unloading of classes and freeing of the
// dex files for those unloaded classes rather than leaking dex files here.
for (auto& dex_file : *dex_files) {
- if (Runtime::Current()->GetClassLinker()->IsDexFileRegistered(*dex_file)) {
- dex_file.release();
+ if (!Runtime::Current()->GetClassLinker()->IsDexFileRegistered(*dex_file)) {
+ delete dex_file;
}
}
}
static jclass DexFile_defineClassNative(JNIEnv* env, jclass, jstring javaName, jobject javaLoader,
- jlong cookie) {
- std::vector<std::unique_ptr<const DexFile>>* dex_files = toDexFiles(cookie, env);
- if (dex_files == NULL) {
+ jobject cookie) {
+ std::unique_ptr<std::vector<const DexFile*>> dex_files = ConvertJavaArrayToNative(env, cookie);
+ if (dex_files.get() == nullptr) {
VLOG(class_linker) << "Failed to find dex_file";
- return NULL;
+ DCHECK(env->ExceptionCheck());
+ return nullptr;
}
+
ScopedUtfChars class_name(env, javaName);
if (class_name.c_str() == NULL) {
VLOG(class_linker) << "Failed to find class_name";
@@ -221,36 +283,38 @@
};
// Note: this can be an expensive call, as we sort out duplicates in MultiDex files.
-static jobjectArray DexFile_getClassNameList(JNIEnv* env, jclass, jlong cookie) {
- jobjectArray result = nullptr;
- std::vector<std::unique_ptr<const DexFile>>* dex_files = toDexFiles(cookie, env);
+static jobjectArray DexFile_getClassNameList(JNIEnv* env, jclass, jobject cookie) {
+ std::unique_ptr<std::vector<const DexFile*>> dex_files = ConvertJavaArrayToNative(env, cookie);
+ if (dex_files.get() == nullptr) {
+ DCHECK(env->ExceptionCheck());
+ return nullptr;
+ }
- if (dex_files != nullptr) {
- // Push all class descriptors into a set. Use set instead of unordered_set as we want to
- // retrieve all in the end.
- std::set<const char*, CharPointerComparator> descriptors;
- for (auto& dex_file : *dex_files) {
- for (size_t i = 0; i < dex_file->NumClassDefs(); ++i) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
- const char* descriptor = dex_file->GetClassDescriptor(class_def);
- descriptors.insert(descriptor);
- }
+ // Push all class descriptors into a set. Use set instead of unordered_set as we want to
+ // retrieve all in the end.
+ std::set<const char*, CharPointerComparator> descriptors;
+ for (auto& dex_file : *dex_files) {
+ for (size_t i = 0; i < dex_file->NumClassDefs(); ++i) {
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+ const char* descriptor = dex_file->GetClassDescriptor(class_def);
+ descriptors.insert(descriptor);
}
+ }
- // Now create output array and copy the set into it.
- result = env->NewObjectArray(descriptors.size(), WellKnownClasses::java_lang_String, nullptr);
- if (result != nullptr) {
- auto it = descriptors.begin();
- auto it_end = descriptors.end();
- jsize i = 0;
- for (; it != it_end; it++, ++i) {
- std::string descriptor(DescriptorToDot(*it));
- ScopedLocalRef<jstring> jdescriptor(env, env->NewStringUTF(descriptor.c_str()));
- if (jdescriptor.get() == nullptr) {
- return nullptr;
- }
- env->SetObjectArrayElement(result, i, jdescriptor.get());
+ // Now create output array and copy the set into it.
+ jobjectArray result = env->NewObjectArray(descriptors.size(), WellKnownClasses::java_lang_String,
+ nullptr);
+ if (result != nullptr) {
+ auto it = descriptors.begin();
+ auto it_end = descriptors.end();
+ jsize i = 0;
+ for (; it != it_end; it++, ++i) {
+ std::string descriptor(DescriptorToDot(*it));
+ ScopedLocalRef<jstring> jdescriptor(env, env->NewStringUTF(descriptor.c_str()));
+ if (jdescriptor.get() == nullptr) {
+ return nullptr;
}
+ env->SetObjectArrayElement(result, i, jdescriptor.get());
}
}
return result;
@@ -620,12 +684,12 @@
static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(DexFile, closeDexFile, "(J)V"),
- NATIVE_METHOD(DexFile, defineClassNative, "(Ljava/lang/String;Ljava/lang/ClassLoader;J)Ljava/lang/Class;"),
- NATIVE_METHOD(DexFile, getClassNameList, "(J)[Ljava/lang/String;"),
+ NATIVE_METHOD(DexFile, closeDexFile, "(Ljava/lang/Object;)V"),
+ NATIVE_METHOD(DexFile, defineClassNative, "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/Object;)Ljava/lang/Class;"),
+ NATIVE_METHOD(DexFile, getClassNameList, "(Ljava/lang/Object;)[Ljava/lang/String;"),
NATIVE_METHOD(DexFile, isDexOptNeeded, "(Ljava/lang/String;)Z"),
NATIVE_METHOD(DexFile, isDexOptNeededInternal, "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Z)B"),
- NATIVE_METHOD(DexFile, openDexFileNative, "(Ljava/lang/String;Ljava/lang/String;I)J"),
+ NATIVE_METHOD(DexFile, openDexFileNative, "(Ljava/lang/String;Ljava/lang/String;I)Ljava/lang/Object;"),
};
void register_dalvik_system_DexFile(JNIEnv* env) {
diff --git a/runtime/native/dalvik_system_DexFile.h b/runtime/native/dalvik_system_DexFile.h
index 487df05..7585ab9 100644
--- a/runtime/native/dalvik_system_DexFile.h
+++ b/runtime/native/dalvik_system_DexFile.h
@@ -21,6 +21,8 @@
namespace art {
+class DexFile;
+
void register_dalvik_system_DexFile(JNIEnv* env);
} // namespace art
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 9061bb3..356e3d2 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -52,17 +52,7 @@
CHECK(has_section);
oat_file->begin_ = elf_file->Begin() + offset;
oat_file->end_ = elf_file->Begin() + size + offset;
- return oat_file->Setup(error_msg) ? oat_file.release() : nullptr;
-}
-
-OatFile* OatFile::OpenMemory(std::vector<uint8_t>& oat_contents,
- const std::string& location,
- std::string* error_msg) {
- CHECK(!oat_contents.empty()) << location;
- CheckLocation(location);
- std::unique_ptr<OatFile> oat_file(new OatFile(location, false));
- oat_file->begin_ = &oat_contents[0];
- oat_file->end_ = &oat_contents[oat_contents.size()];
+ // Ignore the optional .bss section when opening non-executable.
return oat_file->Setup(error_msg) ? oat_file.release() : nullptr;
}
@@ -108,18 +98,6 @@
return OpenElfFile(file, location, nullptr, nullptr, false, false, error_msg);
}
-OatFile* OatFile::OpenDlopen(const std::string& elf_filename,
- const std::string& location,
- uint8_t* requested_base,
- std::string* error_msg) {
- std::unique_ptr<OatFile> oat_file(new OatFile(location, true));
- bool success = oat_file->Dlopen(elf_filename, requested_base, error_msg);
- if (!success) {
- return nullptr;
- }
- return oat_file.release();
-}
-
OatFile* OatFile::OpenElfFile(File* file,
const std::string& location,
uint8_t* requested_base,
@@ -138,8 +116,8 @@
}
OatFile::OatFile(const std::string& location, bool is_executable)
- : location_(location), begin_(NULL), end_(NULL), is_executable_(is_executable),
- dlopen_handle_(NULL),
+ : location_(location), begin_(NULL), end_(NULL), bss_begin_(nullptr), bss_end_(nullptr),
+ is_executable_(is_executable), dlopen_handle_(NULL),
secondary_lookup_lock_("OatFile secondary lookup lock", kOatFileSecondaryLookupLock) {
CHECK(!location_.empty());
}
@@ -151,43 +129,6 @@
}
}
-bool OatFile::Dlopen(const std::string& elf_filename, uint8_t* requested_base,
- std::string* error_msg) {
- char* absolute_path = realpath(elf_filename.c_str(), NULL);
- if (absolute_path == NULL) {
- *error_msg = StringPrintf("Failed to find absolute path for '%s'", elf_filename.c_str());
- return false;
- }
- dlopen_handle_ = dlopen(absolute_path, RTLD_NOW);
- free(absolute_path);
- if (dlopen_handle_ == NULL) {
- *error_msg = StringPrintf("Failed to dlopen '%s': %s", elf_filename.c_str(), dlerror());
- return false;
- }
- begin_ = reinterpret_cast<uint8_t*>(dlsym(dlopen_handle_, "oatdata"));
- if (begin_ == NULL) {
- *error_msg = StringPrintf("Failed to find oatdata symbol in '%s': %s", elf_filename.c_str(),
- dlerror());
- return false;
- }
- if (requested_base != NULL && begin_ != requested_base) {
- *error_msg = StringPrintf("Failed to find oatdata symbol at expected address: "
- "oatdata=%p != expected=%p /proc/self/maps:\n",
- begin_, requested_base);
- ReadFileToString("/proc/self/maps", error_msg);
- return false;
- }
- end_ = reinterpret_cast<uint8_t*>(dlsym(dlopen_handle_, "oatlastword"));
- if (end_ == NULL) {
- *error_msg = StringPrintf("Failed to find oatlastword symbol in '%s': %s", elf_filename.c_str(),
- dlerror());
- return false;
- }
- // Readjust to be non-inclusive upper bound.
- end_ += sizeof(uint32_t);
- return Setup(error_msg);
-}
-
bool OatFile::ElfFileOpen(File* file, uint8_t* requested_base, uint8_t* oat_file_begin,
bool writable, bool executable,
std::string* error_msg) {
@@ -209,10 +150,10 @@
return false;
}
if (requested_base != NULL && begin_ != requested_base) {
+ PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
*error_msg = StringPrintf("Failed to find oatdata symbol at expected address: "
- "oatdata=%p != expected=%p /proc/self/maps:\n",
+ "oatdata=%p != expected=%p. See process maps in the log.",
begin_, requested_base);
- ReadFileToString("/proc/self/maps", error_msg);
return false;
}
end_ = elf_file_->FindDynamicSymbolAddress("oatlastword");
@@ -222,6 +163,23 @@
}
// Readjust to be non-inclusive upper bound.
end_ += sizeof(uint32_t);
+
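+  // Look up the optional .bss section, exported as the "oatbss" / "oatbsslastword" dynamic
+  // symbols.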
+ bss_begin_ = elf_file_->FindDynamicSymbolAddress("oatbss");
+ if (bss_begin_ == nullptr) {
+ // No .bss section. Clear dlerror().
+ bss_end_ = nullptr;
+ dlerror();
+ } else {
+ bss_end_ = elf_file_->FindDynamicSymbolAddress("oatbsslastword");
+ if (bss_end_ == nullptr) {
+      *error_msg = StringPrintf("Failed to find oatbsslastword symbol in '%s'",
+ file->GetPath().c_str());
+ return false;
+ }
+ // Readjust to be non-inclusive upper bound.
+ bss_end_ += sizeof(uint32_t);
+ }
+
return Setup(error_msg);
}
@@ -363,6 +321,14 @@
return end_;
}
+const uint8_t* OatFile::BssBegin() const {
+ return bss_begin_;
+}
+
+const uint8_t* OatFile::BssEnd() const {
+ return bss_end_;
+}
+
const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location,
const uint32_t* dex_location_checksum,
bool warn_if_not_found) const {
@@ -577,12 +543,12 @@
}
if (oat_file_->IsExecutable() ||
Runtime::Current() == nullptr || // This case applies for oatdump.
- Runtime::Current()->IsCompiler()) {
+ Runtime::Current()->IsAotCompiler()) {
return OatMethod(oat_file_->Begin(), oat_method_offsets->code_offset_);
- } else {
- // We aren't allowed to use the compiled code. We just force it down the interpreted version.
- return OatMethod(oat_file_->Begin(), 0);
}
+  // We aren't allowed to use the compiled code, so force the method down the interpreted / JIT
+  // path.
+ return OatMethod(oat_file_->Begin(), 0);
}
void OatFile::OatMethod::LinkMethod(mirror::ArtMethod* method) const {
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 6ae3c3e..564185c 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -62,11 +62,6 @@
// Opens an oat file from an already opened File. Maps it PROT_READ, MAP_PRIVATE.
static OatFile* OpenReadable(File* file, const std::string& location, std::string* error_msg);
- // Open an oat file backed by a std::vector with the given location.
- static OatFile* OpenMemory(std::vector<uint8_t>& oat_contents,
- const std::string& location,
- std::string* error_msg);
-
~OatFile();
bool IsExecutable() const {
@@ -147,8 +142,8 @@
return reinterpret_cast<T>(begin_ + offset);
}
- const uint8_t* const begin_;
- const uint32_t code_offset_;
+ const uint8_t* begin_;
+ uint32_t code_offset_;
friend class OatClass;
};
@@ -274,17 +269,19 @@
return End() - Begin();
}
+ size_t BssSize() const {
+ return BssEnd() - BssBegin();
+ }
+
const uint8_t* Begin() const;
const uint8_t* End() const;
+ const uint8_t* BssBegin() const;
+ const uint8_t* BssEnd() const;
+
private:
static void CheckLocation(const std::string& location);
- static OatFile* OpenDlopen(const std::string& elf_filename,
- const std::string& location,
- uint8_t* requested_base,
- std::string* error_msg);
-
static OatFile* OpenElfFile(File* file,
const std::string& location,
uint8_t* requested_base,
@@ -294,7 +291,6 @@
std::string* error_msg);
explicit OatFile(const std::string& filename, bool executable);
- bool Dlopen(const std::string& elf_filename, uint8_t* requested_base, std::string* error_msg);
bool ElfFileOpen(File* file, uint8_t* requested_base,
uint8_t* oat_file_begin, // Override where the file is loaded to if not null
bool writable, bool executable,
@@ -312,6 +308,12 @@
// Pointer to end of oat region for bounds checking.
const uint8_t* end_;
+ // Pointer to the .bss section, if present, otherwise nullptr.
+ const uint8_t* bss_begin_;
+
+ // Pointer to the end of the .bss section, if present, otherwise nullptr.
+ const uint8_t* bss_end_;
+
// Was this oat_file loaded executable?
const bool is_executable_;
diff --git a/runtime/object_lock.cc b/runtime/object_lock.cc
index f7accc0..749fb5d 100644
--- a/runtime/object_lock.cc
+++ b/runtime/object_lock.cc
@@ -47,6 +47,7 @@
obj_->NotifyAll(self_);
}
+template class ObjectLock<mirror::ArtMethod>;
template class ObjectLock<mirror::Class>;
template class ObjectLock<mirror::Object>;
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index c0c7baa..9d87ed7 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -146,6 +146,15 @@
.Define({"-XX:EnableHSpaceCompactForOOM", "-XX:DisableHSpaceCompactForOOM"})
.WithValues({true, false})
.IntoKey(M::EnableHSpaceCompactForOOM)
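+      // JIT options: the -Xjit/-Xnojit toggle, code cache capacity (in KiB), and the method
+      // compile threshold.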
+ .Define({"-Xjit", "-Xnojit"})
+ .WithValues({true, false})
+ .IntoKey(M::UseJIT)
+ .Define("-Xjitcodecachesize:_")
+ .WithType<MemoryKiB>()
+ .IntoKey(M::JITCodeCacheCapacity)
+ .Define("-Xjitthreshold:_")
+ .WithType<unsigned int>()
+ .IntoKey(M::JITCompileThreshold)
.Define("-XX:HspaceCompactForOOMMinIntervalMs=_") // in ms
.WithType<MillisecondsToNanoseconds>() // store as ns
.IntoKey(M::HSpaceCompactForOOMMinIntervalsMs)
@@ -248,7 +257,7 @@
"-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:_",
"-Xdexopt:_", "-Xnoquithandler", "-Xjnigreflimit:_", "-Xgenregmap", "-Xnogenregmap",
"-Xverifyopt:_", "-Xcheckdexsum", "-Xincludeselectedop", "-Xjitop:_",
- "-Xincludeselectedmethod", "-Xjitthreshold:_", "-Xjitcodecachesize:_",
+ "-Xincludeselectedmethod", "-Xjitthreshold:_",
"-Xjitblocking", "-Xjitmethod:_", "-Xjitclass:_", "-Xjitoffset:_",
"-Xjitconfig:_", "-Xjitcheckcg", "-Xjitverbose", "-Xjitprofile",
"-Xjitdisableopt", "-Xjitsuspendpoll", "-XX:mainThreadStackSize=_"})
@@ -353,19 +362,20 @@
bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognized,
RuntimeArgumentMap* runtime_options) {
-// gLogVerbosity.class_linker = true; // TODO: don't check this in!
-// gLogVerbosity.compiler = true; // TODO: don't check this in!
-// gLogVerbosity.gc = true; // TODO: don't check this in!
-// gLogVerbosity.heap = true; // TODO: don't check this in!
-// gLogVerbosity.jdwp = true; // TODO: don't check this in!
-// gLogVerbosity.jni = true; // TODO: don't check this in!
-// gLogVerbosity.monitor = true; // TODO: don't check this in!
-// gLogVerbosity.profiler = true; // TODO: don't check this in!
-// gLogVerbosity.signals = true; // TODO: don't check this in!
-// gLogVerbosity.startup = true; // TODO: don't check this in!
-// gLogVerbosity.third_party_jni = true; // TODO: don't check this in!
-// gLogVerbosity.threads = true; // TODO: don't check this in!
-// gLogVerbosity.verifier = true; // TODO: don't check this in!
+ // gLogVerbosity.class_linker = true; // TODO: don't check this in!
+ // gLogVerbosity.compiler = true; // TODO: don't check this in!
+ // gLogVerbosity.gc = true; // TODO: don't check this in!
+ // gLogVerbosity.heap = true; // TODO: don't check this in!
+ // gLogVerbosity.jdwp = true; // TODO: don't check this in!
+ // gLogVerbosity.jit = true; // TODO: don't check this in!
+ // gLogVerbosity.jni = true; // TODO: don't check this in!
+ // gLogVerbosity.monitor = true; // TODO: don't check this in!
+ // gLogVerbosity.profiler = true; // TODO: don't check this in!
+ // gLogVerbosity.signals = true; // TODO: don't check this in!
+ // gLogVerbosity.startup = true; // TODO: don't check this in!
+ // gLogVerbosity.third_party_jni = true; // TODO: don't check this in!
+ // gLogVerbosity.threads = true; // TODO: don't check this in!
+ // gLogVerbosity.verifier = true; // TODO: don't check this in!
for (size_t i = 0; i < options.size(); ++i) {
if (true && options[0].first == "-Xzygote") {
@@ -560,7 +570,7 @@
UsageMessage(stream, "The following standard options are supported:\n");
UsageMessage(stream, " -classpath classpath (-cp classpath)\n");
UsageMessage(stream, " -Dproperty=value\n");
- UsageMessage(stream, " -verbose:tag ('gc', 'jni', or 'class')\n");
+ UsageMessage(stream, " -verbose:tag ('gc', 'jit', 'jni', or 'class')\n");
UsageMessage(stream, " -showversion\n");
UsageMessage(stream, " -help\n");
UsageMessage(stream, " -agentlib:jdwp=options\n");
@@ -590,6 +600,8 @@
UsageMessage(stream, " -XX:ForegroundHeapGrowthMultiplier=doublevalue\n");
UsageMessage(stream, " -XX:LowMemoryMode\n");
UsageMessage(stream, " -Xprofile:{threadcpuclock,wallclock,dualclock}\n");
+ UsageMessage(stream, " -Xjitcodecachesize:N\n");
+ UsageMessage(stream, " -Xjitthreshold:integervalue\n");
UsageMessage(stream, "\n");
UsageMessage(stream, "The following unique to ART options are supported:\n");
@@ -630,6 +642,8 @@
UsageMessage(stream, " -Xcompiler-option dex2oat-option\n");
UsageMessage(stream, " -Ximage-compiler-option dex2oat-option\n");
UsageMessage(stream, " -Xpatchoat:filename\n");
+ UsageMessage(stream, " -Xjit\n");
+ UsageMessage(stream, " -Xnojit\n");
UsageMessage(stream, " -X[no]relocate\n");
UsageMessage(stream, " -X[no]dex2oat (Whether to invoke dex2oat on the application)\n");
UsageMessage(stream, " -X[no]image-dex2oat (Whether to create and use a boot image)\n");
@@ -659,8 +673,6 @@
UsageMessage(stream, " -Xincludeselectedop\n");
UsageMessage(stream, " -Xjitop:hexopvalue[-endvalue][,hexopvalue[-endvalue]]*\n");
UsageMessage(stream, " -Xincludeselectedmethod\n");
- UsageMessage(stream, " -Xjitthreshold:integervalue\n");
- UsageMessage(stream, " -Xjitcodecachesize:decimalvalueofkbytes\n");
UsageMessage(stream, " -Xjitblocking\n");
UsageMessage(stream, " -Xjitmethod:signature[,signature]* (eg Ljava/lang/String\\;replace)\n");
UsageMessage(stream, " -Xjitclass:classname[,classname]*\n");
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
index c3bdcb1..db372c3 100644
--- a/runtime/profiler.cc
+++ b/runtime/profiler.cc
@@ -161,7 +161,7 @@
CHECK(runtime->AttachCurrentThread("Profiler", true, runtime->GetSystemThreadGroup(),
- !runtime->IsCompiler()));
+ !runtime->IsAotCompiler()));
Thread* self = Thread::Current();
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index d65b2d5..44e2844 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -77,7 +77,9 @@
InlineMethod* method) {
DCHECK(verifier != nullptr);
DCHECK_EQ(Runtime::Current()->IsCompiler(), method != nullptr);
- DCHECK_EQ(verifier->CanLoadClasses(), method != nullptr);
+ if (!Runtime::Current()->UseJit()) {
+ DCHECK_EQ(verifier->CanLoadClasses(), method != nullptr);
+ }
// We currently support only plain return or 2-instruction methods.
const DexFile::CodeItem* code_item = verifier->CodeItem();
@@ -110,6 +112,10 @@
case Instruction::IGET_CHAR:
case Instruction::IGET_SHORT:
case Instruction::IGET_WIDE:
+ // TODO: Add handling for JIT.
+ // case Instruction::IGET_QUICK:
+ // case Instruction::IGET_WIDE_QUICK:
+ // case Instruction::IGET_OBJECT_QUICK:
return AnalyseIGetMethod(verifier, method);
case Instruction::IPUT:
case Instruction::IPUT_OBJECT:
@@ -118,6 +124,10 @@
case Instruction::IPUT_CHAR:
case Instruction::IPUT_SHORT:
case Instruction::IPUT_WIDE:
+ // TODO: Add handling for JIT.
+ // case Instruction::IPUT_QUICK:
+ // case Instruction::IPUT_WIDE_QUICK:
+ // case Instruction::IPUT_OBJECT_QUICK:
return AnalyseIPutMethod(verifier, method);
default:
return false;
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 8cd9e24..383308c 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -65,6 +65,8 @@
#include "image.h"
#include "instrumentation.h"
#include "intern_table.h"
+#include "interpreter/interpreter.h"
+#include "jit/jit.h"
#include "jni_internal.h"
#include "mirror/array.h"
#include "mirror/art_field-inl.h"
@@ -229,6 +231,12 @@
// Make sure to let the GC complete if it is running.
heap_->WaitForGcToComplete(gc::kGcCauseBackground, self);
heap_->DeleteThreadPool();
+ if (jit_.get() != nullptr) {
+ VLOG(jit) << "Deleting jit thread pool";
+ // Delete thread pool before the thread list since we don't want to wait forever on the
+ // JIT compiler threads.
+ jit_->DeleteThreadPool();
+ }
// Make sure our internal threads are dead before we start tearing down things they're using.
Dbg::StopJdwp();
@@ -237,6 +245,13 @@
// Make sure all other non-daemon threads have terminated, and all daemon threads are suspended.
delete thread_list_;
+  // Delete the JIT after the thread list to ensure that there are no remaining threads that
+ // accessing the instrumentation when we delete it.
+ if (jit_.get() != nullptr) {
+ VLOG(jit) << "Deleting jit";
+ jit_.reset(nullptr);
+ }
+
// Shutdown the fault manager if it was initialized.
fault_manager.Shutdown();
@@ -461,17 +476,24 @@
started_ = true;
- // Use !IsCompiler so that we get test coverage, tests are never the zygote.
- if (!IsCompiler()) {
+  // Use !IsAotCompiler so that we get test coverage; tests are never the zygote.
+ if (!IsAotCompiler()) {
ScopedObjectAccess soa(self);
gc::space::ImageSpace* image_space = heap_->GetImageSpace();
if (image_space != nullptr) {
- Runtime::Current()->GetInternTable()->AddImageStringsToTable(image_space);
- Runtime::Current()->GetClassLinker()->MoveImageClassesToClassTable();
+ GetInternTable()->AddImageStringsToTable(image_space);
+ GetClassLinker()->MoveImageClassesToClassTable();
}
}
- if (!IsImageDex2OatEnabled() || !Runtime::Current()->GetHeap()->HasImageSpace()) {
+ // If we are the zygote then we need to wait until after forking to create the code cache due to
+ // SELinux restrictions on r/w/x memory regions.
+ if (!IsZygote() && jit_.get() != nullptr) {
+ jit_->CreateInstrumentationCache(jit_options_->GetCompileThreshold());
+ jit_->CreateThreadPool();
+ }
+
+ if (!IsImageDex2OatEnabled() || !GetHeap()->HasImageSpace()) {
ScopedObjectAccess soa(self);
StackHandleScope<1> hs(soa.Self());
auto klass(hs.NewHandle<mirror::Class>(mirror::Class::GetJavaLangClass()));
@@ -590,8 +612,14 @@
}
}
- // Create the thread pool.
+ // Create the thread pools.
heap_->CreateThreadPool();
+ if (jit_options_.get() != nullptr && jit_.get() == nullptr) {
+    // Create the JIT if the flag is set and we haven't already created it (happens for run-tests).
+ CreateJit();
+ jit_->CreateInstrumentationCache(jit_options_->GetCompileThreshold());
+ jit_->CreateThreadPool();
+ }
StartSignalCatcher();
@@ -818,6 +846,17 @@
Dbg::ConfigureJdwp(runtime_options.GetOrDefault(Opt::JdwpOptions));
}
+ if (!IsCompiler()) {
+ // If we are already the compiler at this point, we must be dex2oat. Don't create the jit in
+ // this case.
+ // If runtime_options doesn't have UseJIT set to true then CreateFromRuntimeArguments returns
+ // nullptr and we don't create the jit.
+ jit_options_.reset(jit::JitOptions::CreateFromRuntimeArguments(runtime_options));
+ }
+ if (!IsZygote() && jit_options_.get() != nullptr) {
+ CreateJit();
+ }
+
BlockSignals();
InitPlatformSignalHandlers();
@@ -1066,26 +1105,26 @@
env->NewGlobalRef(env->GetStaticObjectField(
WellKnownClasses::java_lang_ThreadGroup,
WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup));
- CHECK(main_thread_group_ != NULL || IsCompiler());
+ CHECK(main_thread_group_ != NULL || IsAotCompiler());
system_thread_group_ =
env->NewGlobalRef(env->GetStaticObjectField(
WellKnownClasses::java_lang_ThreadGroup,
WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup));
- CHECK(system_thread_group_ != NULL || IsCompiler());
+ CHECK(system_thread_group_ != NULL || IsAotCompiler());
}
jobject Runtime::GetMainThreadGroup() const {
- CHECK(main_thread_group_ != NULL || IsCompiler());
+ CHECK(main_thread_group_ != NULL || IsAotCompiler());
return main_thread_group_;
}
jobject Runtime::GetSystemThreadGroup() const {
- CHECK(system_thread_group_ != NULL || IsCompiler());
+ CHECK(system_thread_group_ != NULL || IsAotCompiler());
return system_thread_group_;
}
jobject Runtime::GetSystemClassLoader() const {
- CHECK(system_class_loader_ != NULL || IsCompiler());
+ CHECK(system_class_loader_ != NULL || IsAotCompiler());
return system_class_loader_;
}
@@ -1341,7 +1380,7 @@
// TODO: use a special method for imt conflict method saves.
method->SetDexMethodIndex(DexFile::kDexNoIndex);
// When compiling, the code pointer will get set later when the image is loaded.
- if (runtime->IsCompiler()) {
+ if (runtime->IsAotCompiler()) {
size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
} else {
@@ -1350,6 +1389,10 @@
return method.Get();
}
+void Runtime::SetImtConflictMethod(mirror::ArtMethod* method) {
+ imt_conflict_method_ = GcRoot<mirror::ArtMethod>(method);
+}
+
mirror::ArtMethod* Runtime::CreateResolutionMethod() {
Thread* self = Thread::Current();
Runtime* runtime = Runtime::Current();
@@ -1360,7 +1403,7 @@
// TODO: use a special method for resolution method saves
method->SetDexMethodIndex(DexFile::kDexNoIndex);
// When compiling, the code pointer will get set later when the image is loaded.
- if (runtime->IsCompiler()) {
+ if (runtime->IsAotCompiler()) {
size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
} else {
@@ -1491,14 +1534,14 @@
// Transaction support.
void Runtime::EnterTransactionMode(Transaction* transaction) {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(transaction != nullptr);
DCHECK(!IsActiveTransaction());
preinitialization_transaction_ = transaction;
}
void Runtime::ExitTransactionMode() {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_ = nullptr;
}
@@ -1517,15 +1560,17 @@
const std::string& abort_message) {
DCHECK(IsCompiler());
DCHECK(IsActiveTransaction());
+  // Throwing an exception may trigger the initialization of the exception's class. If we marked
+  // the transaction aborted before that, we could raise a false alarm, so throw the exception
+  // before marking the transaction aborted.
+ preinitialization_transaction_->ThrowInternalError(self, false);
preinitialization_transaction_->Abort(abort_message);
- ThrowInternalErrorForAbortedTransaction(self);
}
void Runtime::ThrowInternalErrorForAbortedTransaction(Thread* self) {
DCHECK(IsCompiler());
DCHECK(IsActiveTransaction());
- DCHECK(IsTransactionAborted());
- preinitialization_transaction_->ThrowInternalError(self);
+ preinitialization_transaction_->ThrowInternalError(self, true);
}
void Runtime::RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset,
@@ -1558,51 +1603,51 @@
void Runtime::RecordWriteField32(mirror::Object* obj, MemberOffset field_offset,
uint32_t value, bool is_volatile) const {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteField32(obj, field_offset, value, is_volatile);
}
void Runtime::RecordWriteField64(mirror::Object* obj, MemberOffset field_offset,
uint64_t value, bool is_volatile) const {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteField64(obj, field_offset, value, is_volatile);
}
void Runtime::RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
mirror::Object* value, bool is_volatile) const {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteFieldReference(obj, field_offset, value, is_volatile);
}
void Runtime::RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteArray(array, index, value);
}
void Runtime::RecordStrongStringInsertion(mirror::String* s) const {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordStrongStringInsertion(s);
}
void Runtime::RecordWeakStringInsertion(mirror::String* s) const {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWeakStringInsertion(s);
}
void Runtime::RecordStrongStringRemoval(mirror::String* s) const {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordStrongStringRemoval(s);
}
void Runtime::RecordWeakStringRemoval(mirror::String* s) const {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWeakStringRemoval(s);
}
@@ -1614,7 +1659,7 @@
void Runtime::AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* argv)
const {
- if (GetInstrumentation()->InterpretOnly()) {
+ if (GetInstrumentation()->InterpretOnly() || UseJit()) {
argv->push_back("--compiler-filter=interpret-only");
}
@@ -1634,4 +1679,16 @@
void Runtime::UpdateProfilerState(int state) {
VLOG(profiler) << "Profiler state updated to " << state;
}
+
+void Runtime::CreateJit() {
+ CHECK(jit_options_.get() != nullptr);
+ std::string error_msg;
+ jit_.reset(jit::Jit::Create(jit_options_.get(), &error_msg));
+ if (jit_.get() != nullptr) {
+ compiler_callbacks_ = jit_->GetCompilerCallbacks();
+ } else {
+    LOG(WARNING) << "Failed to create JIT: " << error_msg;
+ }
+}
+
} // namespace art
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 944c8bd..5078b7f 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -48,6 +48,12 @@
class GarbageCollector;
} // namespace collector
} // namespace gc
+
+namespace jit {
+ class Jit;
+ class JitOptions;
+} // namespace jit
+
namespace mirror {
class ArtMethod;
class ClassLoader;
@@ -95,12 +101,18 @@
static bool Create(const RuntimeOptions& options, bool ignore_unrecognized)
SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
+ // IsAotCompiler for compilers that don't have a running runtime. Only dex2oat currently.
+ bool IsAotCompiler() const {
+ return !UseJit() && IsCompiler();
+ }
+
+ // IsCompiler is any runtime which has a running compiler, either dex2oat or JIT.
bool IsCompiler() const {
return compiler_callbacks_ != nullptr;
}
bool CanRelocate() const {
- return !IsCompiler() || compiler_callbacks_->IsRelocationPossible();
+ return !IsAotCompiler() || compiler_callbacks_->IsRelocationPossible();
}
bool ShouldRelocate() const {
@@ -339,9 +351,7 @@
return !imt_conflict_method_.IsNull();
}
- void SetImtConflictMethod(mirror::ArtMethod* method) {
- imt_conflict_method_ = GcRoot<mirror::ArtMethod>(method);
- }
+ void SetImtConflictMethod(mirror::ArtMethod* method);
void SetImtUnimplementedMethod(mirror::ArtMethod* method) {
imt_unimplemented_method_ = GcRoot<mirror::ArtMethod>(method);
}
@@ -421,6 +431,14 @@
kUnload,
kInitialize
};
+
+ jit::Jit* GetJit() {
+ return jit_.get();
+ }
+ bool UseJit() const {
+ return jit_.get() != nullptr;
+ }
+
void PreZygoteFork();
bool InitZygote();
void DidForkFromZygote(JNIEnv* env, NativeBridgeAction action, const char* isa);
@@ -525,6 +543,8 @@
return zygote_max_failed_boots_;
}
+ void CreateJit();
+
private:
static void InitPlatformSignalHandlers();
@@ -604,6 +624,9 @@
JavaVMExt* java_vm_;
+ std::unique_ptr<jit::Jit> jit_;
+ std::unique_ptr<jit::JitOptions> jit_options_;
+
// Fault message, printed when we get a SIGSEGV.
Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::string fault_message_ GUARDED_BY(fault_message_lock_);
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index d9cc4d4..d072ffa 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -63,6 +63,9 @@
RUNTIME_OPTIONS_KEY (Unit, LowMemoryMode)
RUNTIME_OPTIONS_KEY (bool, UseTLAB, false)
RUNTIME_OPTIONS_KEY (bool, EnableHSpaceCompactForOOM, true)
+RUNTIME_OPTIONS_KEY (bool, UseJIT, false)
+RUNTIME_OPTIONS_KEY (unsigned int, JITCompileThreshold, jit::Jit::kDefaultCompileThreshold)
+RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheCapacity, jit::JitCodeCache::kDefaultCapacity)
RUNTIME_OPTIONS_KEY (MillisecondsToNanoseconds, \
HSpaceCompactForOOMMinIntervalsMs,\
MsToNs(100 * 1000)) // 100s
diff --git a/runtime/runtime_options.h b/runtime/runtime_options.h
index ebd52d7..7e59000 100644
--- a/runtime/runtime_options.h
+++ b/runtime/runtime_options.h
@@ -26,6 +26,8 @@
#include "runtime/base/logging.h"
#include "cmdline/unit.h"
#include "jdwp/jdwp.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "gc/collector_type.h"
#include "gc/space/large_object_space.h"
#include "profiler_options.h"
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index e377542..26bf655 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -180,7 +180,7 @@
Runtime* runtime = Runtime::Current();
CHECK(runtime->AttachCurrentThread("Signal Catcher", true, runtime->GetSystemThreadGroup(),
- !runtime->IsCompiler()));
+ !runtime->IsAotCompiler()));
Thread* self = Thread::Current();
DCHECK_NE(self->GetState(), kRunnable);
diff --git a/runtime/stack.cc b/runtime/stack.cc
index b39aebf..97a8d01 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -163,42 +163,10 @@
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably read registers without a context.
DCHECK(m == GetMethod());
- const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
- DCHECK(code_pointer != nullptr);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
- QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
- uint32_t vmap_offset;
- // TODO: IsInContext stops before spotting floating point registers.
- if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
- bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
- uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
- uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kind);
- if (!IsAccessibleRegister(reg, is_float)) {
- return false;
- }
- uintptr_t ptr_val = GetRegister(reg, is_float);
- bool target64 = Is64BitInstructionSet(kRuntimeISA);
- if (target64) {
- bool wide_lo = (kind == kLongLoVReg) || (kind == kDoubleLoVReg);
- bool wide_hi = (kind == kLongHiVReg) || (kind == kDoubleHiVReg);
- int64_t value_long = static_cast<int64_t>(ptr_val);
- if (wide_lo) {
- ptr_val = static_cast<uintptr_t>(value_long & 0xFFFFFFFF);
- } else if (wide_hi) {
- ptr_val = static_cast<uintptr_t>(value_long >> 32);
- }
- }
- *val = ptr_val;
- return true;
+ if (m->IsOptimized(sizeof(void*))) {
+ return GetVRegFromOptimizedCode(m, vreg, kind, val);
} else {
- const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
- // its instructions?
- uint32_t* addr = GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
- frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
- DCHECK(addr != nullptr);
- *val = *addr;
- return true;
+ return GetVRegFromQuickCode(m, vreg, kind, val);
}
} else {
DCHECK(cur_shadow_frame_ != nullptr);
@@ -207,6 +175,86 @@
}
}
+bool StackVisitor::GetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
+ uint32_t* val) const {
+ const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
+ DCHECK(code_pointer != nullptr);
+ const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
+ QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+ uint32_t vmap_offset;
+ // TODO: IsInContext stops before spotting floating point registers.
+ if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
+ bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
+ uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
+ uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kind);
+ return GetRegisterIfAccessible(reg, kind, val);
+ } else {
+ const DexFile::CodeItem* code_item = m->GetCodeItem();
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ // its instructions?
+ *val = *GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
+ frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
+ return true;
+ }
+}
+
+bool StackVisitor::GetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
+ uint32_t* val) const {
+ const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
+ DCHECK(code_pointer != nullptr);
+ uint32_t native_pc_offset = m->NativeQuickPcOffset(cur_quick_frame_pc_);
+ CodeInfo code_info = m->GetOptimizedCodeInfo();
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ const DexFile::CodeItem* code_item = m->GetCodeItem();
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ // its instructions?
+ DCHECK_LT(vreg, code_item->registers_size_);
+ DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map,
+ code_item->registers_size_);
+ DexRegisterMap::LocationKind location_kind = dex_register_map.GetLocationKind(vreg);
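+  // Dispatch on where the optimizing compiler placed this dex register at the current stack map.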
+ switch (location_kind) {
+ case DexRegisterMap::kInStack: {
+ const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg);
+ const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
+ *val = *reinterpret_cast<const uint32_t*>(addr);
+ return true;
+ }
+ case DexRegisterMap::kInRegister:
+ case DexRegisterMap::kInFpuRegister: {
+ uint32_t reg = dex_register_map.GetMachineRegister(vreg);
+ return GetRegisterIfAccessible(reg, kind, val);
+ }
+ case DexRegisterMap::kConstant:
+ *val = dex_register_map.GetConstant(vreg);
+ return true;
+ case DexRegisterMap::kNone:
+ return false;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+bool StackVisitor::GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const {
+ const bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
+ if (!IsAccessibleRegister(reg, is_float)) {
+ return false;
+ }
+ uintptr_t ptr_val = GetRegister(reg, is_float);
+ const bool target64 = Is64BitInstructionSet(kRuntimeISA);
+ if (target64) {
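+    // A wide vreg occupies a single 64-bit register; extract the requested 32-bit half.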
+ const bool wide_lo = (kind == kLongLoVReg) || (kind == kDoubleLoVReg);
+ const bool wide_hi = (kind == kLongHiVReg) || (kind == kDoubleHiVReg);
+ int64_t value_long = static_cast<int64_t>(ptr_val);
+ if (wide_lo) {
+ ptr_val = static_cast<uintptr_t>(Low32Bits(value_long));
+ } else if (wide_hi) {
+ ptr_val = static_cast<uintptr_t>(High32Bits(value_long));
+ }
+ }
+ *val = ptr_val;
+ return true;
+}
+
bool StackVisitor::GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
VRegKind kind_hi, uint64_t* val) const {
if (kind_lo == kLongLoVReg) {
@@ -215,45 +263,15 @@
DCHECK_EQ(kind_hi, kDoubleHiVReg);
} else {
LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
+ UNREACHABLE();
}
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably read registers without a context.
DCHECK(m == GetMethod());
- const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
- DCHECK(code_pointer != nullptr);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
- QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
- uint32_t vmap_offset_lo, vmap_offset_hi;
- // TODO: IsInContext stops before spotting floating point registers.
- if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
- vmap_table.IsInContext(vreg + 1, kind_hi, &vmap_offset_hi)) {
- bool is_float = (kind_lo == kDoubleLoVReg);
- uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
- uint32_t reg_lo = vmap_table.ComputeRegister(spill_mask, vmap_offset_lo, kind_lo);
- uint32_t reg_hi = vmap_table.ComputeRegister(spill_mask, vmap_offset_hi, kind_hi);
- if (!IsAccessibleRegister(reg_lo, is_float) || !IsAccessibleRegister(reg_hi, is_float)) {
- return false;
- }
- uintptr_t ptr_val_lo = GetRegister(reg_lo, is_float);
- uintptr_t ptr_val_hi = GetRegister(reg_hi, is_float);
- bool target64 = Is64BitInstructionSet(kRuntimeISA);
- if (target64) {
- int64_t value_long_lo = static_cast<int64_t>(ptr_val_lo);
- int64_t value_long_hi = static_cast<int64_t>(ptr_val_hi);
- ptr_val_lo = static_cast<uintptr_t>(value_long_lo & 0xFFFFFFFF);
- ptr_val_hi = static_cast<uintptr_t>(value_long_hi >> 32);
- }
- *val = (static_cast<uint64_t>(ptr_val_hi) << 32) | static_cast<uint32_t>(ptr_val_lo);
- return true;
+ if (m->IsOptimized(sizeof(void*))) {
+ return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
} else {
- const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
- // its instructions?
- uint32_t* addr = GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
- frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
- DCHECK(addr != nullptr);
- *val = *reinterpret_cast<uint64_t*>(addr);
- return true;
+ return GetVRegPairFromQuickCode(m, vreg, kind_lo, kind_hi, val);
}
} else {
DCHECK(cur_shadow_frame_ != nullptr);
@@ -262,61 +280,185 @@
}
}
+bool StackVisitor::GetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
+ VRegKind kind_hi, uint64_t* val) const {
+ const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
+ DCHECK(code_pointer != nullptr);
+ const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
+ QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+ uint32_t vmap_offset_lo, vmap_offset_hi;
+ // TODO: IsInContext stops before spotting floating point registers.
+ if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
+ vmap_table.IsInContext(vreg + 1, kind_hi, &vmap_offset_hi)) {
+ bool is_float = (kind_lo == kDoubleLoVReg);
+ uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
+ uint32_t reg_lo = vmap_table.ComputeRegister(spill_mask, vmap_offset_lo, kind_lo);
+ uint32_t reg_hi = vmap_table.ComputeRegister(spill_mask, vmap_offset_hi, kind_hi);
+ return GetRegisterPairIfAccessible(reg_lo, reg_hi, kind_lo, val);
+ } else {
+ const DexFile::CodeItem* code_item = m->GetCodeItem();
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ // its instructions?
+ uint32_t* addr = GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
+ frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
+ *val = *reinterpret_cast<uint64_t*>(addr);
+ return true;
+ }
+}
+
+bool StackVisitor::GetVRegPairFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg,
+ VRegKind kind_lo, VRegKind kind_hi,
+ uint64_t* val) const {
+ uint32_t low_32bits;
+ uint32_t high_32bits;
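+  // The optimizing compiler maps each 32-bit half as its own dex register, so read them
+  // separately.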
+ bool success = GetVRegFromOptimizedCode(m, vreg, kind_lo, &low_32bits);
+ success &= GetVRegFromOptimizedCode(m, vreg + 1, kind_hi, &high_32bits);
+ if (success) {
+ *val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits);
+ }
+ return success;
+}
+
+bool StackVisitor::GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi,
+ VRegKind kind_lo, uint64_t* val) const {
+ const bool is_float = (kind_lo == kDoubleLoVReg);
+ if (!IsAccessibleRegister(reg_lo, is_float) || !IsAccessibleRegister(reg_hi, is_float)) {
+ return false;
+ }
+ uintptr_t ptr_val_lo = GetRegister(reg_lo, is_float);
+ uintptr_t ptr_val_hi = GetRegister(reg_hi, is_float);
+ bool target64 = Is64BitInstructionSet(kRuntimeISA);
+ if (target64) {
+ int64_t value_long_lo = static_cast<int64_t>(ptr_val_lo);
+ int64_t value_long_hi = static_cast<int64_t>(ptr_val_hi);
+ ptr_val_lo = static_cast<uintptr_t>(Low32Bits(value_long_lo));
+ ptr_val_hi = static_cast<uintptr_t>(High32Bits(value_long_hi));
+ }
+ *val = (static_cast<uint64_t>(ptr_val_hi) << 32) | static_cast<uint32_t>(ptr_val_lo);
+ return true;
+}
+
bool StackVisitor::SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value,
VRegKind kind) {
if (cur_quick_frame_ != nullptr) {
- DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
- DCHECK(m == GetMethod());
- const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
- DCHECK(code_pointer != nullptr);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
- QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
- uint32_t vmap_offset;
- // TODO: IsInContext stops before spotting floating point registers.
- if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
- bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
- uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
- const uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kind);
- if (!IsAccessibleRegister(reg, is_float)) {
- return false;
+ DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
+ DCHECK(m == GetMethod());
+ if (m->IsOptimized(sizeof(void*))) {
+ return SetVRegFromOptimizedCode(m, vreg, new_value, kind);
+ } else {
+ return SetVRegFromQuickCode(m, vreg, new_value, kind);
}
- bool target64 = Is64BitInstructionSet(kRuntimeISA);
- // Deal with 32 or 64-bit wide registers in a way that builds on all targets.
- if (target64) {
- bool wide_lo = (kind == kLongLoVReg) || (kind == kDoubleLoVReg);
- bool wide_hi = (kind == kLongHiVReg) || (kind == kDoubleHiVReg);
- if (wide_lo || wide_hi) {
- uintptr_t old_reg_val = GetRegister(reg, is_float);
- uint64_t new_vreg_portion = static_cast<uint64_t>(new_value);
- uint64_t old_reg_val_as_wide = static_cast<uint64_t>(old_reg_val);
- uint64_t mask = 0xffffffff;
- if (wide_lo) {
- mask = mask << 32;
- } else {
- new_vreg_portion = new_vreg_portion << 32;
- }
- new_value = static_cast<uintptr_t>((old_reg_val_as_wide & mask) | new_vreg_portion);
- }
- }
- SetRegister(reg, new_value, is_float);
- return true;
} else {
- const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
- // its instructions?
- uint32_t* addr = GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
- frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
- DCHECK(addr != nullptr);
- *addr = new_value;
+ cur_shadow_frame_->SetVReg(vreg, new_value);
return true;
}
+}
+
+bool StackVisitor::SetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value,
+ VRegKind kind) {
+ DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
+ DCHECK(m == GetMethod());
+ const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
+ DCHECK(code_pointer != nullptr);
+ const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
+ QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+ uint32_t vmap_offset;
+ // TODO: IsInContext stops before spotting floating point registers.
+ if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
+ bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
+ uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
+ uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kind);
+ return SetRegisterIfAccessible(reg, new_value, kind);
} else {
- DCHECK(cur_shadow_frame_ != nullptr);
- cur_shadow_frame_->SetVReg(vreg, new_value);
+ const DexFile::CodeItem* code_item = m->GetCodeItem();
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ // its instructions?
+ uint32_t* addr = GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
+ frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
+ *addr = new_value;
return true;
}
}
+bool StackVisitor::SetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg,
+                                            uint32_t new_value, VRegKind kind) {
+ const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
+ DCHECK(code_pointer != nullptr);
+ uint32_t native_pc_offset = m->NativeQuickPcOffset(cur_quick_frame_pc_);
+ CodeInfo code_info = m->GetOptimizedCodeInfo();
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ const DexFile::CodeItem* code_item = m->GetCodeItem();
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ // its instructions?
+ DCHECK_LT(vreg, code_item->registers_size_);
+ DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map,
+ code_item->registers_size_);
+ DexRegisterMap::LocationKind location_kind = dex_register_map.GetLocationKind(vreg);
+ uint32_t dex_pc = m->ToDexPc(cur_quick_frame_pc_, false);
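+  // dex_pc is only needed for the diagnostic messages below.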
+ switch (location_kind) {
+ case DexRegisterMap::kInStack: {
+ const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg);
+ uint8_t* addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + offset;
+ *reinterpret_cast<uint32_t*>(addr) = new_value;
+ return true;
+ }
+ case DexRegisterMap::kInRegister:
+ case DexRegisterMap::kInFpuRegister: {
+ uint32_t reg = dex_register_map.GetMachineRegister(vreg);
+ return SetRegisterIfAccessible(reg, new_value, kind);
+ }
+ case DexRegisterMap::kConstant:
+ LOG(ERROR) << StringPrintf("Cannot change value of DEX register v%u used as a constant at "
+ "DEX pc 0x%x (native pc 0x%x) of method %s",
+ vreg, dex_pc, native_pc_offset,
+ PrettyMethod(cur_quick_frame_->AsMirrorPtr()).c_str());
+ return false;
+ case DexRegisterMap::kNone:
+ LOG(ERROR) << StringPrintf("No location for DEX register v%u at DEX pc 0x%x "
+ "(native pc 0x%x) of method %s",
+ vreg, dex_pc, native_pc_offset,
+ PrettyMethod(cur_quick_frame_->AsMirrorPtr()).c_str());
+ return false;
+ default:
+ LOG(FATAL) << StringPrintf("Unknown location for DEX register v%u at DEX pc 0x%x "
+ "(native pc 0x%x) of method %s",
+ vreg, dex_pc, native_pc_offset,
+ PrettyMethod(cur_quick_frame_->AsMirrorPtr()).c_str());
+ UNREACHABLE();
+ }
+}
+
+bool StackVisitor::SetRegisterIfAccessible(uint32_t reg, uint32_t new_value, VRegKind kind) {
+ const bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
+ if (!IsAccessibleRegister(reg, is_float)) {
+ return false;
+ }
+ const bool target64 = Is64BitInstructionSet(kRuntimeISA);
+
+ // Create a new value that can hold both low 32 and high 32 bits, in
+ // case we are running 64 bits.
+ uintptr_t full_new_value = new_value;
+ // Deal with 32 or 64-bit wide registers in a way that builds on all targets.
+ if (target64) {
+ bool wide_lo = (kind == kLongLoVReg) || (kind == kDoubleLoVReg);
+ bool wide_hi = (kind == kLongHiVReg) || (kind == kDoubleHiVReg);
+ if (wide_lo || wide_hi) {
+ uintptr_t old_reg_val = GetRegister(reg, is_float);
+ uint64_t new_vreg_portion = static_cast<uint64_t>(new_value);
+ uint64_t old_reg_val_as_wide = static_cast<uint64_t>(old_reg_val);
+ uint64_t mask = 0xffffffff;
+ if (wide_lo) {
+ mask = mask << 32;
+ } else {
+ new_vreg_portion = new_vreg_portion << 32;
+ }
+ full_new_value = static_cast<uintptr_t>((old_reg_val_as_wide & mask) | new_vreg_portion);
+ }
+ }
+ SetRegister(reg, full_new_value, is_float);
+ return true;
+}
+
bool StackVisitor::SetVRegPair(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
VRegKind kind_lo, VRegKind kind_hi) {
if (kind_lo == kLongLoVReg) {
@@ -329,49 +471,10 @@
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
- const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
- DCHECK(code_pointer != nullptr);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
- QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
- uint32_t vmap_offset_lo, vmap_offset_hi;
- // TODO: IsInContext stops before spotting floating point registers.
- if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
- vmap_table.IsInContext(vreg + 1, kind_hi, &vmap_offset_hi)) {
- bool is_float = (kind_lo == kDoubleLoVReg);
- uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
- uint32_t reg_lo = vmap_table.ComputeRegister(spill_mask, vmap_offset_lo, kind_lo);
- uint32_t reg_hi = vmap_table.ComputeRegister(spill_mask, vmap_offset_hi, kind_hi);
- if (!IsAccessibleRegister(reg_lo, is_float) || !IsAccessibleRegister(reg_hi, is_float)) {
- return false;
- }
- uintptr_t new_value_lo = static_cast<uintptr_t>(new_value & 0xFFFFFFFF);
- uintptr_t new_value_hi = static_cast<uintptr_t>(new_value >> 32);
- bool target64 = Is64BitInstructionSet(kRuntimeISA);
- // Deal with 32 or 64-bit wide registers in a way that builds on all targets.
- if (target64) {
- uintptr_t old_reg_val_lo = GetRegister(reg_lo, is_float);
- uintptr_t old_reg_val_hi = GetRegister(reg_hi, is_float);
- uint64_t new_vreg_portion_lo = static_cast<uint64_t>(new_value_lo);
- uint64_t new_vreg_portion_hi = static_cast<uint64_t>(new_value_hi) << 32;
- uint64_t old_reg_val_lo_as_wide = static_cast<uint64_t>(old_reg_val_lo);
- uint64_t old_reg_val_hi_as_wide = static_cast<uint64_t>(old_reg_val_hi);
- uint64_t mask_lo = static_cast<uint64_t>(0xffffffff) << 32;
- uint64_t mask_hi = 0xffffffff;
- new_value_lo = static_cast<uintptr_t>((old_reg_val_lo_as_wide & mask_lo) | new_vreg_portion_lo);
- new_value_hi = static_cast<uintptr_t>((old_reg_val_hi_as_wide & mask_hi) | new_vreg_portion_hi);
- }
- SetRegister(reg_lo, new_value_lo, is_float);
- SetRegister(reg_hi, new_value_hi, is_float);
- return true;
+ if (m->IsOptimized(sizeof(void*))) {
+ return SetVRegPairFromOptimizedCode(m, vreg, new_value, kind_lo, kind_hi);
} else {
- const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
- // its instructions?
- uint32_t* addr = GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
- frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
- DCHECK(addr != nullptr);
- *reinterpret_cast<uint64_t*>(addr) = new_value;
- return true;
+ return SetVRegPairFromQuickCode(m, vreg, new_value, kind_lo, kind_hi);
}
} else {
DCHECK(cur_shadow_frame_ != nullptr);
@@ -380,6 +483,60 @@
}
}
+bool StackVisitor::SetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
+ VRegKind kind_lo, VRegKind kind_hi) {
+ const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
+ DCHECK(code_pointer != nullptr);
+ const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
+ QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+ uint32_t vmap_offset_lo, vmap_offset_hi;
+ // TODO: IsInContext stops before spotting floating point registers.
+ if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
+ vmap_table.IsInContext(vreg + 1, kind_hi, &vmap_offset_hi)) {
+ bool is_float = (kind_lo == kDoubleLoVReg);
+ uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
+ uint32_t reg_lo = vmap_table.ComputeRegister(spill_mask, vmap_offset_lo, kind_lo);
+ uint32_t reg_hi = vmap_table.ComputeRegister(spill_mask, vmap_offset_hi, kind_hi);
+ return SetRegisterPairIfAccessible(reg_lo, reg_hi, new_value, is_float);
+ } else {
+ const DexFile::CodeItem* code_item = m->GetCodeItem();
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ // its instructions?
+ uint32_t* addr = GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
+ frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
+ *reinterpret_cast<uint64_t*>(addr) = new_value;
+ return true;
+ }
+}
+
+bool StackVisitor::SetVRegPairFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg,
+                                                uint64_t new_value, VRegKind kind_lo,
+                                                VRegKind kind_hi) {
+ uint32_t low_32bits = Low32Bits(new_value);
+ uint32_t high_32bits = High32Bits(new_value);
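+  // Write each half through the single-vreg path; fail if either half is inaccessible.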
+ bool success = SetVRegFromOptimizedCode(m, vreg, low_32bits, kind_lo);
+ success &= SetVRegFromOptimizedCode(m, vreg + 1, high_32bits, kind_hi);
+ return success;
+}
+
+bool StackVisitor::SetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi,
+ uint64_t new_value, bool is_float) {
+ if (!IsAccessibleRegister(reg_lo, is_float) || !IsAccessibleRegister(reg_hi, is_float)) {
+ return false;
+ }
+ uintptr_t new_value_lo = static_cast<uintptr_t>(new_value & 0xFFFFFFFF);
+ uintptr_t new_value_hi = static_cast<uintptr_t>(new_value >> 32);
+ bool target64 = Is64BitInstructionSet(kRuntimeISA);
+ // Deal with 32 or 64-bit wide registers in a way that builds on all targets.
+ if (target64) {
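+    // On 64-bit targets the pair lives in one register, so write the full value in a single store.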
+ DCHECK_EQ(reg_lo, reg_hi);
+ SetRegister(reg_lo, new_value, is_float);
+ } else {
+ SetRegister(reg_lo, new_value_lo, is_float);
+ SetRegister(reg_hi, new_value_hi, is_float);
+ }
+ return true;
+}
+
bool StackVisitor::IsAccessibleGPR(uint32_t reg) const {
DCHECK(context_ != nullptr);
return context_->IsAccessibleGPR(reg);
diff --git a/runtime/stack.h b/runtime/stack.h
index 5a86ca1..b495f03 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -492,7 +492,8 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint32_t val;
bool success = GetVReg(m, vreg, kind, &val);
- CHECK(success) << "Failed to read vreg " << vreg << " of kind " << kind;
+ CHECK(success) << "Failed to read v" << vreg << " of kind " << kind << " in method "
+ << PrettyMethod(m);
return val;
}
@@ -505,7 +506,8 @@
uint64_t val;
bool success = GetVRegPair(m, vreg, kind_lo, kind_hi, &val);
CHECK(success) << "Failed to read vreg pair " << vreg
- << " of kind [" << kind_lo << "," << kind_hi << "]";
+ << " of kind [" << kind_lo << "," << kind_hi << "] in method "
+ << PrettyMethod(m);
return val;
}
@@ -673,6 +675,45 @@
uintptr_t GetFPR(uint32_t reg) const;
void SetFPR(uint32_t reg, uintptr_t value);
+ bool GetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
+ uint32_t* val) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool GetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
+ uint32_t* val) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ bool GetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
+ VRegKind kind_hi, uint64_t* val) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool GetVRegPairFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg,
+ VRegKind kind_lo, VRegKind kind_hi,
+ uint64_t* val) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, VRegKind kind_lo,
+ uint64_t* val) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ bool SetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value,
+ VRegKind kind)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool SetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value,
+ VRegKind kind)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool SetRegisterIfAccessible(uint32_t reg, uint32_t new_value, VRegKind kind)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ bool SetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
+ VRegKind kind_lo, VRegKind kind_hi)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool SetVRegPairFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
+ VRegKind kind_lo, VRegKind kind_hi)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool SetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, uint64_t new_value,
+ bool is_float)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Thread* const thread_;
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index fd22361..ec37699 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -104,10 +104,9 @@
return "in fpu register";
case kConstant:
return "as constant";
- default:
- LOG(FATAL) << "Invalid location kind " << static_cast<int>(kind);
- return nullptr;
}
+ UNREACHABLE();
+ return nullptr;
}
LocationKind GetLocationKind(uint16_t register_index) const {
@@ -126,6 +125,23 @@
kFixedSize + sizeof(LocationKind) + register_index * SingleEntrySize());
}
+ int32_t GetStackOffsetInBytes(uint16_t register_index) const {
+ DCHECK(GetLocationKind(register_index) == kInStack);
+ // We currently encode the offset in bytes.
+ return GetValue(register_index);
+ }
+
+ int32_t GetConstant(uint16_t register_index) const {
+ DCHECK(GetLocationKind(register_index) == kConstant);
+ return GetValue(register_index);
+ }
+
+ int32_t GetMachineRegister(uint16_t register_index) const {
+ DCHECK(GetLocationKind(register_index) == kInRegister
+ || GetLocationKind(register_index) == kInFpuRegister);
+ return GetValue(register_index);
+ }
+
static size_t SingleEntrySize() {
return sizeof(LocationKind) + sizeof(int32_t);
}
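
Callers are expected to check GetLocationKind() first and then use the matching typed accessor; the DCHECKs above enforce that pairing. A hedged sketch of such a dispatch (the helper and its return-value convention are hypothetical; only the accessors are from the patch):

// Sketch: decode the location of one vreg via the accessors added above.
// Exactly one typed accessor is valid per location kind.
int32_t DecodeLocation(const DexRegisterMap& map, uint16_t reg) {
  switch (map.GetLocationKind(reg)) {
    case DexRegisterMap::kInStack:
      return map.GetStackOffsetInBytes(reg);  // Byte offset from the frame base.
    case DexRegisterMap::kInRegister:
    case DexRegisterMap::kInFpuRegister:
      return map.GetMachineRegister(reg);     // Machine register number.
    case DexRegisterMap::kConstant:
      return map.GetConstant(reg);            // Value was folded to a constant.
    default:
      return 0;                               // Dead or untracked register.
  }
}
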
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 3b48f49..79d0066 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -541,7 +541,7 @@
// Set stack_end_ to the bottom of the stack saving space of stack overflows
Runtime* runtime = Runtime::Current();
- bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsCompiler();
+ bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsAotCompiler();
ResetDefaultStackEnd();
// Install the protected region if we are doing implicit overflow checks.
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index d0f014a..83c5ffb 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -1107,9 +1107,11 @@
Locks::thread_list_lock_->ExclusiveLock(self);
bool removed = true;
if (!Contains(self)) {
+ std::string thread_name;
+ self->GetThreadName(thread_name);
std::ostringstream os;
DumpNativeStack(os, GetTid(), " native: ", nullptr);
- LOG(ERROR) << "Request to unregister unattached thread\n" << os.str();
+ LOG(ERROR) << "Request to unregister unattached thread " << thread_name << "\n" << os.str();
} else {
Locks::thread_suspend_count_lock_->ExclusiveLock(self);
if (!self->IsSuspended()) {
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 587eb32..2a82285 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -31,7 +31,7 @@
name_(name) {
std::string error_msg;
stack_.reset(MemMap::MapAnonymous(name.c_str(), nullptr, stack_size, PROT_READ | PROT_WRITE,
- false, &error_msg));
+ false, false, &error_msg));
CHECK(stack_.get() != nullptr) << error_msg;
const char* reason = "new thread pool worker thread";
pthread_attr_t attr;
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 0950abeb..93b3877 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -298,7 +298,7 @@
intptr_t interval_us = reinterpret_cast<intptr_t>(arg);
CHECK_GE(interval_us, 0);
CHECK(runtime->AttachCurrentThread("Sampling Profiler", true, runtime->GetSystemThreadGroup(),
- !runtime->IsCompiler()));
+ !runtime->IsAotCompiler()));
while (true) {
usleep(interval_us);
@@ -627,6 +627,12 @@
LOG(ERROR) << "Unexpected exception caught event in tracing";
}
+void Trace::BackwardBranch(Thread* /*thread*/, mirror::ArtMethod* method,
+ int32_t /*dex_pc_offset*/)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ LOG(ERROR) << "Unexpected backward branch event in tracing" << PrettyMethod(method);
+}
+
void Trace::ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff) {
if (UseThreadCpuClock()) {
uint64_t clock_base = thread->GetTraceClockBase();
diff --git a/runtime/trace.h b/runtime/trace.h
index ead1c29..9ba30d5 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -99,7 +99,8 @@
mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
mirror::Throwable* exception_object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
-
+ void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t dex_pc_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
// Reuse an old stack trace if it exists, otherwise allocate a new one.
static std::vector<mirror::ArtMethod*>* AllocStackTrace();
// Clear and store an old stack trace for later use.
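
Trace only stubs the new BackwardBranch listener callback; the intended consumer is hotness detection for the JIT. A hedged sketch of a counter built on the same callback shape (hypothetical class; it also assumes a backwards branch reports a non-positive dex_pc_offset):

#include <cstdint>
#include <map>

namespace mirror { class ArtMethod; }  // Forward declaration for the sketch.

// Hypothetical backward-branch counter; not ART's JIT listener, just an
// illustration of one possible consumer of the new event.
class BackwardBranchCounter {
 public:
  void BackwardBranch(mirror::ArtMethod* method, int32_t dex_pc_offset) {
    if (dex_pc_offset <= 0) {  // Assumption: backwards branches report offset <= 0.
      ++counts_[method];
    }
  }

  uint64_t CountFor(mirror::ArtMethod* method) const {
    auto it = counts_.find(method);
    return it == counts_.end() ? 0 : it->second;
  }

 private:
  std::map<mirror::ArtMethod*, uint64_t> counts_;
};
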
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index 7e2e0a6..c0fd7a5 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -32,7 +32,7 @@
Transaction::Transaction()
: log_lock_("transaction log lock", kTransactionLogLock), aborted_(false) {
- CHECK(Runtime::Current()->IsCompiler());
+ CHECK(Runtime::Current()->IsAotCompiler());
}
Transaction::~Transaction() {
@@ -70,8 +70,10 @@
}
}
-void Transaction::ThrowInternalError(Thread* self) {
- DCHECK(IsAborted());
+void Transaction::ThrowInternalError(Thread* self, bool rethrow) {
+ if (kIsDebugBuild && rethrow) {
+ CHECK(IsAborted()) << "Rethrowing InternalError while transaction is not aborted";
+ }
std::string abort_msg(GetAbortMessage());
self->ThrowNewException(self->GetCurrentLocationForThrow(), "Ljava/lang/InternalError;",
abort_msg.c_str());
diff --git a/runtime/transaction.h b/runtime/transaction.h
index be614f9..e1b93c9 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -45,7 +45,7 @@
void Abort(const std::string& abort_message)
LOCKS_EXCLUDED(log_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ThrowInternalError(Thread* self)
+ void ThrowInternalError(Thread* self, bool rethrow)
LOCKS_EXCLUDED(log_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsAborted() LOCKS_EXCLUDED(log_lock_);
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 85c9340..851eceb 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -133,14 +133,14 @@
}
bool ReadFileToString(const std::string& file_name, std::string* result) {
- std::unique_ptr<File> file(new File);
- if (!file->Open(file_name, O_RDONLY)) {
+ File file;
+ if (!file.Open(file_name, O_RDONLY)) {
return false;
}
std::vector<char> buf(8 * KB);
while (true) {
- int64_t n = TEMP_FAILURE_RETRY(read(file->Fd(), &buf[0], buf.size()));
+ int64_t n = TEMP_FAILURE_RETRY(read(file.Fd(), &buf[0], buf.size()));
if (n == -1) {
return false;
}
@@ -151,6 +151,59 @@
}
}
+bool PrintFileToLog(const std::string& file_name, LogSeverity level) {
+ File file;
+ if (!file.Open(file_name, O_RDONLY)) {
+ return false;
+ }
+
+ constexpr size_t kBufSize = 256; // Small buffer. Avoid stack overflow and stack size warnings.
+ char buf[kBufSize + 1]; // +1 for terminator.
+ size_t filled_to = 0;
+ while (true) {
+ DCHECK_LT(filled_to, kBufSize);
+ int64_t n = TEMP_FAILURE_RETRY(read(file.Fd(), &buf[filled_to], kBufSize - filled_to));
+ if (n <= 0) {
+ // Print the rest of the buffer, if it exists.
+ if (filled_to > 0) {
+ buf[filled_to] = 0;
+ LOG(level) << buf;
+ }
+ return n == 0;
+ }
+ // Scan for '\n'.
+ size_t i = filled_to;
+ bool found_newline = false;
+ for (; i < filled_to + n; ++i) {
+ if (buf[i] == '\n') {
+ // Found a line break; print the line now.
+ buf[i] = 0;
+ LOG(level) << buf;
+ // Copy the rest to the front.
+ if (i + 1 < filled_to + n) {
+ memmove(&buf[0], &buf[i + 1], filled_to + n - i - 1);
+ filled_to = filled_to + n - i - 1;
+ } else {
+ filled_to = 0;
+ }
+ found_newline = true;
+ break;
+ }
+ }
+ if (found_newline) {
+ continue;
+ } else {
+ filled_to += n;
+ // Check if we must flush now.
+ if (filled_to == kBufSize) {
+ buf[kBufSize] = 0;
+ LOG(level) << buf;
+ filled_to = 0;
+ }
+ }
+ }
+}
+
std::string GetIsoDate() {
time_t now = time(NULL);
tm tmbuf;
@@ -1211,14 +1264,6 @@
return;
}
-#if !defined(HAVE_ANDROID_OS)
- if (GetTid() != tid) {
- // TODO: dumping of other threads is disabled to avoid crashes during stress testing.
- // b/15446488.
- return;
- }
-#endif
-
std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, tid));
if (!backtrace->Unwind(0, reinterpret_cast<ucontext*>(ucontext_ptr))) {
os << prefix << "(backtrace::Unwind failed for thread " << tid << ")\n";
@@ -1262,9 +1307,9 @@
os << "+" << it->func_offset;
}
try_addr2line = true;
- } else if (current_method != nullptr &&
- Locks::mutator_lock_->IsSharedHeld(Thread::Current()) &&
- current_method->PcIsWithinQuickCode(it->pc)) {
+ } else if (
+ current_method != nullptr && Locks::mutator_lock_->IsSharedHeld(Thread::Current()) &&
+ current_method->PcIsWithinQuickCode(it->pc)) {
const void* start_of_code = current_method->GetEntryPointFromQuickCompiledCode();
os << JniLongName(current_method) << "+"
<< (it->pc - reinterpret_cast<uintptr_t>(start_of_code));
@@ -1466,14 +1511,16 @@
std::string DexFilenameToOdexFilename(const std::string& location, const InstructionSet isa) {
// location = /foo/bar/baz.jar
// odex_location = /foo/bar/<isa>/baz.odex
-
- CHECK_GE(location.size(), 4U) << location; // must be at least .123
std::string odex_location(location);
InsertIsaDirectory(isa, &odex_location);
- size_t dot_index = odex_location.size() - 3 - 1; // 3=dex or zip or apk
- CHECK_EQ('.', odex_location[dot_index]) << location;
+ size_t dot_index = odex_location.rfind('.');
+
+ // The location must have an extension, otherwise it's not clear what we
+ // should return.
+ CHECK_NE(dot_index, std::string::npos) << odex_location;
+ CHECK_EQ(std::string::npos, odex_location.find('/', dot_index)) << odex_location;
+
odex_location.resize(dot_index + 1);
- CHECK_EQ('.', odex_location[odex_location.size()-1]) << location << " " << odex_location;
odex_location += "odex";
return odex_location;
}
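
With the rfind-based checks, any extension is now accepted as long as the last '.' sits in the final path component. A standalone sketch of the same suffix replacement (the <isa> directory insertion done by the real helper is elided):

#include <cassert>
#include <cstddef>
#include <string>

// Replace the extension of a dex location with "odex", mirroring the
// rfind-based checks above (hypothetical helper name).
std::string ReplaceExtensionWithOdex(const std::string& location) {
  const std::size_t dot_index = location.rfind('.');
  assert(dot_index != std::string::npos);                      // Must have an extension.
  assert(location.find('/', dot_index) == std::string::npos);  // Dot is in the last component.
  return location.substr(0, dot_index + 1) + "odex";
}

int main() {
  assert(ReplaceExtensionWithOdex("/foo/bar/baz.funnyext") == "/foo/bar/baz.odex");
  return 0;
}
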
diff --git a/runtime/utils.h b/runtime/utils.h
index 3191e7d..9d04d35 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -410,6 +410,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool ReadFileToString(const std::string& file_name, std::string* result);
+bool PrintFileToLog(const std::string& file_name, LogSeverity level);
// Returns the current date in ISO yyyy-mm-dd hh:mm:ss format.
std::string GetIsoDate();
@@ -515,8 +516,9 @@
// Returns the system location for an image
std::string GetSystemImageFilename(const char* location, InstructionSet isa);
-// Returns an .odex file name next adjacent to the dex location.
+// Returns an .odex file name adjacent to the dex location.
// For example, for "/foo/bar/baz.jar", return "/foo/bar/<isa>/baz.odex".
+// The dex location must include a directory component and have an extension.
// Note: does not support multidex location strings.
std::string DexFilenameToOdexFilename(const std::string& location, InstructionSet isa);
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index a3dd13c..5465762 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -374,6 +374,8 @@
TEST_F(UtilsTest, DexFilenameToOdexFilename) {
EXPECT_STREQ("/foo/bar/arm/baz.odex",
DexFilenameToOdexFilename("/foo/bar/baz.jar", kArm).c_str());
+ EXPECT_STREQ("/foo/bar/arm/baz.odex",
+ DexFilenameToOdexFilename("/foo/bar/baz.funnyext", kArm).c_str());
}
TEST_F(UtilsTest, ExecSuccess) {
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 474a066..87a29ed 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -24,6 +24,7 @@
#include "compiler_callbacks.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
+#include "dex_instruction_utils.h"
#include "dex_instruction_visitor.h"
#include "gc/accounting/card_table-inl.h"
#include "indenter.h"
@@ -111,6 +112,20 @@
reg_line->MarkAllRegistersAsConflicts(verifier);
}
+MethodVerifier::FailureKind MethodVerifier::VerifyMethod(
+ mirror::ArtMethod* method, bool allow_soft_failures, std::string* error ATTRIBUTE_UNUSED) {
+ Thread* self = Thread::Current();
+ StackHandleScope<3> hs(self);
+ mirror::Class* klass = method->GetDeclaringClass();
+ auto h_dex_cache(hs.NewHandle(klass->GetDexCache()));
+ auto h_class_loader(hs.NewHandle(klass->GetClassLoader()));
+ auto h_method = hs.NewHandle(method);
+ return VerifyMethod(self, method->GetDexMethodIndex(), method->GetDexFile(), h_dex_cache,
+ h_class_loader, klass->GetClassDef(), method->GetCodeItem(), h_method,
+ method->GetAccessFlags(), allow_soft_failures, false);
+}
+
+
MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self,
mirror::Class* klass,
bool allow_soft_failures,
@@ -136,7 +151,7 @@
}
if (early_failure) {
*error = "Verifier rejected class " + PrettyDescriptor(klass) + failure_message;
- if (Runtime::Current()->IsCompiler()) {
+ if (Runtime::Current()->IsAotCompiler()) {
ClassReference ref(&dex_file, klass->GetDexClassDefIndex());
Runtime::Current()->GetCompilerCallbacks()->ClassRejected(ref);
}
@@ -544,7 +559,7 @@
case VERIFY_ERROR_ACCESS_METHOD:
case VERIFY_ERROR_INSTANTIATION:
case VERIFY_ERROR_CLASS_CHANGE:
- if (Runtime::Current()->IsCompiler() || !can_load_classes_) {
+ if (Runtime::Current()->IsAotCompiler() || !can_load_classes_) {
// If we're optimistically running verification at compile time, turn NO_xxx, ACCESS_xxx,
// class change and instantiation errors into soft verification errors so that we re-verify
// at runtime. We may fail to find or to agree on access because of not yet available class
@@ -568,7 +583,7 @@
// Hard verification failures at compile time will still fail at runtime, so the class is
// marked as rejected to prevent it from being compiled.
case VERIFY_ERROR_BAD_CLASS_HARD: {
- if (Runtime::Current()->IsCompiler()) {
+ if (Runtime::Current()->IsAotCompiler()) {
ClassReference ref(dex_file_, dex_file_->GetIndexForClassDef(*class_def_));
Runtime::Current()->GetCompilerCallbacks()->ClassRejected(ref);
}
@@ -844,7 +859,7 @@
result = false;
break;
}
- if (inst->GetVerifyIsRuntimeOnly() && Runtime::Current()->IsCompiler() && !verify_to_dump_) {
+ if (inst->GetVerifyIsRuntimeOnly() && Runtime::Current()->IsAotCompiler() && !verify_to_dump_) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "opcode only expected at runtime " << inst->Name();
result = false;
}
@@ -2812,8 +2827,8 @@
}
if (have_pending_hard_failure_) {
- if (Runtime::Current()->IsCompiler()) {
- /* When compiling, check that the last failure is a hard failure */
+ if (Runtime::Current()->IsAotCompiler()) {
+ /* When AOT compiling, check that the last failure is a hard failure */
CHECK_EQ(failures_[failures_.size() - 1], VERIFY_ERROR_BAD_CLASS_HARD);
}
/* immediate failure, reject class */
@@ -3941,28 +3956,16 @@
mirror::ArtField* MethodVerifier::GetQuickFieldAccess(const Instruction* inst,
RegisterLine* reg_line) {
- DCHECK(inst->Opcode() == Instruction::IGET_QUICK ||
- inst->Opcode() == Instruction::IGET_WIDE_QUICK ||
- inst->Opcode() == Instruction::IGET_OBJECT_QUICK ||
- inst->Opcode() == Instruction::IGET_BOOLEAN_QUICK ||
- inst->Opcode() == Instruction::IGET_BYTE_QUICK ||
- inst->Opcode() == Instruction::IGET_CHAR_QUICK ||
- inst->Opcode() == Instruction::IGET_SHORT_QUICK ||
- inst->Opcode() == Instruction::IPUT_QUICK ||
- inst->Opcode() == Instruction::IPUT_WIDE_QUICK ||
- inst->Opcode() == Instruction::IPUT_OBJECT_QUICK ||
- inst->Opcode() == Instruction::IPUT_BOOLEAN_QUICK ||
- inst->Opcode() == Instruction::IPUT_BYTE_QUICK ||
- inst->Opcode() == Instruction::IPUT_CHAR_QUICK ||
- inst->Opcode() == Instruction::IPUT_SHORT_QUICK);
+ DCHECK(IsInstructionIGetQuickOrIPutQuick(inst->Opcode())) << inst->Opcode();
const RegType& object_type = reg_line->GetRegisterType(this, inst->VRegB_22c());
if (!object_type.HasClass()) {
VLOG(verifier) << "Failed to get mirror::Class* from '" << object_type << "'";
return nullptr;
}
uint32_t field_offset = static_cast<uint32_t>(inst->VRegC_22c());
- mirror::ArtField* f = mirror::ArtField::FindInstanceFieldWithOffset(object_type.GetClass(),
- field_offset);
+ mirror::ArtField* const f = mirror::ArtField::FindInstanceFieldWithOffset(object_type.GetClass(),
+ field_offset);
+ DCHECK(f == nullptr || f->GetOffset().Uint32Value() == field_offset);
if (f == nullptr) {
VLOG(verifier) << "Failed to find instance field at offset '" << field_offset
<< "' from '" << PrettyDescriptor(object_type.GetClass()) << "'";
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index b83e647..bdd6259 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -156,6 +156,9 @@
uint32_t method_access_flags)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static FailureKind VerifyMethod(mirror::ArtMethod* method, bool allow_soft_failures,
+ std::string* error) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
uint8_t EncodePcToReferenceMapData() const;
uint32_t DexFileVersion() const {
@@ -239,10 +242,14 @@
bool HasFailures() const;
const RegType& ResolveCheckedClass(uint32_t class_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* GetQuickInvokedMethod(const Instruction* inst,
- RegisterLine* reg_line,
+ // Returns the method of a quick invoke or nullptr if it cannot be found.
+ mirror::ArtMethod* GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line,
bool is_range)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Returns the accessed field of a quick field access (iget/iput-quick) or nullptr
+ // if it cannot be found.
+ mirror::ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Is the method being verified a constructor?
bool IsConstructor() const {
@@ -532,11 +539,6 @@
bool is_primitive, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Returns the access field of a quick field access (iget/iput-quick) or nullptr
- // if it cannot be found.
- mirror::ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
template <FieldAccessType kAccType>
void VerifyQuickFieldAccess(const Instruction* inst, const RegType& insn_type, bool is_primitive)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 1dfbe51..c248565 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -580,8 +580,9 @@
}
void RegTypeCache::VisitRoots(RootCallback* callback, void* arg) {
- for (const RegType* entry : entries_) {
- entry->VisitRoots(callback, arg);
+ // Exclude the static roots that are visited by VisitStaticRoots().
+ for (size_t i = primitive_count_; i < entries_.size(); ++i) {
+ entries_[i]->VisitRoots(callback, arg);
}
}
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index e368d2c..78185bf 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -222,7 +222,7 @@
org_apache_harmony_dalvik_ddmc_DdmServer_broadcast = CacheMethod(env, org_apache_harmony_dalvik_ddmc_DdmServer, true, "broadcast", "(I)V");
org_apache_harmony_dalvik_ddmc_DdmServer_dispatch = CacheMethod(env, org_apache_harmony_dalvik_ddmc_DdmServer, true, "dispatch", "(I[BII)Lorg/apache/harmony/dalvik/ddmc/Chunk;");
- dalvik_system_DexFile_cookie = CacheField(env, dalvik_system_DexFile, false, "mCookie", "J");
+ dalvik_system_DexFile_cookie = CacheField(env, dalvik_system_DexFile, false, "mCookie", "Ljava/lang/Object;");
dalvik_system_PathClassLoader_pathList = CacheField(env, dalvik_system_PathClassLoader, false, "pathList", "Ldalvik/system/DexPathList;");
dalvik_system_DexPathList_dexElements = CacheField(env, dalvik_system_DexPathList, false, "dexElements", "[Ldalvik/system/DexPathList$Element;");
dalvik_system_DexPathList__Element_dexFile = CacheField(env, dalvik_system_DexPathList__Element, false, "dexFile", "Ldalvik/system/DexFile;");
diff --git a/runtime/zip_archive.cc b/runtime/zip_archive.cc
index 63bfc44..ffab674 100644
--- a/runtime/zip_archive.cc
+++ b/runtime/zip_archive.cc
@@ -57,7 +57,8 @@
name += zip_filename;
std::unique_ptr<MemMap> map(MemMap::MapAnonymous(name.c_str(),
NULL, GetUncompressedLength(),
- PROT_READ | PROT_WRITE, false, error_msg));
+ PROT_READ | PROT_WRITE, false, false,
+ error_msg));
if (map.get() == nullptr) {
DCHECK(!error_msg->empty());
return nullptr;
diff --git a/test/004-SignalTest/signaltest.cc b/test/004-SignalTest/signaltest.cc
index 31371f6..876d27e 100644
--- a/test/004-SignalTest/signaltest.cc
+++ b/test/004-SignalTest/signaltest.cc
@@ -65,6 +65,8 @@
#elif defined(__i386__) || defined(__x86_64__)
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
uc->CTX_EIP += 3;
+#else
+ UNUSED(context);
#endif
}
diff --git a/test/134-reg-promotion/expected.txt b/test/134-reg-promotion/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/134-reg-promotion/expected.txt
diff --git a/test/134-reg-promotion/info.txt b/test/134-reg-promotion/info.txt
new file mode 100644
index 0000000..6eff7eb
--- /dev/null
+++ b/test/134-reg-promotion/info.txt
@@ -0,0 +1,4 @@
+Test that a vreg value that was defined by a const 0 and is used in both ref
+and float operations is flushed to all home locations.
+
+See: b/19417710, b/7250540 & b.android.com/147187
diff --git a/test/134-reg-promotion/smali/Test.smali b/test/134-reg-promotion/smali/Test.smali
new file mode 100644
index 0000000..6a35c45
--- /dev/null
+++ b/test/134-reg-promotion/smali/Test.smali
@@ -0,0 +1,38 @@
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTest;
+
+.super Ljava/lang/Object;
+
+.method public static run()V
+ .registers 3
+ new-instance v2, Ljava/lang/String;
+ invoke-direct {v2}, Ljava/lang/String;-><init>()V
+ const/4 v0, 0
+ move v1, v0
+ :start
+ invoke-static {}, LMain;->blowup()V
+ if-ne v1, v0, :end
+ const/4 v2, 1
+ invoke-static {v2}, Ljava/lang/Integer;->toString(I)Ljava/lang/String;
+ move v2, v0
+ # The call makes the type of v2 float.
+ invoke-static {v2}, Ljava/lang/Float;->isNaN(F)Z
+ const/4 v1, 1
+ goto :start
+ :end
+ return-void
+.end method
diff --git a/test/134-reg-promotion/src/Main.java b/test/134-reg-promotion/src/Main.java
new file mode 100644
index 0000000..d45ec66
--- /dev/null
+++ b/test/134-reg-promotion/src/Main.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+ static char [][] holder;
+ static boolean sawOome;
+
+ static void blowup() {
+ try {
+ for (int i = 0; i < holder.length; ++i) {
+ holder[i] = new char[1024 * 1024];
+ }
+ } catch (OutOfMemoryError oome) {
+ sawOome = true;
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ Class<?> c = Class.forName("Test");
+ Method m = c.getMethod("run", (Class[]) null);
+ for (int i = 0; i < 10; i++) {
+ holder = new char[128 * 1024][];
+ m.invoke(null, (Object[]) null);
+ holder = null;
+ }
+ }
+}
diff --git a/test/449-checker-bce/src/Main.java b/test/449-checker-bce/src/Main.java
index 9391533..30aa870 100644
--- a/test/449-checker-bce/src/Main.java
+++ b/test/449-checker-bce/src/Main.java
@@ -314,6 +314,29 @@
array[10] = 1; // Bounds check can't be eliminated.
}
+
+ static byte readData() {
+ return 1;
+ }
+
+ // CHECK-START: void Main.circularBufferProducer() BCE (before)
+ // CHECK: BoundsCheck
+ // CHECK: ArraySet
+
+ // CHECK-START: void Main.circularBufferProducer() BCE (after)
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArraySet
+
+ static void circularBufferProducer() {
+ byte[] array = new byte[4096];
+ int i = 0;
+ while (true) {
+ array[i & (array.length - 1)] = readData();
+ i++;
+ }
+ }
+
+
// CHECK-START: void Main.pyramid1(int[]) BCE (before)
// CHECK: BoundsCheck
// CHECK: ArraySet
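
The producer loop stays in bounds because masking an index with length - 1 of a power-of-two array can never produce a value >= length, which is exactly the fact BCE exploits to drop the check. A standalone sketch of that invariant:

#include <cassert>
#include <cstdint>

int main() {
  constexpr uint32_t kLen = 4096;  // Power of two, like the test's buffer.
  static_assert((kLen & (kLen - 1)) == 0, "length must be a power of two");
  for (uint32_t i = 0; i < 100000u; ++i) {
    assert((i & (kLen - 1)) < kLen);  // The mask keeps indices in [0, kLen).
  }
  return 0;
}
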
diff --git a/test/454-get-vreg/expected.txt b/test/454-get-vreg/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/454-get-vreg/expected.txt
diff --git a/test/454-get-vreg/get_vreg_jni.cc b/test/454-get-vreg/get_vreg_jni.cc
new file mode 100644
index 0000000..937d2fe
--- /dev/null
+++ b/test/454-get-vreg/get_vreg_jni.cc
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "arch/context.h"
+#include "jni.h"
+#include "mirror/art_method-inl.h"
+#include "scoped_thread_state_change.h"
+#include "stack.h"
+#include "thread.h"
+
+namespace art {
+
+namespace {
+
+class TestVisitor : public StackVisitor {
+ public:
+ TestVisitor(Thread* thread, Context* context, mirror::Object* this_value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : StackVisitor(thread, context), this_value_(this_value), found_method_index_(0) {}
+
+ bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* m = GetMethod();
+ std::string m_name(m->GetName());
+
+ if (m_name.compare("testSimpleVReg") == 0) {
+ found_method_index_ = 1;
+ uint32_t value = 0;
+
+ CHECK(GetVReg(m, 0, kIntVReg, &value));
+ CHECK_EQ(value, 42u);
+
+ bool success = GetVReg(m, 1, kIntVReg, &value);
+ if (m->IsOptimized(sizeof(void*))) CHECK(!success);
+
+ success = GetVReg(m, 2, kIntVReg, &value);
+ if (m->IsOptimized(sizeof(void*))) CHECK(!success);
+
+ CHECK(GetVReg(m, 3, kReferenceVReg, &value));
+ CHECK_EQ(reinterpret_cast<mirror::Object*>(value), this_value_);
+
+ CHECK(GetVReg(m, 4, kIntVReg, &value));
+ CHECK_EQ(value, 1u);
+
+ CHECK(GetVReg(m, 5, kFloatVReg, &value));
+ uint32_t cast = bit_cast<float, uint32_t>(1.0f);
+ CHECK_EQ(value, cast);
+
+ CHECK(GetVReg(m, 6, kIntVReg, &value));
+ CHECK_EQ(value, 2u);
+
+ CHECK(GetVReg(m, 7, kIntVReg, &value));
+ CHECK_EQ(value, true);
+
+ CHECK(GetVReg(m, 8, kIntVReg, &value));
+ CHECK_EQ(value, 3u);
+
+ CHECK(GetVReg(m, 9, kIntVReg, &value));
+ CHECK_EQ(value, static_cast<uint32_t>('c'));
+ } else if (m_name.compare("testPairVReg") == 0) {
+ found_method_index_ = 2;
+ uint64_t value = 0;
+ CHECK(GetVRegPair(m, 0, kLongLoVReg, kLongHiVReg, &value));
+ CHECK_EQ(value, 42u);
+
+ bool success = GetVRegPair(m, 2, kLongLoVReg, kLongHiVReg, &value);
+ if (m->IsOptimized(sizeof(void*))) CHECK(!success);
+
+ success = GetVRegPair(m, 4, kLongLoVReg, kLongHiVReg, &value);
+ if (m->IsOptimized(sizeof(void*))) CHECK(!success);
+
+ uint32_t value32 = 0;
+ CHECK(GetVReg(m, 6, kReferenceVReg, &value32));
+ CHECK_EQ(reinterpret_cast<mirror::Object*>(value32), this_value_);
+
+ CHECK(GetVRegPair(m, 7, kLongLoVReg, kLongHiVReg, &value));
+ CHECK_EQ(static_cast<int64_t>(value), std::numeric_limits<int64_t>::min());
+
+ CHECK(GetVRegPair(m, 9, kLongLoVReg, kLongHiVReg, &value));
+ CHECK_EQ(static_cast<int64_t>(value), std::numeric_limits<int64_t>::max());
+
+ CHECK(GetVRegPair(m, 11, kLongLoVReg, kLongHiVReg, &value));
+ CHECK_EQ(value, 0u);
+
+ CHECK(GetVRegPair(m, 13, kDoubleLoVReg, kDoubleHiVReg, &value));
+ uint64_t cast = bit_cast<double, uint64_t>(2.0);
+ CHECK_EQ(value, cast);
+ }
+
+ return true;
+ }
+
+ mirror::Object* this_value_;
+
+ // Value returned to Java to ensure the methods testSimpleVReg and testPairVReg
+ // have been found and tested.
+ jint found_method_index_;
+};
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_doNativeCall(JNIEnv*, jobject value) {
+ ScopedObjectAccess soa(Thread::Current());
+ std::unique_ptr<Context> context(Context::Create());
+ TestVisitor visitor(soa.Self(), context.get(), soa.Decode<mirror::Object*>(value));
+ visitor.WalkStack();
+ return visitor.found_method_index_;
+}
+
+} // namespace
+
+} // namespace art
diff --git a/test/454-get-vreg/info.txt b/test/454-get-vreg/info.txt
new file mode 100644
index 0000000..20df0b5
--- /dev/null
+++ b/test/454-get-vreg/info.txt
@@ -0,0 +1 @@
+Tests for inspecting DEX registers in a Java method.
diff --git a/test/454-get-vreg/src/Main.java b/test/454-get-vreg/src/Main.java
new file mode 100644
index 0000000..df07d44
--- /dev/null
+++ b/test/454-get-vreg/src/Main.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public Main() {
+ }
+
+ int testSimpleVReg(int a, float f, short s, boolean z, byte b, char c) {
+ int e = doCall();
+ int g = doNativeCall();
+ return e + g;
+ }
+
+ long testPairVReg(long a, long b, long c, double e) {
+ long f = doCall();
+ long g = doNativeCall();
+ return f + g;
+ }
+
+ native int doNativeCall();
+
+ int doCall() {
+ return 42;
+ }
+
+ static {
+ System.loadLibrary("arttest");
+ }
+
+ public static void main(String[] args) {
+ Main rm = new Main();
+ if (rm.testSimpleVReg(1, 1.0f, (short)2, true, (byte)3, 'c') != 43) {
+ throw new Error("Expected 43");
+ }
+
+ if (rm.testPairVReg(Long.MIN_VALUE, Long.MAX_VALUE, 0, 2.0) != 44) {
+ throw new Error("Expected 44");
+ }
+ }
+}
diff --git a/test/455-set-vreg/expected.txt b/test/455-set-vreg/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/455-set-vreg/expected.txt
diff --git a/test/455-set-vreg/info.txt b/test/455-set-vreg/info.txt
new file mode 100644
index 0000000..e8c57b5
--- /dev/null
+++ b/test/455-set-vreg/info.txt
@@ -0,0 +1 @@
+Tests for setting DEX registers in a Java method.
diff --git a/test/455-set-vreg/set_vreg_jni.cc b/test/455-set-vreg/set_vreg_jni.cc
new file mode 100644
index 0000000..24d7832
--- /dev/null
+++ b/test/455-set-vreg/set_vreg_jni.cc
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "arch/context.h"
+#include "jni.h"
+#include "mirror/art_method-inl.h"
+#include "scoped_thread_state_change.h"
+#include "stack.h"
+#include "thread.h"
+
+namespace art {
+
+namespace {
+
+class TestVisitor : public StackVisitor {
+ public:
+ TestVisitor(Thread* thread, Context* context, mirror::Object* this_value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : StackVisitor(thread, context), this_value_(this_value) {}
+
+ bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* m = GetMethod();
+ std::string m_name(m->GetName());
+
+ if (m_name.compare("testIntVReg") == 0) {
+ uint32_t value = 0;
+ CHECK(GetVReg(m, 1, kReferenceVReg, &value));
+ CHECK_EQ(reinterpret_cast<mirror::Object*>(value), this_value_);
+
+ CHECK(SetVReg(m, 2, 5, kIntVReg));
+ CHECK(SetVReg(m, 3, 4, kIntVReg));
+ CHECK(SetVReg(m, 4, 3, kIntVReg));
+ CHECK(SetVReg(m, 5, 2, kIntVReg));
+ CHECK(SetVReg(m, 6, 1, kIntVReg));
+ } else if (m_name.compare("testLongVReg") == 0) {
+ uint32_t value = 0;
+ CHECK(GetVReg(m, 3, kReferenceVReg, &value));
+ CHECK_EQ(reinterpret_cast<mirror::Object*>(value), this_value_);
+
+ CHECK(SetVRegPair(m, 4, std::numeric_limits<int64_t>::max(), kLongLoVReg, kLongHiVReg));
+ CHECK(SetVRegPair(m, 6, 4, kLongLoVReg, kLongHiVReg));
+ CHECK(SetVRegPair(m, 8, 3, kLongLoVReg, kLongHiVReg));
+ CHECK(SetVRegPair(m, 10, 2, kLongLoVReg, kLongHiVReg));
+ CHECK(SetVRegPair(m, 12, 1, kLongLoVReg, kLongHiVReg));
+ } else if (m_name.compare("testFloatVReg") == 0) {
+ uint32_t value = 0;
+ CHECK(GetVReg(m, 1, kReferenceVReg, &value));
+ CHECK_EQ(reinterpret_cast<mirror::Object*>(value), this_value_);
+
+ CHECK(SetVReg(m, 2, bit_cast<float, uint32_t>(5.0f), kFloatVReg));
+ CHECK(SetVReg(m, 3, bit_cast<float, uint32_t>(4.0f), kFloatVReg));
+ CHECK(SetVReg(m, 4, bit_cast<float, uint32_t>(3.0f), kFloatVReg));
+ CHECK(SetVReg(m, 5, bit_cast<float, uint32_t>(2.0f), kFloatVReg));
+ CHECK(SetVReg(m, 6, bit_cast<float, uint32_t>(1.0f), kFloatVReg));
+ } else if (m_name.compare("testDoubleVReg") == 0) {
+ uint32_t value = 0;
+ CHECK(GetVReg(m, 3, kReferenceVReg, &value));
+ CHECK_EQ(reinterpret_cast<mirror::Object*>(value), this_value_);
+
+ CHECK(SetVRegPair(m, 4, bit_cast<double, uint64_t>(5.0), kDoubleLoVReg, kDoubleHiVReg));
+ CHECK(SetVRegPair(m, 6, bit_cast<double, uint64_t>(4.0), kDoubleLoVReg, kDoubleHiVReg));
+ CHECK(SetVRegPair(m, 8, bit_cast<double, uint64_t>(3.0), kDoubleLoVReg, kDoubleHiVReg));
+ CHECK(SetVRegPair(m, 10, bit_cast<double, uint64_t>(2.0), kDoubleLoVReg, kDoubleHiVReg));
+ CHECK(SetVRegPair(m, 12, bit_cast<double, uint64_t>(1.0), kDoubleLoVReg, kDoubleHiVReg));
+ }
+
+ return true;
+ }
+
+ mirror::Object* this_value_;
+};
+
+extern "C" JNIEXPORT void JNICALL Java_Main_doNativeCallSetVReg(JNIEnv*, jobject value) {
+ ScopedObjectAccess soa(Thread::Current());
+ std::unique_ptr<Context> context(Context::Create());
+ TestVisitor visitor(soa.Self(), context.get(), soa.Decode<mirror::Object*>(value));
+ visitor.WalkStack();
+}
+
+} // namespace
+
+} // namespace art
diff --git a/test/455-set-vreg/src/Main.java b/test/455-set-vreg/src/Main.java
new file mode 100644
index 0000000..2172d92
--- /dev/null
+++ b/test/455-set-vreg/src/Main.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public Main() {
+ }
+
+ int testIntVReg(int a, int b, int c, int d, int e) {
+ doNativeCallSetVReg();
+ return a - b - c - d - e;
+ }
+
+ long testLongVReg(long a, long b, long c, long d, long e) {
+ doNativeCallSetVReg();
+ return a - b - c - d - e;
+ }
+
+ float testFloatVReg(float a, float b, float c, float d, float e) {
+ doNativeCallSetVReg();
+ return a - b - c - d - e;
+ }
+
+ double testDoubleVReg(double a, double b, double c, double d, double e) {
+ doNativeCallSetVReg();
+ return a - b - c - d - e;
+ }
+
+ native void doNativeCallSetVReg();
+
+ static {
+ System.loadLibrary("arttest");
+ }
+
+ public static void main(String[] args) {
+ Main rm = new Main();
+ int intExpected = 5 - 4 - 3 - 2 - 1;
+ int intResult = rm.testIntVReg(0, 0, 0, 0, 0);
+ if (intResult != intExpected) {
+ throw new Error("Expected " + intExpected + ", got " + intResult);
+ }
+
+ long longExpected = Long.MAX_VALUE - 4 - 3 - 2 - 1;
+ long longResult = rm.testLongVReg(0, 0, 0, 0, 0);
+ if (longResult != longExpected) {
+ throw new Error("Expected " + longExpected + ", got " + longResult);
+ }
+
+ float floatExpected = 5.0f - 4.0f - 3.0f - 2.0f - 1.0f;
+ float floatResult = rm.testFloatVReg(0.0f, 0.0f, 0.0f, 0.0f, 0.0f);
+ if (floatResult != floatExpected) {
+ throw new Error("Expected " + floatExpected + ", got " + floatResult);
+ }
+
+ double doubleExpected = 5.0 - 4.0 - 3.0 - 2.0 - 1.0;
+ double doubleResult = rm.testDoubleVReg(0.0, 0.0, 0.0, 0.0, 0.0);
+ if (doubleResult != doubleExpected) {
+ throw new Error("Expected " + doubleExpected + ", got " + doubleResult);
+ }
+ }
+}
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index e64df5c..75c5d72 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -27,7 +27,9 @@
051-thread/thread_test.cc \
116-nodex2oat/nodex2oat.cc \
117-nopatchoat/nopatchoat.cc \
- 118-noimage-dex2oat/noimage-dex2oat.cc
+ 118-noimage-dex2oat/noimage-dex2oat.cc \
+ 454-get-vreg/get_vreg_jni.cc \
+ 455-set-vreg/set_vreg_jni.cc
ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
ifdef TARGET_2ND_ARCH
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 8e4b46b..10c422e 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -84,6 +84,9 @@
ifeq ($(ART_TEST_INTERPRETER),true)
COMPILER_TYPES += interpreter
endif
+ifeq ($(ART_TEST_JIT),true)
+ COMPILER_TYPES += jit
+endif
ifeq ($(ART_TEST_OPTIMIZING),true)
COMPILER_TYPES += optimizing
endif
@@ -301,6 +304,8 @@
118-noimage-dex2oat \
119-noimage-patchoat \
131-structural-change \
+ 454-get-vreg \
+ 455-set-vreg \
ifneq (,$(filter ndebug,$(RUN_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),ndebug,$(PREBUILD_TYPES), \
@@ -434,7 +439,7 @@
# Create a rule to build and run a tests following the form:
# test-art-{1: host or target}-run-test-{2: debug ndebug}-{3: prebuild no-prebuild no-dex2oat}-
-# {4: interpreter default optimizing}-{5: relocate no-relocate relocate-no-patchoat}-
+# {4: interpreter default optimizing jit}-{5: relocate no-relocate relocate-no-patchoat}-
# {6: trace or no-trace}-{7: gcstress gcverify cms}-{8: forcecopy checkjni jni}-
# {9: no-image image picimage}-{10: pictest nopictest}-{11: test name}{12: 32 or 64}
define define-test-art-run-test
@@ -497,7 +502,12 @@
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_DEFAULT_RULES
run_test_options += --quick
else
- $$(error found $(4) expected $(COMPILER_TYPES))
+ ifeq ($(4),jit)
+ test_groups += ART_RUN_TEST_$$(uc_host_or_target)_JIT_RULES
+ run_test_options += --jit
+ else
+ $$(error found $(4) expected $(COMPILER_TYPES))
+ endif
endif
endif
endif
@@ -561,32 +571,38 @@
endif
endif
endif
+ ifeq ($(4),jit)
+ # Use interpreter image for JIT.
+ image_suffix := interpreter
+ else
+ image_suffix := $(4)
+ endif
ifeq ($(9),no-image)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_IMAGE_RULES
run_test_options += --no-image
# Add the core dependency. This is required for pre-building.
ifeq ($(1),host)
- prereq_rule += $(HOST_CORE_IMAGE_$(4)_no-pic_$(12))
+ prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_no-pic_$(12))
else
- prereq_rule += $(TARGET_CORE_IMAGE_$(4)_no-pic_$(12))
+ prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_no-pic_$(12))
endif
else
ifeq ($(9),image)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_IMAGE_RULES
# Add the core dependency.
ifeq ($(1),host)
- prereq_rule += $(HOST_CORE_IMAGE_$(4)_no-pic_$(12))
+ prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_no-pic_$(12))
else
- prereq_rule += $(TARGET_CORE_IMAGE_$(4)_no-pic_$(12))
+ prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_no-pic_$(12))
endif
else
ifeq ($(9),picimage)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_PICIMAGE_RULES
run_test_options += --pic-image
ifeq ($(1),host)
- prereq_rule += $(HOST_CORE_IMAGE_$(4)_pic_$(12))
+ prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_pic_$(12))
else
- prereq_rule += $(TARGET_CORE_IMAGE_$(4)_pic_$(12))
+ prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_pic_$(12))
endif
else
$$(error found $(9) expected $(IMAGE_TYPES))
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index f64756b..7a2ad1c 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -26,6 +26,7 @@
HAVE_IMAGE="y"
HOST="n"
INTERPRETER="n"
+JIT="n"
INVOKE_WITH=""
ISA=x86
LIBRARY_DIRECTORY="lib"
@@ -127,6 +128,9 @@
elif [ "x$1" = "x--interpreter" ]; then
INTERPRETER="y"
shift
+ elif [ "x$1" = "x--jit" ]; then
+ JIT="y"
+ shift
elif [ "x$1" = "x--jvm" ]; then
USE_JVM="y"
shift
@@ -260,6 +264,16 @@
fi
fi
+if [ "$JIT" = "y" ]; then
+ INT_OPTS="-Xjit"
+ if [ "$VERIFY" = "y" ] ; then
+ COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=interpret-only"
+ else
+ COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=verify-none"
+ DEX_VERIFY="${DEX_VERIFY} -Xverify:none"
+ fi
+fi
+
JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni"
if [ "$RELOCATE" = "y" ]; then
@@ -347,8 +361,8 @@
export ANDROID_DATA=$DEX_LOCATION && \
export DEX_LOCATION=$DEX_LOCATION && \
export ANDROID_ROOT=$ANDROID_ROOT && \
- export LD_LIBRARY_PATH=$LD_LIBRARY_PATH && \
$mkdir_cmdline && \
+ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH && \
$dex2oat_cmdline && \
$dalvikvm_cmdline"
diff --git a/test/run-all-tests b/test/run-all-tests
index 318a0de..d0b3cf9 100755
--- a/test/run-all-tests
+++ b/test/run-all-tests
@@ -53,6 +53,9 @@
elif [ "x$1" = "x--interpreter" ]; then
run_args="${run_args} --interpreter"
shift
+ elif [ "x$1" = "x--jit" ]; then
+ run_args="${run_args} --jit"
+ shift
elif [ "x$1" = "x--no-verify" ]; then
run_args="${run_args} --no-verify"
shift
@@ -126,7 +129,7 @@
echo " $prog [options] Run all tests with the given options."
echo " Options are all passed to run-test; refer to that for " \
"further documentation:"
- echo " --debug --dev --host --interpreter --jvm --no-optimize"
+ echo " --debug --dev --host --interpreter --jit --jvm --no-optimize"
echo " --no-verify -O --update --valgrind --zygote --64 --relocate"
echo " --prebuild --always-clean --gcstress --gcverify --trace"
echo " --no-patchoat --no-dex2oat"
diff --git a/test/run-test b/test/run-test
index 8c47663..52f5e0c 100755
--- a/test/run-test
+++ b/test/run-test
@@ -193,6 +193,10 @@
run_args="${run_args} --interpreter"
image_suffix="-interpreter"
shift
+ elif [ "x$1" = "x--jit" ]; then
+ run_args="${run_args} --jit"
+ image_suffix="-interpreter"
+ shift
elif [ "x$1" = "x--optimizing" ]; then
run_args="${run_args} -Xcompiler-option --compiler-backend=Optimizing"
image_suffix="-optimizing"
@@ -430,6 +434,7 @@
echo " --gdb Run under gdb; incompatible with some tests."
echo " --build-only Build test files only (off by default)."
echo " --interpreter Enable interpreter only mode (off by default)."
+ echo " --jit Enable jit (off by default)."
echo " --optimizing Enable optimizing compiler (off by default)."
echo " --quick Use Quick compiler (default)."
echo " --no-verify Turn off verification (on by default)."
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 4502c02..41d814a 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -63,15 +63,20 @@
"org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parseLjava_lang_StringLjava_text_ParsePosition"]
},
{
- description: "Failing due to switched off network stack on volantisg.",
+ description: "Failing due to missing localhost on volantis.",
result: EXEC_FAILED,
modes: [device],
- names: ["libcore.javax.crypto.CipherTest#testCipherInitWithCertificate",
- "org.apache.harmony.luni.tests.internal.net.www.protocol.http.HttpURLConnectionTest",
+ names: ["org.apache.harmony.luni.tests.internal.net.www.protocol.http.HttpURLConnectionTest",
"org.apache.harmony.luni.tests.internal.net.www.protocol.https.HttpsURLConnectionTest",
"org.apache.harmony.luni.tests.java.net.URLConnectionTest"]
},
{
+ description: "Failing due to missing localhost on hammerhead and volantis.",
+ result: EXEC_FAILED,
+ modes: [device],
+ names: ["libcore.javax.crypto.CipherTest#testCipherInitWithCertificate"]
+},
+{
description: "Test timeouts",
result: EXEC_TIMEOUT,
modes: [device],
@@ -94,5 +99,12 @@
result: EXEC_FAILED,
name: "org.apache.harmony.security.tests.java.security.Signature2Test#test_verify$BII",
bug: 18869265
+},
+{
+ description: "Test sometimes timeouts on volantis",
+ result: EXEC_TIMEOUT,
+ modes_variants: [[device,X64]],
+ names: ["libcore.java.lang.SystemTest#testArrayCopyConcurrentModification"],
+ bug: 19165288
}
]