Merge "MIPS64: Clean-up intrinsics code"
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index b3d246c..cd9d18d 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -114,7 +114,8 @@
else
ART_TARGET_CLANG := false
endif
-ART_TARGET_CLANG_arm :=
+# b/25130937
+ART_TARGET_CLANG_arm := false
ART_TARGET_CLANG_arm64 :=
ART_TARGET_CLANG_mips :=
ART_TARGET_CLANG_mips64 :=
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index a561c5f..c53479c 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -89,7 +89,11 @@
HOST_CORE_DEX_FILES := $(foreach jar,$(HOST_CORE_JARS), $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
+ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
# Classpath for Jack compilation: we only need core-libart.
-HOST_JACK_CLASSPATH := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack)
-TARGET_JACK_CLASSPATH := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack)
+HOST_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack
+HOST_JACK_CLASSPATH := $(foreach dep,$(HOST_JACK_CLASSPATH_DEPENDENCIES),$(abspath $(dep)))
+TARGET_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack
+TARGET_JACK_CLASSPATH := $(foreach dep,$(TARGET_JACK_CLASSPATH_DEPENDENCIES),$(abspath $(dep)))
+endif
endif # ART_ANDROID_COMMON_PATH_MK
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 1b54a51..6295e15 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -204,7 +204,6 @@
runtime/interpreter/safe_math_test.cc \
runtime/interpreter/unstarted_runtime_test.cc \
runtime/java_vm_ext_test.cc \
- runtime/jit/jit_code_cache_test.cc \
runtime/lambda/closure_test.cc \
runtime/lambda/shorty_field_type_test.cc \
runtime/leb128_test.cc \
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 20c8023..17f9d12 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -78,6 +78,7 @@
optimizing/instruction_simplifier.cc \
optimizing/intrinsics.cc \
optimizing/licm.cc \
+ optimizing/load_store_elimination.cc \
optimizing/locations.cc \
optimizing/nodes.cc \
optimizing/optimization.cc \
@@ -93,7 +94,6 @@
optimizing/ssa_phi_elimination.cc \
optimizing/stack_map_stream.cc \
trampolines/trampoline_compiler.cc \
- utils/arena_bit_vector.cc \
utils/assembler.cc \
utils/swap_space.cc \
buffered_output_stream.cc \
@@ -152,6 +152,7 @@
dex/quick/mips/target_mips.cc \
dex/quick/mips/utility_mips.cc \
jni/quick/mips/calling_convention_mips.cc \
+ optimizing/code_generator_mips.cc \
utils/mips/assembler_mips.cc \
utils/mips/managed_register_mips.cc \
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 1727657..58a2f96 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -32,6 +32,7 @@
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
+#include "oat_quick_method_header.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "utils.h"
diff --git a/compiler/dex/gvn_dead_code_elimination.cc b/compiler/dex/gvn_dead_code_elimination.cc
index 4de3410..445859c 100644
--- a/compiler/dex/gvn_dead_code_elimination.cc
+++ b/compiler/dex/gvn_dead_code_elimination.cc
@@ -18,6 +18,7 @@
#include "gvn_dead_code_elimination.h"
+#include "base/arena_bit_vector.h"
#include "base/bit_vector-inl.h"
#include "base/macros.h"
#include "base/allocator.h"
@@ -26,7 +27,6 @@
#include "dex_instruction.h"
#include "dex/mir_graph.h"
#include "local_value_numbering.h"
-#include "utils/arena_bit_vector.h"
namespace art {
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index bd00690..f98969e 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -797,6 +797,10 @@
}
}
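+// Computes -1 << by without left-shifting a negative value, which is undefined behavior in
+// C++; the shift is performed on the unsigned representation and then cast back.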
+static constexpr int64_t shift_minus_1(size_t by) {
+ return static_cast<int64_t>(static_cast<uint64_t>(INT64_C(-1)) << by);
+}
+
TEST_F(LocalValueNumberingTest, ConstWide) {
static const MIRDef mirs[] = {
// Core reg constants.
@@ -804,45 +808,45 @@
DEF_CONST(Instruction::CONST_WIDE_16, 2u, 1),
DEF_CONST(Instruction::CONST_WIDE_16, 4u, -1),
DEF_CONST(Instruction::CONST_WIDE_32, 6u, 1 << 16),
- DEF_CONST(Instruction::CONST_WIDE_32, 8u, -1 << 16),
+ DEF_CONST(Instruction::CONST_WIDE_32, 8u, shift_minus_1(16)),
DEF_CONST(Instruction::CONST_WIDE_32, 10u, (1 << 16) + 1),
DEF_CONST(Instruction::CONST_WIDE_32, 12u, (1 << 16) - 1),
DEF_CONST(Instruction::CONST_WIDE_32, 14u, -(1 << 16) + 1),
DEF_CONST(Instruction::CONST_WIDE_32, 16u, -(1 << 16) - 1),
DEF_CONST(Instruction::CONST_WIDE, 18u, INT64_C(1) << 32),
- DEF_CONST(Instruction::CONST_WIDE, 20u, INT64_C(-1) << 32),
+ DEF_CONST(Instruction::CONST_WIDE, 20u, shift_minus_1(32)),
DEF_CONST(Instruction::CONST_WIDE, 22u, (INT64_C(1) << 32) + 1),
DEF_CONST(Instruction::CONST_WIDE, 24u, (INT64_C(1) << 32) - 1),
- DEF_CONST(Instruction::CONST_WIDE, 26u, (INT64_C(-1) << 32) + 1),
- DEF_CONST(Instruction::CONST_WIDE, 28u, (INT64_C(-1) << 32) - 1),
+ DEF_CONST(Instruction::CONST_WIDE, 26u, shift_minus_1(32) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 28u, shift_minus_1(32) - 1),
DEF_CONST(Instruction::CONST_WIDE_HIGH16, 30u, 1), // Effectively 1 << 48.
DEF_CONST(Instruction::CONST_WIDE_HIGH16, 32u, 0xffff), // Effectively -1 << 48.
DEF_CONST(Instruction::CONST_WIDE, 34u, (INT64_C(1) << 48) + 1),
DEF_CONST(Instruction::CONST_WIDE, 36u, (INT64_C(1) << 48) - 1),
- DEF_CONST(Instruction::CONST_WIDE, 38u, (INT64_C(-1) << 48) + 1),
- DEF_CONST(Instruction::CONST_WIDE, 40u, (INT64_C(-1) << 48) - 1),
+ DEF_CONST(Instruction::CONST_WIDE, 38u, shift_minus_1(48) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 40u, shift_minus_1(48) - 1),
// FP reg constants.
DEF_CONST(Instruction::CONST_WIDE_16, 42u, 0),
DEF_CONST(Instruction::CONST_WIDE_16, 44u, 1),
DEF_CONST(Instruction::CONST_WIDE_16, 46u, -1),
DEF_CONST(Instruction::CONST_WIDE_32, 48u, 1 << 16),
- DEF_CONST(Instruction::CONST_WIDE_32, 50u, -1 << 16),
+ DEF_CONST(Instruction::CONST_WIDE_32, 50u, shift_minus_1(16)),
DEF_CONST(Instruction::CONST_WIDE_32, 52u, (1 << 16) + 1),
DEF_CONST(Instruction::CONST_WIDE_32, 54u, (1 << 16) - 1),
DEF_CONST(Instruction::CONST_WIDE_32, 56u, -(1 << 16) + 1),
DEF_CONST(Instruction::CONST_WIDE_32, 58u, -(1 << 16) - 1),
DEF_CONST(Instruction::CONST_WIDE, 60u, INT64_C(1) << 32),
- DEF_CONST(Instruction::CONST_WIDE, 62u, INT64_C(-1) << 32),
+ DEF_CONST(Instruction::CONST_WIDE, 62u, shift_minus_1(32)),
DEF_CONST(Instruction::CONST_WIDE, 64u, (INT64_C(1) << 32) + 1),
DEF_CONST(Instruction::CONST_WIDE, 66u, (INT64_C(1) << 32) - 1),
- DEF_CONST(Instruction::CONST_WIDE, 68u, (INT64_C(-1) << 32) + 1),
- DEF_CONST(Instruction::CONST_WIDE, 70u, (INT64_C(-1) << 32) - 1),
+ DEF_CONST(Instruction::CONST_WIDE, 68u, shift_minus_1(32) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 70u, shift_minus_1(32) - 1),
DEF_CONST(Instruction::CONST_WIDE_HIGH16, 72u, 1), // Effectively 1 << 48.
DEF_CONST(Instruction::CONST_WIDE_HIGH16, 74u, 0xffff), // Effectively -1 << 48.
DEF_CONST(Instruction::CONST_WIDE, 76u, (INT64_C(1) << 48) + 1),
DEF_CONST(Instruction::CONST_WIDE, 78u, (INT64_C(1) << 48) - 1),
- DEF_CONST(Instruction::CONST_WIDE, 80u, (INT64_C(-1) << 48) + 1),
- DEF_CONST(Instruction::CONST_WIDE, 82u, (INT64_C(-1) << 48) - 1),
+ DEF_CONST(Instruction::CONST_WIDE, 80u, shift_minus_1(48) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 82u, shift_minus_1(48) - 1),
};
PrepareMIRs(mirs);
@@ -868,7 +872,7 @@
DEF_CONST(Instruction::CONST_4, 1u, 1),
DEF_CONST(Instruction::CONST_4, 2u, -1),
DEF_CONST(Instruction::CONST_16, 3u, 1 << 4),
- DEF_CONST(Instruction::CONST_16, 4u, -1 << 4),
+ DEF_CONST(Instruction::CONST_16, 4u, shift_minus_1(4)),
DEF_CONST(Instruction::CONST_16, 5u, (1 << 4) + 1),
DEF_CONST(Instruction::CONST_16, 6u, (1 << 4) - 1),
DEF_CONST(Instruction::CONST_16, 7u, -(1 << 4) + 1),
@@ -877,14 +881,14 @@
DEF_CONST(Instruction::CONST_HIGH16, 10u, 0xffff), // Effectively -1 << 16.
DEF_CONST(Instruction::CONST, 11u, (1 << 16) + 1),
DEF_CONST(Instruction::CONST, 12u, (1 << 16) - 1),
- DEF_CONST(Instruction::CONST, 13u, (-1 << 16) + 1),
- DEF_CONST(Instruction::CONST, 14u, (-1 << 16) - 1),
+ DEF_CONST(Instruction::CONST, 13u, shift_minus_1(16) + 1),
+ DEF_CONST(Instruction::CONST, 14u, shift_minus_1(16) - 1),
// FP reg constants.
DEF_CONST(Instruction::CONST_4, 15u, 0),
DEF_CONST(Instruction::CONST_4, 16u, 1),
DEF_CONST(Instruction::CONST_4, 17u, -1),
DEF_CONST(Instruction::CONST_16, 18u, 1 << 4),
- DEF_CONST(Instruction::CONST_16, 19u, -1 << 4),
+ DEF_CONST(Instruction::CONST_16, 19u, shift_minus_1(4)),
DEF_CONST(Instruction::CONST_16, 20u, (1 << 4) + 1),
DEF_CONST(Instruction::CONST_16, 21u, (1 << 4) - 1),
DEF_CONST(Instruction::CONST_16, 22u, -(1 << 4) + 1),
@@ -893,8 +897,8 @@
DEF_CONST(Instruction::CONST_HIGH16, 25u, 0xffff), // Effectively -1 << 16.
DEF_CONST(Instruction::CONST, 26u, (1 << 16) + 1),
DEF_CONST(Instruction::CONST, 27u, (1 << 16) - 1),
- DEF_CONST(Instruction::CONST, 28u, (-1 << 16) + 1),
- DEF_CONST(Instruction::CONST, 29u, (-1 << 16) - 1),
+ DEF_CONST(Instruction::CONST, 28u, shift_minus_1(16) + 1),
+ DEF_CONST(Instruction::CONST, 29u, shift_minus_1(16) - 1),
// null reference constant.
DEF_CONST(Instruction::CONST_4, 30u, 0),
};
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 097abdc..2da8a98 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -19,6 +19,7 @@
#include <stdint.h>
+#include "base/arena_bit_vector.h"
#include "base/arena_containers.h"
#include "base/bit_utils.h"
#include "base/scoped_arena_containers.h"
@@ -30,7 +31,6 @@
#include "mir_method_info.h"
#include "reg_location.h"
#include "reg_storage.h"
-#include "utils/arena_bit_vector.h"
namespace art {
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 64becb9..e5d3841 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -21,6 +21,7 @@
#include "dex/compiler_ir.h"
#include "dex/quick/mir_to_lir.h"
#include "oat.h"
+#include "oat_quick_method_header.h"
#include "utils.h"
#include "x86_lir.h"
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 25fb886..75f3fef 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -31,6 +31,7 @@
#include "mirror/array-inl.h"
#include "mirror/string.h"
#include "oat.h"
+#include "oat_quick_method_header.h"
#include "x86_lir.h"
namespace art {
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 8324bf3..b956584 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -594,7 +594,7 @@
}
} else if ((access_flags & kAccAbstract) != 0) {
// Abstract methods don't have code.
- } else {
+ } else if (Runtime::Current()->IsAotCompiler()) {
const VerifiedMethod* verified_method =
driver->GetVerificationResults()->GetVerifiedMethod(method_ref);
bool compile = compilation_enabled &&
@@ -633,6 +633,13 @@
? dex_to_dex_compilation_level
: optimizer::DexToDexCompilationLevel::kRequired);
}
+ } else {
+ // This is for the JIT compiler, which has already ensured the class is verified.
+ // We can go straight to compiling.
+ DCHECK(Runtime::Current()->UseJit());
+ compiled_method = driver->GetCompiler()->Compile(code_item, access_flags, invoke_type,
+ class_def_idx, method_idx, class_loader,
+ dex_file, dex_cache);
}
if (kTimeCompileMethod) {
uint64_t duration_ns = NanoTime() - start_ns;
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index a45df95..3d1b42f 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -30,6 +30,7 @@
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "oat_file-inl.h"
+#include "oat_quick_method_header.h"
#include "object_lock.h"
#include "thread_list.h"
#include "verifier/method_verifier-inl.h"
@@ -157,61 +158,66 @@
StackHandleScope<2> hs(self);
self->AssertNoPendingException();
Runtime* runtime = Runtime::Current();
+
+ // Check if the method is already compiled.
if (runtime->GetJit()->GetCodeCache()->ContainsMethod(method)) {
VLOG(jit) << "Already compiled " << PrettyMethod(method);
- return true; // Already compiled
+ return true;
}
+
+ // Don't compile the method if we are supposed to be deoptimized.
+ if (runtime->GetInstrumentation()->AreAllMethodsDeoptimized()) {
+ return false;
+ }
+
+ // Ensure the class is initialized.
Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
- {
- TimingLogger::ScopedTiming t2("Initializing", &logger);
- if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
- VLOG(jit) << "JIT failed to initialize " << PrettyMethod(method);
- return false;
- }
+ if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
+ VLOG(jit) << "JIT failed to initialize " << PrettyMethod(method);
+ return false;
}
- const DexFile* dex_file = h_class->GetDexCache()->GetDexFile();
- MethodReference method_ref(dex_file, method->GetDexMethodIndex());
- // Only verify if we don't already have verification results.
- if (verification_results_->GetVerifiedMethod(method_ref) == nullptr) {
- TimingLogger::ScopedTiming t2("Verifying", &logger);
- std::string error;
- if (verifier::MethodVerifier::VerifyMethod(method, true, &error) ==
- verifier::MethodVerifier::kHardFailure) {
- VLOG(jit) << "Not compile method " << PrettyMethod(method)
- << " due to verification failure " << error;
- return false;
- }
- }
+
+ // Do the compilation.
CompiledMethod* compiled_method = nullptr;
{
TimingLogger::ScopedTiming t2("Compiling", &logger);
compiled_method = compiler_driver_->CompileArtMethod(self, method);
}
+
+ // Trim maps to reduce memory usage.
+ // TODO: measure how much this increases compile time.
{
TimingLogger::ScopedTiming t2("TrimMaps", &logger);
- // Trim maps to reduce memory usage, TODO: measure how much this increases compile time.
runtime->GetArenaPool()->TrimMaps();
}
+
+ // Check if we failed compiling.
if (compiled_method == nullptr) {
return false;
}
+
total_time_ += NanoTime() - start_time;
- // Don't add the method if we are supposed to be deoptimized.
bool result = false;
- if (!runtime->GetInstrumentation()->AreAllMethodsDeoptimized()) {
- const void* code = runtime->GetClassLinker()->GetOatMethodQuickCodeFor(method);
- if (code != nullptr) {
- // Already have some compiled code, just use this instead of linking.
- // TODO: Fix recompilation.
- method->SetEntryPointFromQuickCompiledCode(code);
+ const void* code = runtime->GetClassLinker()->GetOatMethodQuickCodeFor(method);
+
+ if (code != nullptr) {
+ // Already have some compiled code, just use this instead of linking.
+ // TODO: Fix recompilation.
+ method->SetEntryPointFromQuickCompiledCode(code);
+ result = true;
+ } else {
+ TimingLogger::ScopedTiming t2("LinkCode", &logger);
+ OatFile::OatMethod oat_method(nullptr, 0);
+ if (AddToCodeCache(method, compiled_method, &oat_method)) {
+ oat_method.LinkMethod(method);
+ CHECK(runtime->GetJit()->GetCodeCache()->ContainsMethod(method)) << PrettyMethod(method);
result = true;
- } else {
- TimingLogger::ScopedTiming t2("MakeExecutable", &logger);
- result = MakeExecutable(compiled_method, method);
}
}
+
// Remove the compiled method to save memory.
- compiler_driver_->RemoveCompiledMethod(method_ref);
+ compiler_driver_->RemoveCompiledMethod(
+ MethodReference(h_class->GetDexCache()->GetDexFile(), method->GetDexMethodIndex()));
runtime->GetJit()->AddTimingLogger(logger);
return result;
}
@@ -220,41 +226,8 @@
return callbacks_.get();
}
-uint8_t* JitCompiler::WriteMethodHeaderAndCode(const CompiledMethod* compiled_method,
- uint8_t* reserve_begin, uint8_t* reserve_end,
- const uint8_t* mapping_table,
- const uint8_t* vmap_table,
- const uint8_t* gc_map) {
- reserve_begin += sizeof(OatQuickMethodHeader);
- reserve_begin = reinterpret_cast<uint8_t*>(
- compiled_method->AlignCode(reinterpret_cast<uintptr_t>(reserve_begin)));
- const auto* quick_code = compiled_method->GetQuickCode();
- CHECK_LE(reserve_begin, reserve_end);
- CHECK_LE(quick_code->size(), static_cast<size_t>(reserve_end - reserve_begin));
- auto* code_ptr = reserve_begin;
- OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(code_ptr) - 1;
- // Construct the header last.
- const auto frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
- const auto core_spill_mask = compiled_method->GetCoreSpillMask();
- const auto fp_spill_mask = compiled_method->GetFpSpillMask();
- const auto code_size = quick_code->size();
- CHECK_NE(code_size, 0U);
- std::copy(quick_code->data(), quick_code->data() + code_size, code_ptr);
- // After we are done writing we need to update the method header.
- // Write out the method header last.
- method_header = new(method_header) OatQuickMethodHeader(
- (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
- (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
- (gc_map == nullptr) ? 0 : code_ptr - gc_map,
- frame_size_in_bytes,
- core_spill_mask,
- fp_spill_mask,
- code_size);
- // Return the code ptr.
- return code_ptr;
-}
-
-bool JitCompiler::AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method,
+bool JitCompiler::AddToCodeCache(ArtMethod* method,
+ const CompiledMethod* compiled_method,
OatFile::OatMethod* out_method) {
Runtime* runtime = Runtime::Current();
JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
@@ -264,7 +237,6 @@
}
const auto code_size = quick_code->size();
Thread* const self = Thread::Current();
- const uint8_t* base = code_cache->CodeCachePtr();
auto* const mapping_table = compiled_method->GetMappingTable();
auto* const vmap_table = compiled_method->GetVmapTable();
auto* const gc_map = compiled_method->GetGcMap();
@@ -297,45 +269,35 @@
}
}
- // Don't touch this until you protect / unprotect the code.
- const size_t reserve_size = sizeof(OatQuickMethodHeader) + quick_code->size() + 32;
- uint8_t* const code_reserve = code_cache->ReserveCode(self, reserve_size);
- if (code_reserve == nullptr) {
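+  // Commit the code to the cache in a single step instead of the reserve/write path above;
+  // the committed region is laid out with the OatQuickMethodHeader first (hence `code_offset`
+  // below), and a null result means the commit failed.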
+ uint8_t* const code = code_cache->CommitCode(self,
+ mapping_table_ptr,
+ vmap_table_ptr,
+ gc_map_ptr,
+ compiled_method->GetFrameSizeInBytes(),
+ compiled_method->GetCoreSpillMask(),
+ compiled_method->GetFpSpillMask(),
+ compiled_method->GetQuickCode()->data(),
+ compiled_method->GetQuickCode()->size());
+
+ if (code == nullptr) {
return false;
}
- auto* code_ptr = WriteMethodHeaderAndCode(
- compiled_method, code_reserve, code_reserve + reserve_size, mapping_table_ptr,
- vmap_table_ptr, gc_map_ptr);
-
- __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
- reinterpret_cast<char*>(code_ptr + quick_code->size()));
const size_t thumb_offset = compiled_method->CodeDelta();
- const uint32_t code_offset = code_ptr - base + thumb_offset;
- *out_method = OatFile::OatMethod(base, code_offset);
+ const uint32_t code_offset = sizeof(OatQuickMethodHeader) + thumb_offset;
+ *out_method = OatFile::OatMethod(code, code_offset);
DCHECK_EQ(out_method->GetGcMap(), gc_map_ptr);
DCHECK_EQ(out_method->GetMappingTable(), mapping_table_ptr);
DCHECK_EQ(out_method->GetVmapTable(), vmap_table_ptr);
DCHECK_EQ(out_method->GetFrameSizeInBytes(), compiled_method->GetFrameSizeInBytes());
DCHECK_EQ(out_method->GetCoreSpillMask(), compiled_method->GetCoreSpillMask());
DCHECK_EQ(out_method->GetFpSpillMask(), compiled_method->GetFpSpillMask());
- VLOG(jit) << "JIT added " << PrettyMethod(method) << "@" << method << " ccache_size="
- << PrettySize(code_cache->CodeCacheSize()) << ": " << reinterpret_cast<void*>(code_ptr)
- << "," << reinterpret_cast<void*>(code_ptr + code_size);
- return true;
-}
-
-bool JitCompiler::MakeExecutable(CompiledMethod* compiled_method, ArtMethod* method) {
- CHECK(method != nullptr);
- CHECK(compiled_method != nullptr);
- OatFile::OatMethod oat_method(nullptr, 0);
- if (!AddToCodeCache(method, compiled_method, &oat_method)) {
- return false;
- }
- // TODO: Flush instruction cache.
- oat_method.LinkMethod(method);
- CHECK(Runtime::Current()->GetJit()->GetCodeCache()->ContainsMethod(method))
- << PrettyMethod(method);
+ VLOG(jit)
+ << "JIT added "
+ << PrettyMethod(method) << "@" << method
+ << " ccache_size=" << PrettySize(code_cache->CodeCacheSize()) << ": "
+ << reinterpret_cast<void*>(code + code_offset)
+ << "," << reinterpret_cast<void*>(code + code_offset + code_size);
return true;
}
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index ef68caa..757f3f3 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -39,10 +39,6 @@
virtual ~JitCompiler();
bool CompileMethod(Thread* self, ArtMethod* method)
SHARED_REQUIRES(Locks::mutator_lock_);
- // This is in the compiler since the runtime doesn't have access to the compiled method
- // structures.
- bool AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method,
- OatFile::OatMethod* out_method) SHARED_REQUIRES(Locks::mutator_lock_);
CompilerCallbacks* GetCompilerCallbacks() const;
size_t GetTotalCompileTime() const {
return total_time_;
@@ -58,12 +54,13 @@
std::unique_ptr<CompilerDriver> compiler_driver_;
std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
- explicit JitCompiler();
- uint8_t* WriteMethodHeaderAndCode(
- const CompiledMethod* compiled_method, uint8_t* reserve_begin, uint8_t* reserve_end,
- const uint8_t* mapping_table, const uint8_t* vmap_table, const uint8_t* gc_map);
- bool MakeExecutable(CompiledMethod* compiled_method, ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ JitCompiler();
+
+ // This is in the compiler since the runtime doesn't have access to the compiled method
+ // structures.
+ bool AddToCodeCache(ArtMethod* method,
+ const CompiledMethod* compiled_method,
+ OatFile::OatMethod* out_method) SHARED_REQUIRES(Locks::mutator_lock_);
DISALLOW_COPY_AND_ASSIGN(JitCompiler);
};
diff --git a/compiler/linker/arm/relative_patcher_arm_base.cc b/compiler/linker/arm/relative_patcher_arm_base.cc
index ceace82..cb9ea38 100644
--- a/compiler/linker/arm/relative_patcher_arm_base.cc
+++ b/compiler/linker/arm/relative_patcher_arm_base.cc
@@ -18,6 +18,7 @@
#include "compiled_method.h"
#include "oat.h"
+#include "oat_quick_method_header.h"
#include "output_stream.h"
namespace art {
diff --git a/compiler/linker/arm/relative_patcher_thumb2_test.cc b/compiler/linker/arm/relative_patcher_thumb2_test.cc
index 13f67e6..5515313 100644
--- a/compiler/linker/arm/relative_patcher_thumb2_test.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2_test.cc
@@ -16,6 +16,7 @@
#include "linker/relative_patcher_test.h"
#include "linker/arm/relative_patcher_thumb2.h"
+#include "oat_quick_method_header.h"
namespace art {
namespace linker {
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 6b9c530..6f234a8 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -22,6 +22,7 @@
#include "driver/compiler_driver.h"
#include "utils/arm64/assembler_arm64.h"
#include "oat.h"
+#include "oat_quick_method_header.h"
#include "output_stream.h"
namespace art {
diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc
index b3af4c6..857d584 100644
--- a/compiler/linker/arm64/relative_patcher_arm64_test.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc
@@ -16,6 +16,7 @@
#include "linker/relative_patcher_test.h"
#include "linker/arm64/relative_patcher_arm64.h"
+#include "oat_quick_method_header.h"
namespace art {
namespace linker {
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index 31d1bce..e357662 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -30,6 +30,7 @@
#include "linker/relative_patcher.h"
#include "method_reference.h"
#include "oat.h"
+#include "oat_quick_method_header.h"
#include "utils/array_ref.h"
#include "vector_output_stream.h"
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index a78a5b3..640698b 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -39,6 +39,7 @@
#include "mirror/class_loader.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
+#include "oat_quick_method_header.h"
#include "os.h"
#include "output_stream.h"
#include "safe_map.h"
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index ce6dc75..c9afdf2 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -71,9 +71,9 @@
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
HInstruction* parameter1 = new (&allocator_)
- HParameterValue(0, Primitive::kPrimNot); // array
+ HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimNot); // array
HInstruction* parameter2 = new (&allocator_)
- HParameterValue(0, Primitive::kPrimInt); // i
+ HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimInt); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
@@ -168,9 +168,9 @@
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
HInstruction* parameter1 = new (&allocator_)
- HParameterValue(0, Primitive::kPrimNot); // array
+ HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimNot); // array
HInstruction* parameter2 = new (&allocator_)
- HParameterValue(0, Primitive::kPrimInt); // i
+ HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimInt); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
@@ -232,9 +232,9 @@
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
HInstruction* parameter1 = new (&allocator_)
- HParameterValue(0, Primitive::kPrimNot); // array
+ HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimNot); // array
HInstruction* parameter2 = new (&allocator_)
- HParameterValue(0, Primitive::kPrimInt); // i
+ HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimInt); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
@@ -295,7 +295,8 @@
HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator_) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (&allocator_) HParameterValue(
+ graph_->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
HInstruction* constant_5 = graph_->GetIntConstant(5);
@@ -363,7 +364,8 @@
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
HInstruction* constant_initial = graph->GetIntConstant(initial);
@@ -477,7 +479,8 @@
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
HInstruction* constant_initial = graph->GetIntConstant(initial);
@@ -689,7 +692,8 @@
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
HInstruction* constant_initial = graph->GetIntConstant(initial);
@@ -791,7 +795,8 @@
HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator_) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (&allocator_) HParameterValue(
+ graph_->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
HInstruction* constant_0 = graph_->GetIntConstant(0);
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 21540e8..8ca352f 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -159,9 +159,13 @@
int locals_index = locals_.size() - number_of_parameters;
int parameter_index = 0;
+ const DexFile::MethodId& referrer_method_id =
+ dex_file_->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
if (!dex_compilation_unit_->IsStatic()) {
// Add the implicit 'this' argument, not expressed in the signature.
- HParameterValue* parameter = new (arena_) HParameterValue(parameter_index++,
+ HParameterValue* parameter = new (arena_) HParameterValue(*dex_file_,
+ referrer_method_id.class_idx_,
+ parameter_index++,
Primitive::kPrimNot,
true);
entry_block_->AddInstruction(parameter);
@@ -170,11 +174,16 @@
number_of_parameters--;
}
- uint32_t pos = 1;
- for (int i = 0; i < number_of_parameters; i++) {
- HParameterValue* parameter = new (arena_) HParameterValue(parameter_index++,
- Primitive::GetType(shorty[pos++]),
- false);
+ const DexFile::ProtoId& proto = dex_file_->GetMethodPrototype(referrer_method_id);
+ const DexFile::TypeList* arg_types = dex_file_->GetProtoParameters(proto);
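+  // The shorty starts with the return type, so `shorty_pos` runs one ahead of the indices
+  // into `arg_types`.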
+ for (int i = 0, shorty_pos = 1; i < number_of_parameters; i++) {
+ HParameterValue* parameter = new (arena_) HParameterValue(
+ *dex_file_,
+ arg_types->GetTypeItem(shorty_pos - 1).type_idx_,
+ parameter_index++,
+ Primitive::GetType(shorty[shorty_pos]),
+ false);
+ ++shorty_pos;
entry_block_->AddInstruction(parameter);
HLocal* local = GetLocalAt(locals_index++);
// Store the parameter value in the local that the dex code will use
@@ -1232,12 +1241,14 @@
field_index,
dex_pc);
} else {
+ uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
field_set = new (arena_) HInstanceFieldSet(null_check,
value,
field_type,
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
+ class_def_index,
*dex_file_,
dex_compilation_unit_->GetDexCache(),
dex_pc);
@@ -1252,11 +1263,13 @@
field_index,
dex_pc);
} else {
+ uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
field_get = new (arena_) HInstanceFieldGet(null_check,
field_type,
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
+ class_def_index,
*dex_file_,
dex_compilation_unit_->GetDexCache(),
dex_pc);
@@ -1398,6 +1411,8 @@
cls = new (arena_) HClinitCheck(constant, dex_pc);
current_block_->AddInstruction(cls);
}
+
+ uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
if (is_put) {
// We need to keep the class alive before loading the value.
Temporaries temps(graph_);
@@ -1410,6 +1425,7 @@
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
+ class_def_index,
*dex_file_,
dex_cache_,
dex_pc));
@@ -1419,6 +1435,7 @@
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
+ class_def_index,
*dex_file_,
dex_cache_,
dex_pc));
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 6a743eb..1c62dfa 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -32,6 +32,10 @@
#include "code_generator_x86_64.h"
#endif
+#ifdef ART_ENABLE_CODEGEN_mips
+#include "code_generator_mips.h"
+#endif
+
#ifdef ART_ENABLE_CODEGEN_mips64
#include "code_generator_mips64.h"
#endif
@@ -742,11 +746,12 @@
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips
- case kMips:
- UNUSED(compiler_options);
- UNUSED(graph);
- UNUSED(isa_features);
- return nullptr;
+ case kMips: {
+ return new mips::CodeGeneratorMIPS(graph,
+ *isa_features.AsMipsInstructionSetFeatures(),
+ compiler_options,
+ stats);
+ }
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64: {
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index f68b11b..1773c06 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1580,6 +1580,21 @@
HandleBinaryOp(instruction);
}
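+// `HArm64IntermediateAddress` computes `base + data offset` explicitly so that the array
+// access code below can reuse the already-formed address instead of re-adding the offset
+// (see `InstructionSimplifierArm64::TryExtractArrayAccessAddress()`).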
+void LocationsBuilderARM64::VisitArm64IntermediateAddress(HArm64IntermediateAddress* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->GetOffset(), instruction));
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitArm64IntermediateAddress(
+ HArm64IntermediateAddress* instruction) {
+ __ Add(OutputRegister(instruction),
+ InputRegisterAt(instruction, 0),
+ Operand(InputOperandAt(instruction, 1)));
+}
+
void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
@@ -1593,14 +1608,16 @@
}
void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
- LocationSummary* locations = instruction->GetLocations();
Primitive::Type type = instruction->GetType();
Register obj = InputRegisterAt(instruction, 0);
- Location index = locations->InAt(1);
+ Location index = instruction->GetLocations()->InAt(1);
size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
MemOperand source = HeapOperand(obj);
+ CPURegister dest = OutputCPURegister(instruction);
+
MacroAssembler* masm = GetVIXLAssembler();
UseScratchRegisterScope temps(masm);
+ // Block pools between `Load` and `MaybeRecordImplicitNullCheck`.
BlockPoolsScope block_pools(masm);
if (index.IsConstant()) {
@@ -1608,15 +1625,26 @@
source = HeapOperand(obj, offset);
} else {
Register temp = temps.AcquireSameSizeAs(obj);
- __ Add(temp, obj, offset);
+ if (instruction->GetArray()->IsArm64IntermediateAddress()) {
+ // We do not need to compute the intermediate address from the array: the
+ // input instruction has done it already. See the comment in
+ // `InstructionSimplifierArm64::TryExtractArrayAccessAddress()`.
+ if (kIsDebugBuild) {
+ HArm64IntermediateAddress* tmp = instruction->GetArray()->AsArm64IntermediateAddress();
+ DCHECK(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset);
+ }
+ temp = obj;
+ } else {
+ __ Add(temp, obj, offset);
+ }
source = HeapOperand(temp, XRegisterFrom(index), LSL, Primitive::ComponentSizeShift(type));
}
- codegen_->Load(type, OutputCPURegister(instruction), source);
+ codegen_->Load(type, dest, source);
codegen_->MaybeRecordImplicitNullCheck(instruction);
- if (type == Primitive::kPrimNot) {
- GetAssembler()->MaybeUnpoisonHeapReference(OutputCPURegister(instruction).W());
+ if (instruction->GetType() == Primitive::kPrimNot) {
+ GetAssembler()->MaybeUnpoisonHeapReference(dest.W());
}
}
@@ -1670,7 +1698,18 @@
} else {
UseScratchRegisterScope temps(masm);
Register temp = temps.AcquireSameSizeAs(array);
- __ Add(temp, array, offset);
+ if (instruction->GetArray()->IsArm64IntermediateAddress()) {
+ // We do not need to compute the intermediate address from the array: the
+ // input instruction has done it already. See the comment in
+ // `InstructionSimplifierArm64::TryExtractArrayAccessAddress()`.
+ if (kIsDebugBuild) {
+ HArm64IntermediateAddress* tmp = instruction->GetArray()->AsArm64IntermediateAddress();
+ DCHECK(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset);
+ }
+ temp = array;
+ } else {
+ __ Add(temp, array, offset);
+ }
destination = HeapOperand(temp,
XRegisterFrom(index),
LSL,
@@ -1680,6 +1719,7 @@
codegen_->MaybeRecordImplicitNullCheck(instruction);
} else {
DCHECK(needs_write_barrier);
+ DCHECK(!instruction->GetArray()->IsArm64IntermediateAddress());
vixl::Label done;
SlowPathCodeARM64* slow_path = nullptr;
{
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index a068b48..799f1bd 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -382,7 +382,7 @@
uint32_t dex_pc,
SlowPathCode* slow_path);
- ParallelMoveResolverARM64* GetMoveResolver() { return &move_resolver_; }
+ ParallelMoveResolverARM64* GetMoveResolver() OVERRIDE { return &move_resolver_; }
bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
return false;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
new file mode 100644
index 0000000..8ba4556
--- /dev/null
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -0,0 +1,4186 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_generator_mips.h"
+
+#include "arch/mips/entrypoints_direct_mips.h"
+#include "arch/mips/instruction_set_features_mips.h"
+#include "art_method.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "entrypoints/quick/quick_entrypoints_enum.h"
+#include "gc/accounting/card_table.h"
+#include "intrinsics.h"
+#include "mirror/array-inl.h"
+#include "mirror/class-inl.h"
+#include "offsets.h"
+#include "thread.h"
+#include "utils/assembler.h"
+#include "utils/mips/assembler_mips.h"
+#include "utils/stack_checks.h"
+
+namespace art {
+namespace mips {
+
+static constexpr int kCurrentMethodStackOffset = 0;
+static constexpr Register kMethodRegisterArgument = A0;
+
+// We need extra temporary/scratch registers (in addition to AT) in some cases.
+static constexpr Register TMP = T8;
+static constexpr FRegister FTMP = F8;
+
+// ART Thread Register.
+static constexpr Register TR = S1;
+
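+// Return value location under the MIPS calling convention: V0 for 32-bit and reference
+// values, the V0/V1 pair for longs, and F0 for floats and doubles.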
+Location MipsReturnLocation(Primitive::Type return_type) {
+ switch (return_type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ return Location::RegisterLocation(V0);
+
+ case Primitive::kPrimLong:
+ return Location::RegisterPairLocation(V0, V1);
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ return Location::FpuRegisterLocation(F0);
+
+ case Primitive::kPrimVoid:
+ return Location();
+ }
+ UNREACHABLE();
+}
+
+Location InvokeDexCallingConventionVisitorMIPS::GetReturnLocation(Primitive::Type type) const {
+ return MipsReturnLocation(type);
+}
+
+Location InvokeDexCallingConventionVisitorMIPS::GetMethodLocation() const {
+ return Location::RegisterLocation(kMethodRegisterArgument);
+}
+
+Location InvokeDexCallingConventionVisitorMIPS::GetNextLocation(Primitive::Type type) {
+ Location next_location;
+
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ uint32_t gp_index = gp_index_++;
+ if (gp_index < calling_convention.GetNumberOfRegisters()) {
+ next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index));
+ } else {
+ size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
+ next_location = Location::StackSlot(stack_offset);
+ }
+ break;
+ }
+
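+    // Longs are passed in an aligned even/odd GPR pair. If the next free register is A1,
+    // it is skipped so that the A2/A3 pair is used instead.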
+ case Primitive::kPrimLong: {
+ uint32_t gp_index = gp_index_;
+ gp_index_ += 2;
+ if (gp_index + 1 < calling_convention.GetNumberOfRegisters()) {
+ if (calling_convention.GetRegisterAt(gp_index) == A1) {
+ gp_index_++; // Skip A1, and use A2_A3 instead.
+ gp_index++;
+ }
+ Register low_even = calling_convention.GetRegisterAt(gp_index);
+ Register high_odd = calling_convention.GetRegisterAt(gp_index + 1);
+ DCHECK_EQ(low_even + 1, high_odd);
+ next_location = Location::RegisterPairLocation(low_even, high_odd);
+ } else {
+ size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
+ next_location = Location::DoubleStackSlot(stack_offset);
+ }
+ break;
+ }
+
+  // Note: both float and double types are stored in even FPU registers. On 32-bit FPUs, a
+  // double occupies the even/odd pair, while a float is stored in the even register only.
+  // On 64-bit FPUs, both doubles and floats are stored in even registers only.
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ uint32_t float_index = float_index_++;
+ if (float_index < calling_convention.GetNumberOfFpuRegisters()) {
+ next_location = Location::FpuRegisterLocation(
+ calling_convention.GetFpuRegisterAt(float_index));
+ } else {
+ size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
+ next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
+ : Location::StackSlot(stack_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected parameter type " << type;
+ break;
+ }
+
+ // Space on the stack is reserved for all arguments.
+ stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;
+
+ return next_location;
+}
+
+Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) {
+ return MipsReturnLocation(type);
+}
+
+#define __ down_cast<CodeGeneratorMIPS*>(codegen)->GetAssembler()->
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, x).Int32Value()
+
+class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ explicit BoundsCheckSlowPathMIPS(HBoundsCheck* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+ __ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ codegen->EmitParallelMoves(locations->InAt(0),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimInt,
+ locations->InAt(1),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimInt);
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
+ instruction_,
+ instruction_->GetDexPc(),
+ this,
+ IsDirectEntrypoint(kQuickThrowArrayBounds));
+ CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
+ }
+
+ bool IsFatal() const OVERRIDE { return true; }
+
+ const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS"; }
+
+ private:
+ HBoundsCheck* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS);
+};
+
+class DivZeroCheckSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ explicit DivZeroCheckSlowPathMIPS(HDivZeroCheck* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+ __ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
+ instruction_,
+ instruction_->GetDexPc(),
+ this,
+ IsDirectEntrypoint(kQuickThrowDivZero));
+ CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
+ }
+
+ bool IsFatal() const OVERRIDE { return true; }
+
+ const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS"; }
+
+ private:
+ HDivZeroCheck* const instruction_;
+ DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS);
+};
+
+class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ LoadClassSlowPathMIPS(HLoadClass* cls,
+ HInstruction* at,
+ uint32_t dex_pc,
+ bool do_clinit)
+ : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+ }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = at_->GetLocations();
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
+
+ int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
+ : QUICK_ENTRY_POINT(pInitializeType);
+ bool direct = do_clinit_ ? IsDirectEntrypoint(kQuickInitializeStaticStorage)
+ : IsDirectEntrypoint(kQuickInitializeType);
+
+ mips_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this, direct);
+ if (do_clinit_) {
+ CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
+ } else {
+ CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
+ }
+
+ // Move the class to the desired location.
+ Location out = locations->Out();
+ if (out.IsValid()) {
+ DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+ Primitive::Type type = at_->GetType();
+ mips_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
+ }
+
+ RestoreLiveRegisters(codegen, locations);
+ __ B(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS"; }
+
+ private:
+ // The class this slow path will load.
+ HLoadClass* const cls_;
+
+ // The instruction where this slow path is happening.
+ // (Might be the load class or an initialization check).
+ HInstruction* const at_;
+
+ // The dex PC of `at_`.
+ const uint32_t dex_pc_;
+
+ // Whether to initialize the class.
+ const bool do_clinit_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS);
+};
+
+class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ explicit LoadStringSlowPathMIPS(HLoadString* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ __ LoadConst32(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
+ instruction_,
+ instruction_->GetDexPc(),
+ this,
+ IsDirectEntrypoint(kQuickResolveString));
+ CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
+ Primitive::Type type = instruction_->GetType();
+ mips_codegen->MoveLocation(locations->Out(),
+ calling_convention.GetReturnLocation(type),
+ type);
+
+ RestoreLiveRegisters(codegen, locations);
+ __ B(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS"; }
+
+ private:
+ HLoadString* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS);
+};
+
+class NullCheckSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ explicit NullCheckSlowPathMIPS(HNullCheck* instr) : instruction_(instr) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+ __ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
+ instruction_,
+ instruction_->GetDexPc(),
+ this,
+ IsDirectEntrypoint(kQuickThrowNullPointer));
+ CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
+ }
+
+ bool IsFatal() const OVERRIDE { return true; }
+
+ const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS"; }
+
+ private:
+ HNullCheck* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS);
+};
+
+class SuspendCheckSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ SuspendCheckSlowPathMIPS(HSuspendCheck* instruction, HBasicBlock* successor)
+ : instruction_(instruction), successor_(successor) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
+ instruction_,
+ instruction_->GetDexPc(),
+ this,
+ IsDirectEntrypoint(kQuickTestSuspend));
+ CheckEntrypointTypes<kQuickTestSuspend, void, void>();
+ RestoreLiveRegisters(codegen, instruction_->GetLocations());
+ if (successor_ == nullptr) {
+ __ B(GetReturnLabel());
+ } else {
+ __ B(mips_codegen->GetLabelOf(successor_));
+ }
+ }
+
+ MipsLabel* GetReturnLabel() {
+ DCHECK(successor_ == nullptr);
+ return &return_label_;
+ }
+
+ const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS"; }
+
+ private:
+ HSuspendCheck* const instruction_;
+ // If not null, the block to branch to after the suspend check.
+ HBasicBlock* const successor_;
+
+ // If `successor_` is null, the label to branch to after the suspend check.
+ MipsLabel return_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS);
+};
+
+class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ explicit TypeCheckSlowPathMIPS(HInstruction* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) : locations->Out();
+ uint32_t dex_pc = instruction_->GetDexPc();
+ DCHECK(instruction_->IsCheckCast()
+ || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ codegen->EmitParallelMoves(locations->InAt(1),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ object_class,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimNot);
+
+ if (instruction_->IsInstanceOf()) {
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
+ instruction_,
+ dex_pc,
+ this,
+ IsDirectEntrypoint(kQuickInstanceofNonTrivial));
+ Primitive::Type ret_type = instruction_->GetType();
+ Location ret_loc = calling_convention.GetReturnLocation(ret_type);
+ mips_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial,
+ uint32_t,
+ const mirror::Class*,
+ const mirror::Class*>();
+ } else {
+ DCHECK(instruction_->IsCheckCast());
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
+ instruction_,
+ dex_pc,
+ this,
+ IsDirectEntrypoint(kQuickCheckCast));
+ CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+ }
+
+ RestoreLiveRegisters(codegen, locations);
+ __ B(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS"; }
+
+ private:
+ HInstruction* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS);
+};
+
+class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ explicit DeoptimizationSlowPathMIPS(HInstruction* instruction)
+ : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ DCHECK(instruction_->IsDeoptimize());
+ HDeoptimize* deoptimize = instruction_->AsDeoptimize();
+ uint32_t dex_pc = deoptimize->GetDexPc();
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
+ instruction_,
+ dex_pc,
+ this,
+ IsDirectEntrypoint(kQuickDeoptimize));
+ }
+
+ const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS"; }
+
+ private:
+ HInstruction* const instruction_;
+ DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS);
+};
+
+CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph,
+ const MipsInstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats)
+ : CodeGenerator(graph,
+ kNumberOfCoreRegisters,
+ kNumberOfFRegisters,
+ kNumberOfRegisterPairs,
+ ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
+ arraysize(kCoreCalleeSaves)),
+ ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
+ arraysize(kFpuCalleeSaves)),
+ compiler_options,
+ stats),
+ block_labels_(nullptr),
+ location_builder_(graph, this),
+ instruction_visitor_(graph, this),
+ move_resolver_(graph->GetArena(), this),
+ assembler_(&isa_features),
+ isa_features_(isa_features) {
+ // Save RA (containing the return address) to mimic Quick.
+ AddAllocatedRegister(Location::RegisterLocation(RA));
+}
+
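+// From here on, `__` expands to this code generator's own MipsAssembler.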
+#undef __
+#define __ down_cast<MipsAssembler*>(GetAssembler())->
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, x).Int32Value()
+
+void CodeGeneratorMIPS::Finalize(CodeAllocator* allocator) {
+ // Ensure that we fix up branches.
+ __ FinalizeCode();
+
+ // Adjust native pc offsets in stack maps.
+ for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
+ uint32_t old_position = stack_map_stream_.GetStackMap(i).native_pc_offset;
+ uint32_t new_position = __ GetAdjustedPosition(old_position);
+ DCHECK_GE(new_position, old_position);
+ stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
+ }
+
+ // Adjust pc offsets for the disassembly information.
+ if (disasm_info_ != nullptr) {
+ GeneratedCodeInterval* frame_entry_interval = disasm_info_->GetFrameEntryInterval();
+ frame_entry_interval->start = __ GetAdjustedPosition(frame_entry_interval->start);
+ frame_entry_interval->end = __ GetAdjustedPosition(frame_entry_interval->end);
+ for (auto& it : *disasm_info_->GetInstructionIntervals()) {
+ it.second.start = __ GetAdjustedPosition(it.second.start);
+ it.second.end = __ GetAdjustedPosition(it.second.end);
+ }
+ for (auto& it : *disasm_info_->GetSlowPathIntervals()) {
+ it.code_interval.start = __ GetAdjustedPosition(it.code_interval.start);
+ it.code_interval.end = __ GetAdjustedPosition(it.code_interval.end);
+ }
+ }
+
+ CodeGenerator::Finalize(allocator);
+}
+
+MipsAssembler* ParallelMoveResolverMIPS::GetAssembler() const {
+ return codegen_->GetAssembler();
+}
+
+void ParallelMoveResolverMIPS::EmitMove(size_t index) {
+ DCHECK_LT(index, moves_.size());
+ MoveOperands* move = moves_[index];
+ codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
+}
+
+void ParallelMoveResolverMIPS::EmitSwap(size_t index) {
+ DCHECK_LT(index, moves_.size());
+ MoveOperands* move = moves_[index];
+ Primitive::Type type = move->GetType();
+ Location loc1 = move->GetDestination();
+ Location loc2 = move->GetSource();
+
+ DCHECK(!loc1.IsConstant());
+ DCHECK(!loc2.IsConstant());
+
+ if (loc1.Equals(loc2)) {
+ return;
+ }
+
+ if (loc1.IsRegister() && loc2.IsRegister()) {
+ // Swap 2 GPRs.
+ Register r1 = loc1.AsRegister<Register>();
+ Register r2 = loc2.AsRegister<Register>();
+ __ Move(TMP, r2);
+ __ Move(r2, r1);
+ __ Move(r1, TMP);
+ } else if (loc1.IsFpuRegister() && loc2.IsFpuRegister()) {
+ FRegister f1 = loc1.AsFpuRegister<FRegister>();
+ FRegister f2 = loc2.AsFpuRegister<FRegister>();
+ if (type == Primitive::kPrimFloat) {
+ __ MovS(FTMP, f2);
+ __ MovS(f2, f1);
+ __ MovS(f1, FTMP);
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimDouble);
+ __ MovD(FTMP, f2);
+ __ MovD(f2, f1);
+ __ MovD(f1, FTMP);
+ }
+ } else if ((loc1.IsRegister() && loc2.IsFpuRegister()) ||
+ (loc1.IsFpuRegister() && loc2.IsRegister())) {
+ // Swap FPR and GPR.
+ DCHECK_EQ(type, Primitive::kPrimFloat); // Can only swap a float.
+ FRegister f1 = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
+ : loc2.AsFpuRegister<FRegister>();
+ Register r2 = loc1.IsRegister() ? loc1.AsRegister<Register>()
+ : loc2.AsRegister<Register>();
+ __ Move(TMP, r2);
+ __ Mfc1(r2, f1);
+ __ Mtc1(TMP, f1);
+ } else if (loc1.IsRegisterPair() && loc2.IsRegisterPair()) {
+ // Swap 2 GPR register pairs.
+ Register r1 = loc1.AsRegisterPairLow<Register>();
+ Register r2 = loc2.AsRegisterPairLow<Register>();
+ __ Move(TMP, r2);
+ __ Move(r2, r1);
+ __ Move(r1, TMP);
+ r1 = loc1.AsRegisterPairHigh<Register>();
+ r2 = loc2.AsRegisterPairHigh<Register>();
+ __ Move(TMP, r2);
+ __ Move(r2, r1);
+ __ Move(r1, TMP);
+ } else if ((loc1.IsRegisterPair() && loc2.IsFpuRegister()) ||
+ (loc1.IsFpuRegister() && loc2.IsRegisterPair())) {
+ // Swap FPR and GPR register pair.
+ DCHECK_EQ(type, Primitive::kPrimDouble);
+ FRegister f1 = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
+ : loc2.AsFpuRegister<FRegister>();
+ Register r2_l = loc1.IsRegisterPair() ? loc1.AsRegisterPairLow<Register>()
+ : loc2.AsRegisterPairLow<Register>();
+ Register r2_h = loc1.IsRegisterPair() ? loc1.AsRegisterPairHigh<Register>()
+ : loc2.AsRegisterPairHigh<Register>();
+    // Use 2 temporary registers because we can't first swap the low 32 bits of an FPR and
+    // then swap the high 32 bits of the same FPR. mtc1 makes the high 32 bits of an FPR
+    // unpredictable and the following mfhc1 will fail.
+ __ Mfc1(TMP, f1);
+ __ Mfhc1(AT, f1);
+ __ Mtc1(r2_l, f1);
+ __ Mthc1(r2_h, f1);
+ __ Move(r2_l, TMP);
+ __ Move(r2_h, AT);
+ } else if (loc1.IsStackSlot() && loc2.IsStackSlot()) {
+ Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ false);
+ } else if (loc1.IsDoubleStackSlot() && loc2.IsDoubleStackSlot()) {
+ Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ true);
+ } else {
+ LOG(FATAL) << "Swap between " << loc1 << " and " << loc2 << " is unsupported";
+ }
+}
+
+void ParallelMoveResolverMIPS::RestoreScratch(int reg) {
+ __ Pop(static_cast<Register>(reg));
+}
+
+void ParallelMoveResolverMIPS::SpillScratch(int reg) {
+ __ Push(static_cast<Register>(reg));
+}
+
+void ParallelMoveResolverMIPS::Exchange(int index1, int index2, bool double_slot) {
+ // Allocate a scratch register other than TMP, if available.
+ // Else, spill V0 (arbitrary choice) and use it as a scratch register (it will be
+ // automatically unspilled when the scratch scope object is destroyed).
+ ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
+ // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
+ int stack_offset = ensure_scratch.IsSpilled() ? kMipsWordSize : 0;
+ for (int i = 0; i <= (double_slot ? 1 : 0); i++, stack_offset += kMipsWordSize) {
+ __ LoadFromOffset(kLoadWord,
+ Register(ensure_scratch.GetRegister()),
+ SP,
+ index1 + stack_offset);
+ __ LoadFromOffset(kLoadWord,
+ TMP,
+ SP,
+ index2 + stack_offset);
+ __ StoreToOffset(kStoreWord,
+ Register(ensure_scratch.GetRegister()),
+ SP,
+ index2 + stack_offset);
+ __ StoreToOffset(kStoreWord, TMP, SP, index1 + stack_offset);
+ }
+}
+
+static dwarf::Reg DWARFReg(Register reg) {
+ return dwarf::Reg::MipsCore(static_cast<int>(reg));
+}
+
+// TODO: mapping of floating-point registers to DWARF.
+
+void CodeGeneratorMIPS::GenerateFrameEntry() {
+ __ Bind(&frame_entry_label_);
+
+ bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips) || !IsLeafMethod();
+
+ if (do_overflow_check) {
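+    // Probe the lowest address the new frame may touch with a load into ZERO
+    // (a harmless read). If the stack has grown into the protected region the
+    // load faults and the runtime's fault handler turns it into a
+    // StackOverflowError; RecordPcInfo maps the faulting pc back to this method.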
+ __ LoadFromOffset(kLoadWord,
+ ZERO,
+ SP,
+ -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips)));
+ RecordPcInfo(nullptr, 0);
+ }
+
+ if (HasEmptyFrame()) {
+ return;
+ }
+
+ // Make sure the frame size isn't unreasonably large.
+ if (GetFrameSize() > GetStackOverflowReservedBytes(kMips)) {
+ LOG(FATAL) << "Stack frame larger than " << GetStackOverflowReservedBytes(kMips) << " bytes";
+ }
+
+ // Spill callee-saved registers.
+ // Note that their cumulative size is small and they can be indexed using
+ // 16-bit offsets.
+
+ // TODO: increment/decrement SP in one step instead of two or remove this comment.
+
+ uint32_t ofs = FrameEntrySpillSize();
+ bool unaligned_float = ofs & 0x7;
+ bool fpu_32bit = isa_features_.Is32BitFloatingPoint();
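+  // If the spill area size is not a multiple of 8, the FPU save slots are only
+  // 4-byte aligned, so each callee-saved double is then saved as two 32-bit
+  // halves instead of with a single sdc1.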
+ __ IncreaseFrameSize(ofs);
+
+ for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
+ Register reg = kCoreCalleeSaves[i];
+ if (allocated_registers_.ContainsCoreRegister(reg)) {
+ ofs -= kMipsWordSize;
+ __ Sw(reg, SP, ofs);
+ __ cfi().RelOffset(DWARFReg(reg), ofs);
+ }
+ }
+
+ for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
+ FRegister reg = kFpuCalleeSaves[i];
+ if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
+ ofs -= kMipsDoublewordSize;
+ // TODO: Change the frame to avoid unaligned accesses for fpu registers.
+ if (unaligned_float) {
+ if (fpu_32bit) {
+ __ Swc1(reg, SP, ofs);
+ __ Swc1(static_cast<FRegister>(reg + 1), SP, ofs + 4);
+ } else {
+ __ Mfhc1(TMP, reg);
+ __ Swc1(reg, SP, ofs);
+ __ Sw(TMP, SP, ofs + 4);
+ }
+ } else {
+ __ Sdc1(reg, SP, ofs);
+ }
+ // TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
+ }
+ }
+
+ // Allocate the rest of the frame and store the current method pointer
+ // at its end.
+
+ __ IncreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
+
+ static_assert(IsInt<16>(kCurrentMethodStackOffset),
+ "kCurrentMethodStackOffset must fit into int16_t");
+ __ Sw(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
+}
+
+void CodeGeneratorMIPS::GenerateFrameExit() {
+ __ cfi().RememberState();
+
+ if (!HasEmptyFrame()) {
+ // Deallocate the rest of the frame.
+
+ __ DecreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
+
+ // Restore callee-saved registers.
+ // Note that their cumulative size is small and they can be indexed using
+ // 16-bit offsets.
+
+ // TODO: increment/decrement SP in one step instead of two or remove this comment.
+
+ uint32_t ofs = 0;
+ bool unaligned_float = FrameEntrySpillSize() & 0x7;
+ bool fpu_32bit = isa_features_.Is32BitFloatingPoint();
+
+ for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
+ FRegister reg = kFpuCalleeSaves[i];
+ if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
+ if (unaligned_float) {
+ if (fpu_32bit) {
+ __ Lwc1(reg, SP, ofs);
+ __ Lwc1(static_cast<FRegister>(reg + 1), SP, ofs + 4);
+ } else {
+ __ Lwc1(reg, SP, ofs);
+ __ Lw(TMP, SP, ofs + 4);
+ __ Mthc1(TMP, reg);
+ }
+ } else {
+ __ Ldc1(reg, SP, ofs);
+ }
+ ofs += kMipsDoublewordSize;
+ // TODO: __ cfi().Restore(DWARFReg(reg));
+ }
+ }
+
+ for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
+ Register reg = kCoreCalleeSaves[i];
+ if (allocated_registers_.ContainsCoreRegister(reg)) {
+ __ Lw(reg, SP, ofs);
+ ofs += kMipsWordSize;
+ __ cfi().Restore(DWARFReg(reg));
+ }
+ }
+
+ DCHECK_EQ(ofs, FrameEntrySpillSize());
+ __ DecreaseFrameSize(ofs);
+ }
+
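+  // Return to the caller; the NOP fills the JR delay slot.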
+ __ Jr(RA);
+ __ Nop();
+
+ __ cfi().RestoreState();
+ __ cfi().DefCFAOffset(GetFrameSize());
+}
+
+void CodeGeneratorMIPS::Bind(HBasicBlock* block) {
+ __ Bind(GetLabelOf(block));
+}
+
+void CodeGeneratorMIPS::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
+ if (src.Equals(dst)) {
+ return;
+ }
+
+ if (src.IsConstant()) {
+ MoveConstant(dst, src.GetConstant());
+ } else {
+ if (Primitive::Is64BitType(dst_type)) {
+ Move64(dst, src);
+ } else {
+ Move32(dst, src);
+ }
+ }
+}
+
+void CodeGeneratorMIPS::Move32(Location destination, Location source) {
+ if (source.Equals(destination)) {
+ return;
+ }
+
+ if (destination.IsRegister()) {
+ if (source.IsRegister()) {
+ __ Move(destination.AsRegister<Register>(), source.AsRegister<Register>());
+ } else if (source.IsFpuRegister()) {
+ __ Mfc1(destination.AsRegister<Register>(), source.AsFpuRegister<FRegister>());
+ } else {
+ DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
+ __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
+ }
+ } else if (destination.IsFpuRegister()) {
+ if (source.IsRegister()) {
+ __ Mtc1(source.AsRegister<Register>(), destination.AsFpuRegister<FRegister>());
+ } else if (source.IsFpuRegister()) {
+ __ MovS(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>());
+ } else {
+ DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
+ __ LoadSFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
+ }
+ } else {
+ DCHECK(destination.IsStackSlot()) << destination;
+ if (source.IsRegister()) {
+ __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
+ } else if (source.IsFpuRegister()) {
+ __ StoreSToOffset(source.AsFpuRegister<FRegister>(), SP, destination.GetStackIndex());
+ } else {
+ DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
+ __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
+ __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex());
+ }
+ }
+}
+
+void CodeGeneratorMIPS::Move64(Location destination, Location source) {
+ if (source.Equals(destination)) {
+ return;
+ }
+
+ if (destination.IsRegisterPair()) {
+ if (source.IsRegisterPair()) {
+ __ Move(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
+ __ Move(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
+ } else if (source.IsFpuRegister()) {
+ Register dst_high = destination.AsRegisterPairHigh<Register>();
+ Register dst_low = destination.AsRegisterPairLow<Register>();
+ FRegister src = source.AsFpuRegister<FRegister>();
+ __ Mfc1(dst_low, src);
+ __ Mfhc1(dst_high, src);
+ } else {
+ DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
+ int32_t off = source.GetStackIndex();
+ Register r = destination.AsRegisterPairLow<Register>();
+ __ LoadFromOffset(kLoadDoubleword, r, SP, off);
+ }
+ } else if (destination.IsFpuRegister()) {
+ if (source.IsRegisterPair()) {
+ FRegister dst = destination.AsFpuRegister<FRegister>();
+ Register src_high = source.AsRegisterPairHigh<Register>();
+ Register src_low = source.AsRegisterPairLow<Register>();
+ __ Mtc1(src_low, dst);
+ __ Mthc1(src_high, dst);
+ } else if (source.IsFpuRegister()) {
+ __ MovD(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>());
+ } else {
+ DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
+ __ LoadDFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
+ }
+ } else {
+ DCHECK(destination.IsDoubleStackSlot()) << destination;
+ int32_t off = destination.GetStackIndex();
+ if (source.IsRegisterPair()) {
+ __ StoreToOffset(kStoreDoubleword, source.AsRegisterPairLow<Register>(), SP, off);
+ } else if (source.IsFpuRegister()) {
+ __ StoreDToOffset(source.AsFpuRegister<FRegister>(), SP, off);
+ } else {
+ DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
+ __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
+ __ StoreToOffset(kStoreWord, TMP, SP, off);
+ __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex() + 4);
+ __ StoreToOffset(kStoreWord, TMP, SP, off + 4);
+ }
+ }
+}
+
+void CodeGeneratorMIPS::MoveConstant(Location destination, HConstant* c) {
+ if (c->IsIntConstant() || c->IsNullConstant()) {
+ // Move 32 bit constant.
+ int32_t value = GetInt32ValueOf(c);
+ if (destination.IsRegister()) {
+ Register dst = destination.AsRegister<Register>();
+ __ LoadConst32(dst, value);
+ } else {
+ DCHECK(destination.IsStackSlot())
+ << "Cannot move " << c->DebugName() << " to " << destination;
+ __ StoreConst32ToOffset(value, SP, destination.GetStackIndex(), TMP);
+ }
+ } else if (c->IsLongConstant()) {
+ // Move 64 bit constant.
+ int64_t value = GetInt64ValueOf(c);
+ if (destination.IsRegisterPair()) {
+ Register r_h = destination.AsRegisterPairHigh<Register>();
+ Register r_l = destination.AsRegisterPairLow<Register>();
+ __ LoadConst64(r_h, r_l, value);
+ } else {
+ DCHECK(destination.IsDoubleStackSlot())
+ << "Cannot move " << c->DebugName() << " to " << destination;
+ __ StoreConst64ToOffset(value, SP, destination.GetStackIndex(), TMP);
+ }
+ } else if (c->IsFloatConstant()) {
+ // Move 32 bit float constant.
+ int32_t value = GetInt32ValueOf(c);
+ if (destination.IsFpuRegister()) {
+ __ LoadSConst32(destination.AsFpuRegister<FRegister>(), value, TMP);
+ } else {
+ DCHECK(destination.IsStackSlot())
+ << "Cannot move " << c->DebugName() << " to " << destination;
+ __ StoreConst32ToOffset(value, SP, destination.GetStackIndex(), TMP);
+ }
+ } else {
+ // Move 64 bit double constant.
+ DCHECK(c->IsDoubleConstant()) << c->DebugName();
+ int64_t value = GetInt64ValueOf(c);
+ if (destination.IsFpuRegister()) {
+ FRegister fd = destination.AsFpuRegister<FRegister>();
+ __ LoadDConst64(fd, value, TMP);
+ } else {
+ DCHECK(destination.IsDoubleStackSlot())
+ << "Cannot move " << c->DebugName() << " to " << destination;
+ __ StoreConst64ToOffset(value, SP, destination.GetStackIndex(), TMP);
+ }
+ }
+}
+
+void CodeGeneratorMIPS::MoveConstant(Location destination, int32_t value) {
+ DCHECK(destination.IsRegister());
+ Register dst = destination.AsRegister<Register>();
+ __ LoadConst32(dst, value);
+}
+
+void CodeGeneratorMIPS::Move(HInstruction* instruction,
+ Location location,
+ HInstruction* move_for) {
+ LocationSummary* locations = instruction->GetLocations();
+ Primitive::Type type = instruction->GetType();
+ DCHECK_NE(type, Primitive::kPrimVoid);
+
+ if (instruction->IsCurrentMethod()) {
+ Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
+ } else if (locations != nullptr && locations->Out().Equals(location)) {
+ return;
+ } else if (instruction->IsIntConstant()
+ || instruction->IsLongConstant()
+ || instruction->IsNullConstant()) {
+ MoveConstant(location, instruction->AsConstant());
+ } else if (instruction->IsTemporary()) {
+ Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
+ if (temp_location.IsStackSlot()) {
+ Move32(location, temp_location);
+ } else {
+ DCHECK(temp_location.IsDoubleStackSlot());
+ Move64(location, temp_location);
+ }
+ } else if (instruction->IsLoadLocal()) {
+ uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
+ if (Primitive::Is64BitType(type)) {
+ Move64(location, Location::DoubleStackSlot(stack_slot));
+ } else {
+ Move32(location, Location::StackSlot(stack_slot));
+ }
+ } else {
+ DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
+ if (Primitive::Is64BitType(type)) {
+ Move64(location, locations->Out());
+ } else {
+ Move32(location, locations->Out());
+ }
+ }
+}
+
+void CodeGeneratorMIPS::AddLocationAsTemp(Location location, LocationSummary* locations) {
+ if (location.IsRegister()) {
+ locations->AddTemp(location);
+ } else {
+ UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+ }
+}
+
+Location CodeGeneratorMIPS::GetStackLocation(HLoadLocal* load) const {
+ Primitive::Type type = load->GetType();
+
+ switch (type) {
+ case Primitive::kPrimNot:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ return Location::StackSlot(GetStackSlot(load->GetLocal()));
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected type " << type;
+ }
+
+ LOG(FATAL) << "Unreachable";
+ return Location::NoLocation();
+}
+
+void CodeGeneratorMIPS::MarkGCCard(Register object, Register value) {
+ MipsLabel done;
+ Register card = AT;
+ Register temp = TMP;
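+  // Skip the barrier for null values. Otherwise mark the card covering `object`
+  // as dirty: the card index is object >> kCardShift, and the byte stored is the
+  // low 8 bits of the card table base, which the runtime arranges to equal the
+  // dirty-card value.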
+ __ Beqz(value, &done);
+ __ LoadFromOffset(kLoadWord,
+ card,
+ TR,
+ Thread::CardTableOffset<kMipsWordSize>().Int32Value());
+ __ Srl(temp, object, gc::accounting::CardTable::kCardShift);
+ __ Addu(temp, card, temp);
+ __ Sb(card, temp, 0);
+ __ Bind(&done);
+}
+
+void CodeGeneratorMIPS::SetupBlockedRegisters(bool is_baseline) const {
+  // Don't allocate the Dalvik-style register pair used for argument passing.
+ blocked_register_pairs_[A1_A2] = true;
+
+ // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
+ blocked_core_registers_[ZERO] = true;
+ blocked_core_registers_[K0] = true;
+ blocked_core_registers_[K1] = true;
+ blocked_core_registers_[GP] = true;
+ blocked_core_registers_[SP] = true;
+ blocked_core_registers_[RA] = true;
+
+ // AT and TMP(T8) are used as temporary/scratch registers
+ // (similar to how AT is used by MIPS assemblers).
+ blocked_core_registers_[AT] = true;
+ blocked_core_registers_[TMP] = true;
+ blocked_fpu_registers_[FTMP] = true;
+
+ // Reserve suspend and thread registers.
+ blocked_core_registers_[S0] = true;
+ blocked_core_registers_[TR] = true;
+
+  // Reserve T9 for function calls (it holds the callee address in PIC code).
+ blocked_core_registers_[T9] = true;
+
+  // Reserve odd-numbered FPU registers: with 32-bit FPU registers, doubles
+  // occupy even/odd register pairs, so only even-numbered registers are allocated.
+ for (size_t i = 1; i < kNumberOfFRegisters; i += 2) {
+ blocked_fpu_registers_[i] = true;
+ }
+
+ if (is_baseline) {
+ for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
+ blocked_core_registers_[kCoreCalleeSaves[i]] = true;
+ }
+
+ for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
+ blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
+ }
+ }
+
+ UpdateBlockedPairRegisters();
+}
+
+void CodeGeneratorMIPS::UpdateBlockedPairRegisters() const {
+ for (int i = 0; i < kNumberOfRegisterPairs; i++) {
+ MipsManagedRegister current =
+ MipsManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
+ if (blocked_core_registers_[current.AsRegisterPairLow()]
+ || blocked_core_registers_[current.AsRegisterPairHigh()]) {
+ blocked_register_pairs_[i] = true;
+ }
+ }
+}
+
+Location CodeGeneratorMIPS::AllocateFreeRegister(Primitive::Type type) const {
+ switch (type) {
+ case Primitive::kPrimLong: {
+ size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
+ MipsManagedRegister pair =
+ MipsManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
+ DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
+ DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);
+
+ blocked_core_registers_[pair.AsRegisterPairLow()] = true;
+ blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
+ UpdateBlockedPairRegisters();
+ return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
+ }
+
+ case Primitive::kPrimByte:
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
+ // Block all register pairs that contain `reg`.
+ for (int i = 0; i < kNumberOfRegisterPairs; i++) {
+ MipsManagedRegister current =
+ MipsManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
+ if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
+ blocked_register_pairs_[i] = true;
+ }
+ }
+ return Location::RegisterLocation(reg);
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFRegisters);
+ return Location::FpuRegisterLocation(reg);
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << type;
+ }
+
+ UNREACHABLE();
+}
+
+size_t CodeGeneratorMIPS::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
+ __ StoreToOffset(kStoreWord, Register(reg_id), SP, stack_index);
+ return kMipsWordSize;
+}
+
+size_t CodeGeneratorMIPS::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
+ __ LoadFromOffset(kLoadWord, Register(reg_id), SP, stack_index);
+ return kMipsWordSize;
+}
+
+size_t CodeGeneratorMIPS::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
+ __ StoreDToOffset(FRegister(reg_id), SP, stack_index);
+ return kMipsDoublewordSize;
+}
+
+size_t CodeGeneratorMIPS::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
+ __ LoadDFromOffset(FRegister(reg_id), SP, stack_index);
+ return kMipsDoublewordSize;
+}
+
+void CodeGeneratorMIPS::DumpCoreRegister(std::ostream& stream, int reg) const {
+ stream << MipsManagedRegister::FromCoreRegister(Register(reg));
+}
+
+void CodeGeneratorMIPS::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
+ stream << MipsManagedRegister::FromFRegister(FRegister(reg));
+}
+
+void CodeGeneratorMIPS::InvokeRuntime(QuickEntrypointEnum entrypoint,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path) {
+ InvokeRuntime(GetThreadOffset<kMipsWordSize>(entrypoint).Int32Value(),
+ instruction,
+ dex_pc,
+ slow_path,
+ IsDirectEntrypoint(entrypoint));
+}
+
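+// The o32 ABI requires the caller to reserve 16 bytes of outgoing-argument
+// space (home slots for $a0-$a3) in its frame when calling a C function
+// directly; see InvokeRuntime below.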
+constexpr size_t kMipsDirectEntrypointRuntimeOffset = 16;
+
+void CodeGeneratorMIPS::InvokeRuntime(int32_t entry_point_offset,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path,
+ bool is_direct_entrypoint) {
+ if (is_direct_entrypoint) {
+    // Reserve argument space on the stack (for $a0-$a3) for entrypoints that
+    // directly reference native implementations. The called function may use
+    // this space to home the $a0-$a3 registers.
+ __ IncreaseFrameSize(kMipsDirectEntrypointRuntimeOffset);
+ }
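+  // Load the entrypoint from the Thread and call through T9, as required by the
+  // MIPS PIC calling convention; the NOP fills the JALR delay slot.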
+ __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
+ __ Jalr(T9);
+ __ Nop();
+ if (is_direct_entrypoint) {
+ __ DecreaseFrameSize(kMipsDirectEntrypointRuntimeOffset);
+ }
+ RecordPcInfo(instruction, dex_pc, slow_path);
+}
+
+void InstructionCodeGeneratorMIPS::GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path,
+ Register class_reg) {
+ __ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
+ __ LoadConst32(AT, mirror::Class::kStatusInitialized);
+ __ Blt(TMP, AT, slow_path->GetEntryLabel());
+ // Even if the initialized flag is set, we need to ensure consistent memory ordering.
+ __ Sync(0);
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void InstructionCodeGeneratorMIPS::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
+ __ Sync(0); // Only stype 0 is supported.
+}
+
+void InstructionCodeGeneratorMIPS::GenerateSuspendCheck(HSuspendCheck* instruction,
+ HBasicBlock* successor) {
+ SuspendCheckSlowPathMIPS* slow_path =
+ new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS(instruction, successor);
+ codegen_->AddSlowPath(slow_path);
+
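+  // A non-zero value in the thread flags means a suspend or checkpoint request
+  // is pending, so take the slow path.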
+ __ LoadFromOffset(kLoadUnsignedHalfword,
+ TMP,
+ TR,
+ Thread::ThreadFlagsOffset<kMipsWordSize>().Int32Value());
+ if (successor == nullptr) {
+ __ Bnez(TMP, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetReturnLabel());
+ } else {
+ __ Beqz(TMP, codegen_->GetLabelOf(successor));
+ __ B(slow_path->GetEntryLabel());
+ // slow_path will return to GetLabelOf(successor).
+ }
+}
+
+InstructionCodeGeneratorMIPS::InstructionCodeGeneratorMIPS(HGraph* graph,
+ CodeGeneratorMIPS* codegen)
+ : HGraphVisitor(graph),
+ assembler_(codegen->GetAssembler()),
+ codegen_(codegen) {}
+
+void LocationsBuilderMIPS::HandleBinaryOp(HBinaryOperation* instruction) {
+ DCHECK_EQ(instruction->InputCount(), 2U);
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ Primitive::Type type = instruction->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ HInstruction* right = instruction->InputAt(1);
+ bool can_use_imm = false;
+ if (right->IsConstant()) {
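+        // ANDI/ORI/XORI take a zero-extended 16-bit immediate while ADDIU takes a
+        // sign-extended one; a subtraction is emitted as ADDIU with the negated
+        // immediate, so -imm must fit instead.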
+ int32_t imm = CodeGenerator::GetInt32ValueOf(right->AsConstant());
+ if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
+ can_use_imm = IsUint<16>(imm);
+ } else if (instruction->IsAdd()) {
+ can_use_imm = IsInt<16>(imm);
+ } else {
+ DCHECK(instruction->IsSub());
+ can_use_imm = IsInt<16>(-imm);
+ }
+ }
+ if (can_use_imm)
+ locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
+ else
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ // TODO: can 2nd param be const?
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ if (instruction->IsAdd() || instruction->IsSub()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ } else {
+ DCHECK(instruction->IsAnd() || instruction->IsOr() || instruction->IsXor());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ DCHECK(instruction->IsAdd() || instruction->IsSub());
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
+ }
+}
+
+void InstructionCodeGeneratorMIPS::HandleBinaryOp(HBinaryOperation* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register lhs = locations->InAt(0).AsRegister<Register>();
+ Location rhs_location = locations->InAt(1);
+
+ Register rhs_reg = ZERO;
+ int32_t rhs_imm = 0;
+ bool use_imm = rhs_location.IsConstant();
+ if (use_imm) {
+ rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
+ } else {
+ rhs_reg = rhs_location.AsRegister<Register>();
+ }
+
+ if (instruction->IsAnd()) {
+ if (use_imm)
+ __ Andi(dst, lhs, rhs_imm);
+ else
+ __ And(dst, lhs, rhs_reg);
+ } else if (instruction->IsOr()) {
+ if (use_imm)
+ __ Ori(dst, lhs, rhs_imm);
+ else
+ __ Or(dst, lhs, rhs_reg);
+ } else if (instruction->IsXor()) {
+ if (use_imm)
+ __ Xori(dst, lhs, rhs_imm);
+ else
+ __ Xor(dst, lhs, rhs_reg);
+ } else if (instruction->IsAdd()) {
+ if (use_imm)
+ __ Addiu(dst, lhs, rhs_imm);
+ else
+ __ Addu(dst, lhs, rhs_reg);
+ } else {
+ DCHECK(instruction->IsSub());
+ if (use_imm)
+ __ Addiu(dst, lhs, -rhs_imm);
+ else
+ __ Subu(dst, lhs, rhs_reg);
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ // TODO: can 2nd param be const?
+ Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+ Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+ Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
+ Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>();
+ Register rhs_low = locations->InAt(1).AsRegisterPairLow<Register>();
+
+ if (instruction->IsAnd()) {
+ __ And(dst_low, lhs_low, rhs_low);
+ __ And(dst_high, lhs_high, rhs_high);
+ } else if (instruction->IsOr()) {
+ __ Or(dst_low, lhs_low, rhs_low);
+ __ Or(dst_high, lhs_high, rhs_high);
+ } else if (instruction->IsXor()) {
+ __ Xor(dst_low, lhs_low, rhs_low);
+ __ Xor(dst_high, lhs_high, rhs_high);
+ } else if (instruction->IsAdd()) {
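+        // Add the low words, compute the carry with SLTU (the low result wrapped
+        // iff dst_low < lhs_low) and add it into the high word.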
+ __ Addu(dst_low, lhs_low, rhs_low);
+ __ Sltu(TMP, dst_low, lhs_low);
+ __ Addu(dst_high, lhs_high, rhs_high);
+ __ Addu(dst_high, dst_high, TMP);
+ } else {
+ DCHECK(instruction->IsSub());
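+        // Subtract the low words, compute the borrow with SLTU (the subtraction
+        // wrapped iff lhs_low < dst_low) and subtract it from the high word.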
+ __ Subu(dst_low, lhs_low, rhs_low);
+ __ Sltu(TMP, lhs_low, dst_low);
+ __ Subu(dst_high, lhs_high, rhs_high);
+ __ Subu(dst_high, dst_high, TMP);
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
+ FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
+ if (instruction->IsAdd()) {
+ if (type == Primitive::kPrimFloat) {
+ __ AddS(dst, lhs, rhs);
+ } else {
+ __ AddD(dst, lhs, rhs);
+ }
+ } else {
+ DCHECK(instruction->IsSub());
+ if (type == Primitive::kPrimFloat) {
+ __ SubS(dst, lhs, rhs);
+ } else {
+ __ SubD(dst, lhs, rhs);
+ }
+ }
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected binary operation type " << type;
+ }
+}
+
+void LocationsBuilderMIPS::HandleShift(HBinaryOperation* instr) {
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ Primitive::Type type = instr->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected shift type " << type;
+ }
+}
+
+static constexpr size_t kMipsBitsPerWord = kMipsWordSize * kBitsPerByte;
+
+void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+ LocationSummary* locations = instr->GetLocations();
+ Primitive::Type type = instr->GetType();
+
+ Location rhs_location = locations->InAt(1);
+ bool use_imm = rhs_location.IsConstant();
+ Register rhs_reg = use_imm ? ZERO : rhs_location.AsRegister<Register>();
+ int64_t rhs_imm = use_imm ? CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()) : 0;
+ uint32_t shift_mask = (type == Primitive::kPrimInt) ? kMaxIntShiftValue : kMaxLongShiftValue;
+ uint32_t shift_value = rhs_imm & shift_mask;
+
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register lhs = locations->InAt(0).AsRegister<Register>();
+ if (use_imm) {
+ if (instr->IsShl()) {
+ __ Sll(dst, lhs, shift_value);
+ } else if (instr->IsShr()) {
+ __ Sra(dst, lhs, shift_value);
+ } else {
+ __ Srl(dst, lhs, shift_value);
+ }
+ } else {
+ if (instr->IsShl()) {
+ __ Sllv(dst, lhs, rhs_reg);
+ } else if (instr->IsShr()) {
+ __ Srav(dst, lhs, rhs_reg);
+ } else {
+ __ Srlv(dst, lhs, rhs_reg);
+ }
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+ Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+ Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
+ if (use_imm) {
+ if (shift_value == 0) {
+ codegen_->Move64(locations->Out(), locations->InAt(0));
+ } else if (shift_value < kMipsBitsPerWord) {
+ if (instr->IsShl()) {
+ __ Sll(dst_low, lhs_low, shift_value);
+ __ Srl(TMP, lhs_low, kMipsBitsPerWord - shift_value);
+ __ Sll(dst_high, lhs_high, shift_value);
+ __ Or(dst_high, dst_high, TMP);
+ } else if (instr->IsShr()) {
+ __ Sra(dst_high, lhs_high, shift_value);
+ __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
+ __ Srl(dst_low, lhs_low, shift_value);
+ __ Or(dst_low, dst_low, TMP);
+ } else {
+ __ Srl(dst_high, lhs_high, shift_value);
+ __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
+ __ Srl(dst_low, lhs_low, shift_value);
+ __ Or(dst_low, dst_low, TMP);
+ }
+ } else {
+ shift_value -= kMipsBitsPerWord;
+ if (instr->IsShl()) {
+ __ Sll(dst_high, lhs_low, shift_value);
+ __ Move(dst_low, ZERO);
+ } else if (instr->IsShr()) {
+ __ Sra(dst_low, lhs_high, shift_value);
+ __ Sra(dst_high, dst_low, kMipsBitsPerWord - 1);
+ } else {
+ __ Srl(dst_low, lhs_high, shift_value);
+ __ Move(dst_high, ZERO);
+ }
+ }
+ } else {
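+        // Variable shift amounts are taken modulo 64. Each word is shifted by
+        // (s mod 32); the bits crossing the word boundary are recovered by
+        // shifting the other word by (32 - s), done as a pre-shift by 1 plus a
+        // shift by ~s (= 31 - s mod 32, computed with NOR). Bit 5 of the shift
+        // amount (ANDI with 32) selects the extra fix-up needed for shifts of
+        // 32..63.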
+ MipsLabel done;
+ if (instr->IsShl()) {
+ __ Sllv(dst_low, lhs_low, rhs_reg);
+ __ Nor(AT, ZERO, rhs_reg);
+ __ Srl(TMP, lhs_low, 1);
+ __ Srlv(TMP, TMP, AT);
+ __ Sllv(dst_high, lhs_high, rhs_reg);
+ __ Or(dst_high, dst_high, TMP);
+ __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
+ __ Beqz(TMP, &done);
+ __ Move(dst_high, dst_low);
+ __ Move(dst_low, ZERO);
+ } else if (instr->IsShr()) {
+ __ Srav(dst_high, lhs_high, rhs_reg);
+ __ Nor(AT, ZERO, rhs_reg);
+ __ Sll(TMP, lhs_high, 1);
+ __ Sllv(TMP, TMP, AT);
+ __ Srlv(dst_low, lhs_low, rhs_reg);
+ __ Or(dst_low, dst_low, TMP);
+ __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
+ __ Beqz(TMP, &done);
+ __ Move(dst_low, dst_high);
+ __ Sra(dst_high, dst_high, 31);
+ } else {
+ __ Srlv(dst_high, lhs_high, rhs_reg);
+ __ Nor(AT, ZERO, rhs_reg);
+ __ Sll(TMP, lhs_high, 1);
+ __ Sllv(TMP, TMP, AT);
+ __ Srlv(dst_low, lhs_low, rhs_reg);
+ __ Or(dst_low, dst_low, TMP);
+ __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
+ __ Beqz(TMP, &done);
+ __ Move(dst_low, dst_high);
+ __ Move(dst_high, ZERO);
+ }
+ __ Bind(&done);
+ }
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected shift operation type " << type;
+ }
+}
+
+void LocationsBuilderMIPS::VisitAdd(HAdd* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitAdd(HAdd* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS::VisitAnd(HAnd* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitAnd(HAnd* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS::VisitArrayGet(HArrayGet* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (Primitive::IsFloatingPointType(instruction->GetType())) {
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ } else {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Location index = locations->InAt(1);
+ Primitive::Type type = instruction->GetType();
+
+ switch (type) {
+ case Primitive::kPrimBoolean: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
+ Register out = locations->Out().AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+ __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
+ } else {
+ __ Addu(TMP, obj, index.AsRegister<Register>());
+ __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimByte: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
+ Register out = locations->Out().AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+ __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
+ } else {
+ __ Addu(TMP, obj, index.AsRegister<Register>());
+ __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimShort: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
+ Register out = locations->Out().AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_2);
+ __ Addu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimChar: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
+ Register out = locations->Out().AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_2);
+ __ Addu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ Register out = locations->Out().AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ LoadFromOffset(kLoadWord, out, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
+ __ Addu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadWord, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
+ Register out = locations->Out().AsRegisterPairLow<Register>();
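+      // kLoadDoubleword expands to two word loads into the register pair
+      // (out, out + 1) on MIPS32.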
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ __ LoadFromOffset(kLoadDoubleword, out, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
+ __ Addu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
+ FRegister out = locations->Out().AsFpuRegister<FRegister>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ LoadSFromOffset(out, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
+ __ Addu(TMP, obj, TMP);
+ __ LoadSFromOffset(out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
+ FRegister out = locations->Out().AsFpuRegister<FRegister>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ __ LoadDFromOffset(out, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
+ __ Addu(TMP, obj, TMP);
+ __ LoadDFromOffset(out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
+ }
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+}
+
+void LocationsBuilderMIPS::VisitArrayLength(HArrayLength* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorMIPS::VisitArrayLength(HArrayLength* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Register out = locations->Out().AsRegister<Register>();
+ __ LoadFromOffset(kLoadWord, out, obj, offset);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+}
+
+void LocationsBuilderMIPS::VisitArraySet(HArraySet* instruction) {
+ Primitive::Type value_type = instruction->GetComponentType();
+ bool is_object = value_type == Primitive::kPrimNot;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction,
+ is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
+ if (is_object) {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
+ locations->SetInAt(2, Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(2, Location::RequiresRegister());
+ }
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Location index = locations->InAt(1);
+ Primitive::Type value_type = instruction->GetComponentType();
+ bool needs_runtime_call = locations->WillCall();
+ bool needs_write_barrier =
+ CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
+
+ switch (value_type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
+ Register value = locations->InAt(2).AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+ __ StoreToOffset(kStoreByte, value, obj, offset);
+ } else {
+ __ Addu(TMP, obj, index.AsRegister<Register>());
+ __ StoreToOffset(kStoreByte, value, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
+ Register value = locations->InAt(2).AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ __ StoreToOffset(kStoreHalfword, value, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_2);
+ __ Addu(TMP, obj, TMP);
+ __ StoreToOffset(kStoreHalfword, value, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ if (!needs_runtime_call) {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ Register value = locations->InAt(2).AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ StoreToOffset(kStoreWord, value, obj, offset);
+ } else {
+ DCHECK(index.IsRegister()) << index;
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
+ __ Addu(TMP, obj, TMP);
+ __ StoreToOffset(kStoreWord, value, TMP, data_offset);
+ }
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ if (needs_write_barrier) {
+ DCHECK_EQ(value_type, Primitive::kPrimNot);
+ codegen_->MarkGCCard(obj, value);
+ }
+ } else {
+ DCHECK_EQ(value_type, Primitive::kPrimNot);
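+        // Storing into an object array may need a component-type check as well
+        // as a write barrier, so defer to the pAputObject runtime entrypoint.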
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickAputObject));
+ CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
+ Register value = locations->InAt(2).AsRegisterPairLow<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ __ StoreToOffset(kStoreDoubleword, value, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
+ __ Addu(TMP, obj, TMP);
+ __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
+ FRegister value = locations->InAt(2).AsFpuRegister<FRegister>();
+ DCHECK(locations->InAt(2).IsFpuRegister());
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ StoreSToOffset(value, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
+ __ Addu(TMP, obj, TMP);
+ __ StoreSToOffset(value, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
+ FRegister value = locations->InAt(2).AsFpuRegister<FRegister>();
+ DCHECK(locations->InAt(2).IsFpuRegister());
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ __ StoreDToOffset(value, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
+ __ Addu(TMP, obj, TMP);
+ __ StoreDToOffset(value, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
+ }
+
+ // Ints and objects are handled in the switch.
+ if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+}
+
+void LocationsBuilderMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ BoundsCheckSlowPathMIPS* slow_path =
+ new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ Register index = locations->InAt(0).AsRegister<Register>();
+ Register length = locations->InAt(1).AsRegister<Register>();
+
+  // The length is limited to the maximum positive signed 32-bit integer, so an
+  // unsigned comparison of index and length checks for index < 0 and for
+  // length <= index simultaneously.
+ __ Bgeu(index, length, slow_path->GetEntryLabel());
+}
+
+void LocationsBuilderMIPS::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction,
+ LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // Note that TypeCheckSlowPathMIPS uses this register too.
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Register cls = locations->InAt(1).AsRegister<Register>();
+ Register obj_cls = locations->GetTemp(0).AsRegister<Register>();
+
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ // TODO: avoid this check if we know obj is not null.
+ __ Beqz(obj, slow_path->GetExitLabel());
+ // Compare the class of `obj` with `cls`.
+ __ LoadFromOffset(kLoadWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
+ __ Bne(obj_cls, cls, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderMIPS::VisitClinitCheck(HClinitCheck* check) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (check->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitClinitCheck(HClinitCheck* check) {
+ // We assume the class is not null.
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS(
+ check->GetLoadClass(),
+ check,
+ check->GetDexPc(),
+ true);
+ codegen_->AddSlowPath(slow_path);
+ GenerateClassInitializationCheck(slow_path,
+ check->GetLocations()->InAt(0).AsRegister<Register>());
+}
+
+void LocationsBuilderMIPS::VisitCompare(HCompare* compare) {
+ Primitive::Type in_type = compare->InputAt(0)->GetType();
+
+ LocationSummary::CallKind call_kind = Primitive::IsFloatingPointType(in_type)
+ ? LocationSummary::kCall
+ : LocationSummary::kNoCall;
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, call_kind);
+
+ switch (in_type) {
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // Output overlaps because it is written before doing the low comparison.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
+ locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected type for compare operation " << in_type;
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitCompare(HCompare* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Primitive::Type in_type = instruction->InputAt(0)->GetType();
+
+ // 0 if: left == right
+ // 1 if: left > right
+ // -1 if: left < right
+ switch (in_type) {
+ case Primitive::kPrimLong: {
+ MipsLabel done;
+ Register res = locations->Out().AsRegister<Register>();
+ Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
+ Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>();
+ Register rhs_low = locations->InAt(1).AsRegisterPairLow<Register>();
+ // TODO: more efficient (direct) comparison with a constant.
+ __ Slt(TMP, lhs_high, rhs_high);
+ __ Slt(AT, rhs_high, lhs_high); // Inverted: is actually gt.
+ __ Subu(res, AT, TMP); // Result -1:1:0 for [ <, >, == ].
+ __ Bnez(res, &done); // If we compared ==, check if lower bits are also equal.
+ __ Sltu(TMP, lhs_low, rhs_low);
+ __ Sltu(AT, rhs_low, lhs_low); // Inverted: is actually gt.
+ __ Subu(res, AT, TMP); // Result -1:1:0 for [ <, >, == ].
+ __ Bind(&done);
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ int32_t entry_point_offset;
+ bool direct;
+ if (in_type == Primitive::kPrimFloat) {
+ if (instruction->IsGtBias()) {
+ entry_point_offset = QUICK_ENTRY_POINT(pCmpgFloat);
+ direct = IsDirectEntrypoint(kQuickCmpgFloat);
+ } else {
+ entry_point_offset = QUICK_ENTRY_POINT(pCmplFloat);
+ direct = IsDirectEntrypoint(kQuickCmplFloat);
+ }
+ } else {
+ if (instruction->IsGtBias()) {
+ entry_point_offset = QUICK_ENTRY_POINT(pCmpgDouble);
+ direct = IsDirectEntrypoint(kQuickCmpgDouble);
+ } else {
+ entry_point_offset = QUICK_ENTRY_POINT(pCmplDouble);
+ direct = IsDirectEntrypoint(kQuickCmplDouble);
+ }
+ }
+ codegen_->InvokeRuntime(entry_point_offset,
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ direct);
+ if (in_type == Primitive::kPrimFloat) {
+ if (instruction->IsGtBias()) {
+ CheckEntrypointTypes<kQuickCmpgFloat, int32_t, float, float>();
+ } else {
+ CheckEntrypointTypes<kQuickCmplFloat, int32_t, float, float>();
+ }
+ } else {
+ if (instruction->IsGtBias()) {
+ CheckEntrypointTypes<kQuickCmpgDouble, int32_t, double, double>();
+ } else {
+ CheckEntrypointTypes<kQuickCmplDouble, int32_t, double, double>();
+ }
+ }
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unimplemented compare type " << in_type;
+ }
+}
+
+void LocationsBuilderMIPS::VisitCondition(HCondition* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (instruction->NeedsMaterialization()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitCondition(HCondition* instruction) {
+ if (!instruction->NeedsMaterialization()) {
+ return;
+ }
+ // TODO: generalize to long
+ DCHECK_NE(instruction->InputAt(0)->GetType(), Primitive::kPrimLong);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Register dst = locations->Out().AsRegister<Register>();
+
+ Register lhs = locations->InAt(0).AsRegister<Register>();
+ Location rhs_location = locations->InAt(1);
+
+ Register rhs_reg = ZERO;
+ int64_t rhs_imm = 0;
+ bool use_imm = rhs_location.IsConstant();
+ if (use_imm) {
+ rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
+ } else {
+ rhs_reg = rhs_location.AsRegister<Register>();
+ }
+
+ IfCondition if_cond = instruction->GetCondition();
+
+ switch (if_cond) {
+ case kCondEQ:
+ case kCondNE:
+ if (use_imm && IsUint<16>(rhs_imm)) {
+ __ Xori(dst, lhs, rhs_imm);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Xor(dst, lhs, rhs_reg);
+ }
+ if (if_cond == kCondEQ) {
+ __ Sltiu(dst, dst, 1);
+ } else {
+ __ Sltu(dst, ZERO, dst);
+ }
+ break;
+
+ case kCondLT:
+ case kCondGE:
+ if (use_imm && IsInt<16>(rhs_imm)) {
+ __ Slti(dst, lhs, rhs_imm);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Slt(dst, lhs, rhs_reg);
+ }
+ if (if_cond == kCondGE) {
+ // Simulate lhs >= rhs via !(lhs < rhs) since there's
+ // only the slt instruction but no sge.
+ __ Xori(dst, dst, 1);
+ }
+ break;
+
+ case kCondLE:
+ case kCondGT:
+ if (use_imm && IsInt<16>(rhs_imm + 1)) {
+ // Simulate lhs <= rhs via lhs < rhs + 1.
+ __ Slti(dst, lhs, rhs_imm + 1);
+ if (if_cond == kCondGT) {
+ // Simulate lhs > rhs via !(lhs <= rhs) since there's
+ // only the slti instruction but no sgti.
+ __ Xori(dst, dst, 1);
+ }
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Slt(dst, rhs_reg, lhs);
+ if (if_cond == kCondLE) {
+ // Simulate lhs <= rhs via !(rhs < lhs) since there's
+ // only the slt instruction but no sle.
+ __ Xori(dst, dst, 1);
+ }
+ }
+ break;
+
+ case kCondB:
+ case kCondAE:
+ // Use sltiu instruction if rhs_imm is in range [0, 32767] or in
+ // [max_unsigned - 32767 = 0xffff8000, max_unsigned = 0xffffffff].
+ if (use_imm &&
+ (IsUint<15>(rhs_imm) ||
+ IsUint<15>(rhs_imm - (MaxInt<uint64_t>(32) - MaxInt<uint64_t>(15))))) {
+ if (IsUint<15>(rhs_imm)) {
+ __ Sltiu(dst, lhs, rhs_imm);
+ } else {
+ // 16-bit value (in range [0x8000, 0xffff]) passed to sltiu is sign-extended
+ // and then used as unsigned integer (range [0xffff8000, 0xffffffff]).
+ __ Sltiu(dst, lhs, rhs_imm - (MaxInt<uint64_t>(32) - MaxInt<uint64_t>(16)));
+ }
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Sltu(dst, lhs, rhs_reg);
+ }
+ if (if_cond == kCondAE) {
+ // Simulate lhs >= rhs via !(lhs < rhs) since there's
+ // only the sltu instruction but no sgeu.
+ __ Xori(dst, dst, 1);
+ }
+ break;
+
+ case kCondBE:
+ case kCondA:
+ // Use sltiu instruction if rhs_imm is in range [0, 32766] or in
+ // [max_unsigned - 32767 - 1 = 0xffff7fff, max_unsigned - 1 = 0xfffffffe].
+ // lhs <= rhs is simulated via lhs < rhs + 1.
+ if (use_imm && (rhs_imm != -1) &&
+ (IsUint<15>(rhs_imm + 1) ||
+ IsUint<15>(rhs_imm + 1 - (MaxInt<uint64_t>(32) - MaxInt<uint64_t>(15))))) {
+ if (IsUint<15>(rhs_imm + 1)) {
+ // Simulate lhs <= rhs via lhs < rhs + 1.
+ __ Sltiu(dst, lhs, rhs_imm + 1);
+ } else {
+ // 16-bit value (in range [0x8000, 0xffff]) passed to sltiu is sign-extended
+ // and then used as unsigned integer (range [0xffff8000, 0xffffffff] where rhs_imm
+ // is in range [0xffff7fff, 0xfffffffe] since lhs <= rhs is simulated via lhs < rhs + 1).
+ __ Sltiu(dst, lhs, rhs_imm + 1 - (MaxInt<uint64_t>(32) - MaxInt<uint64_t>(16)));
+ }
+ if (if_cond == kCondA) {
+ // Simulate lhs > rhs via !(lhs <= rhs) since there's
+ // only the sltiu instruction but no sgtiu.
+ __ Xori(dst, dst, 1);
+ }
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Sltu(dst, rhs_reg, lhs);
+ if (if_cond == kCondBE) {
+ // Simulate lhs <= rhs via !(rhs < lhs) since there's
+ // only the sltu instruction but no sleu.
+ __ Xori(dst, dst, 1);
+ }
+ }
+ break;
+ }
+}
+
+void LocationsBuilderMIPS::VisitDiv(HDiv* div) {
+ Primitive::Type type = div->GetResultType();
+ LocationSummary::CallKind call_kind = (type == Primitive::kPrimLong)
+ ? LocationSummary::kCall
+ : LocationSummary::kNoCall;
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+
+ switch (type) {
+ case Primitive::kPrimInt:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimLong: {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(1, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+ locations->SetOut(calling_convention.GetReturnLocation(type));
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << type;
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitDiv(HDiv* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+ bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
+
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register lhs = locations->InAt(0).AsRegister<Register>();
+ Register rhs = locations->InAt(1).AsRegister<Register>();
+ if (isR6) {
+ __ DivR6(dst, lhs, rhs);
+ } else {
+ __ DivR2(dst, lhs, rhs);
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickLdiv));
+ CheckEntrypointTypes<kQuickLdiv, int64_t, int64_t, int64_t>();
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
+ FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
+ if (type == Primitive::kPrimFloat) {
+ __ DivS(dst, lhs, rhs);
+ } else {
+ __ DivD(dst, lhs, rhs);
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected div type " << type;
+ }
+}
+
+void LocationsBuilderMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+ Location value = instruction->GetLocations()->InAt(0);
+ Primitive::Type type = instruction->GetType();
+
+ switch (type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt: {
+ if (value.IsConstant()) {
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ B(slow_path->GetEntryLabel());
+ } else {
+ // A division by a non-zero constant is valid. We don't need to perform
+ // any check, so simply fall through.
+ }
+ } else {
+ DCHECK(value.IsRegister()) << value;
+ __ Beqz(value.AsRegister<Register>(), slow_path->GetEntryLabel());
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ if (value.IsConstant()) {
+ if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
+ __ B(slow_path->GetEntryLabel());
+ } else {
+ // A division by a non-zero constant is valid. We don't need to perform
+ // any check, so simply fall through.
+ }
+ } else {
+ DCHECK(value.IsRegisterPair()) << value;
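+ // A 64-bit value is zero iff the bitwise OR of its two halves is zero.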
+ __ Or(TMP, value.AsRegisterPairHigh<Register>(), value.AsRegisterPairLow<Register>());
+ __ Beqz(TMP, slow_path->GetEntryLabel());
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
+ }
+}
+
+void LocationsBuilderMIPS::VisitDoubleConstant(HDoubleConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::VisitExit(HExit* exit) {
+ exit->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
+}
+
+void LocationsBuilderMIPS::VisitFloatConstant(HFloatConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::VisitGoto(HGoto* got) {
+ got->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::HandleGoto(HInstruction* got, HBasicBlock* successor) {
+ DCHECK(!successor->IsExitBlock());
+ HBasicBlock* block = got->GetBlock();
+ HInstruction* previous = got->GetPrevious();
+ HLoopInformation* info = block->GetLoopInformation();
+
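+ // A goto that is a loop back edge with a suspend check emits the suspend check
+ // in place of a plain branch back to the loop header.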
+ if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
+ codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
+ GenerateSuspendCheck(info->GetSuspendCheck(), successor);
+ return;
+ }
+ if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
+ GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
+ }
+ if (!codegen_->GoesToNextBlock(block, successor)) {
+ __ B(codegen_->GetLabelOf(successor));
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitGoto(HGoto* got) {
+ HandleGoto(got, got->GetSuccessor());
+}
+
+void LocationsBuilderMIPS::VisitTryBoundary(HTryBoundary* try_boundary) {
+ try_boundary->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitTryBoundary(HTryBoundary* try_boundary) {
+ HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
+ if (!successor->IsExitBlock()) {
+ HandleGoto(try_boundary, successor);
+ }
+}
+
+void InstructionCodeGeneratorMIPS::GenerateTestAndBranch(HInstruction* instruction,
+ MipsLabel* true_target,
+ MipsLabel* false_target,
+ MipsLabel* always_true_target) {
+ HInstruction* cond = instruction->InputAt(0);
+ HCondition* condition = cond->AsCondition();
+
+ if (cond->IsIntConstant()) {
+ int32_t cond_value = cond->AsIntConstant()->GetValue();
+ if (cond_value == 1) {
+ if (always_true_target != nullptr) {
+ __ B(always_true_target);
+ }
+ return;
+ } else {
+ DCHECK_EQ(cond_value, 0);
+ }
+ } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
+ // The condition instruction has been materialized, compare the output to 0.
+ Location cond_val = instruction->GetLocations()->InAt(0);
+ DCHECK(cond_val.IsRegister());
+ __ Bnez(cond_val.AsRegister<Register>(), true_target);
+ } else {
+ // The condition instruction has not been materialized, use its inputs as
+ // the comparison and its condition as the branch condition.
+ Register lhs = condition->GetLocations()->InAt(0).AsRegister<Register>();
+ Location rhs_location = condition->GetLocations()->InAt(1);
+ Register rhs_reg = ZERO;
+ int32_t rhs_imm = 0;
+ bool use_imm = rhs_location.IsConstant();
+ if (use_imm) {
+ rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
+ } else {
+ rhs_reg = rhs_location.AsRegister<Register>();
+ }
+
+ IfCondition if_cond = condition->GetCondition();
+ if (use_imm && rhs_imm == 0) {
+ switch (if_cond) {
+ case kCondEQ:
+ __ Beqz(lhs, true_target);
+ break;
+ case kCondNE:
+ __ Bnez(lhs, true_target);
+ break;
+ case kCondLT:
+ __ Bltz(lhs, true_target);
+ break;
+ case kCondGE:
+ __ Bgez(lhs, true_target);
+ break;
+ case kCondLE:
+ __ Blez(lhs, true_target);
+ break;
+ case kCondGT:
+ __ Bgtz(lhs, true_target);
+ break;
+ case kCondB:
+ break;  // Unsigned lhs < 0 is always false.
+ case kCondBE:
+ __ Beqz(lhs, true_target);  // Unsigned lhs <= 0 holds only if lhs is zero.
+ break;
+ case kCondA:
+ __ Bnez(lhs, true_target);  // Unsigned lhs > 0 holds if lhs is non-zero.
+ break;
+ case kCondAE:
+ __ B(true_target);  // Unsigned lhs >= 0 is always true.
+ break;
+ }
+ } else {
+ if (use_imm) {
+ // TODO: more efficient comparison with 16-bit constants without loading them into TMP.
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ switch (if_cond) {
+ case kCondEQ:
+ __ Beq(lhs, rhs_reg, true_target);
+ break;
+ case kCondNE:
+ __ Bne(lhs, rhs_reg, true_target);
+ break;
+ case kCondLT:
+ __ Blt(lhs, rhs_reg, true_target);
+ break;
+ case kCondGE:
+ __ Bge(lhs, rhs_reg, true_target);
+ break;
+ case kCondLE:
+ __ Bge(rhs_reg, lhs, true_target);
+ break;
+ case kCondGT:
+ __ Blt(rhs_reg, lhs, true_target);
+ break;
+ case kCondB:
+ __ Bltu(lhs, rhs_reg, true_target);
+ break;
+ case kCondAE:
+ __ Bgeu(lhs, rhs_reg, true_target);
+ break;
+ case kCondBE:
+ __ Bgeu(rhs_reg, lhs, true_target);
+ break;
+ case kCondA:
+ __ Bltu(rhs_reg, lhs, true_target);
+ break;
+ }
+ }
+ }
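+ // false_target is null when no explicit branch to the false successor is
+ // needed (e.g. it is the fallthrough block).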
+ if (false_target != nullptr) {
+ __ B(false_target);
+ }
+}
+
+void LocationsBuilderMIPS::VisitIf(HIf* if_instr) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ HInstruction* cond = if_instr->InputAt(0);
+ if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitIf(HIf* if_instr) {
+ MipsLabel* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
+ MipsLabel* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
+ MipsLabel* always_true_target = true_target;
+ if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
+ if_instr->IfTrueSuccessor())) {
+ always_true_target = nullptr;
+ }
+ if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
+ if_instr->IfFalseSuccessor())) {
+ false_target = nullptr;
+ }
+ GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
+}
+
+void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
+ LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
+ HInstruction* cond = deoptimize->InputAt(0);
+ if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena())
+ DeoptimizationSlowPathMIPS(deoptimize);
+ codegen_->AddSlowPath(slow_path);
+ MipsLabel* slow_path_entry = slow_path->GetEntryLabel();
+ GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
+}
+
+void LocationsBuilderMIPS::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
+ Primitive::Type field_type = field_info.GetFieldType();
+ bool is_wide = (field_type == Primitive::kPrimLong) || (field_type == Primitive::kPrimDouble);
+ bool generate_volatile = field_info.IsVolatile() && is_wide;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, generate_volatile ? LocationSummary::kCall : LocationSummary::kNoCall);
+
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (generate_volatile) {
+ InvokeRuntimeCallingConvention calling_convention;
+ // Need A0 to hold base + offset.
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ if (field_type == Primitive::kPrimLong) {
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimLong));
+ } else {
+ locations->SetOut(Location::RequiresFpuRegister());
+ // Need some temp core regs since FP results are returned in core registers
+ Location reg = calling_convention.GetReturnLocation(Primitive::kPrimLong);
+ locations->AddTemp(Location::RegisterLocation(reg.AsRegisterPairLow<Register>()));
+ locations->AddTemp(Location::RegisterLocation(reg.AsRegisterPairHigh<Register>()));
+ }
+ } else {
+ if (Primitive::IsFloatingPointType(instruction->GetType())) {
+ locations->SetOut(Location::RequiresFpuRegister());
+ } else {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+ }
+}
+
+void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
+ const FieldInfo& field_info,
+ uint32_t dex_pc) {
+ Primitive::Type type = field_info.GetFieldType();
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ LoadOperandType load_type = kLoadUnsignedByte;
+ bool is_volatile = field_info.IsVolatile();
+
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ load_type = kLoadUnsignedByte;
+ break;
+ case Primitive::kPrimByte:
+ load_type = kLoadSignedByte;
+ break;
+ case Primitive::kPrimShort:
+ load_type = kLoadSignedHalfword;
+ break;
+ case Primitive::kPrimChar:
+ load_type = kLoadUnsignedHalfword;
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimNot:
+ load_type = kLoadWord;
+ break;
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ load_type = kLoadDoubleword;
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << type;
+ UNREACHABLE();
+ }
+
+ if (is_volatile && load_type == kLoadDoubleword) {
+ InvokeRuntimeCallingConvention calling_convention;
+ __ Addiu32(locations->GetTemp(0).AsRegister<Register>(),
+ obj, field_info.GetFieldOffset().Uint32Value());
+ // Do an implicit null check.
+ __ Lw(ZERO, locations->GetTemp(0).AsRegister<Register>(), 0);
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pA64Load),
+ instruction,
+ dex_pc,
+ nullptr,
+ IsDirectEntrypoint(kQuickA64Load));
+ CheckEntrypointTypes<kQuickA64Load, int64_t, volatile const int64_t*>();
+ if (type == Primitive::kPrimDouble) {
+ // Need to move to FP regs since FP results are returned in core registers.
+ __ Mtc1(locations->GetTemp(1).AsRegister<Register>(),
+ locations->Out().AsFpuRegister<FRegister>());
+ __ Mthc1(locations->GetTemp(2).AsRegister<Register>(),
+ locations->Out().AsFpuRegister<FRegister>());
+ }
+ } else {
+ if (!Primitive::IsFloatingPointType(type)) {
+ Register dst;
+ if (type == Primitive::kPrimLong) {
+ DCHECK(locations->Out().IsRegisterPair());
+ dst = locations->Out().AsRegisterPairLow<Register>();
+ } else {
+ DCHECK(locations->Out().IsRegister());
+ dst = locations->Out().AsRegister<Register>();
+ }
+ __ LoadFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
+ } else {
+ DCHECK(locations->Out().IsFpuRegister());
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ if (type == Primitive::kPrimFloat) {
+ __ LoadSFromOffset(dst, obj, field_info.GetFieldOffset().Uint32Value());
+ } else {
+ __ LoadDFromOffset(dst, obj, field_info.GetFieldOffset().Uint32Value());
+ }
+ }
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+
+ if (is_volatile) {
+ GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+ }
+}
+
+void LocationsBuilderMIPS::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
+ Primitive::Type field_type = field_info.GetFieldType();
+ bool is_wide = (field_type == Primitive::kPrimLong) || (field_type == Primitive::kPrimDouble);
+ bool generate_volatile = field_info.IsVolatile() && is_wide;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, generate_volatile ? LocationSummary::kCall : LocationSummary::kNoCall);
+
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (generate_volatile) {
+ InvokeRuntimeCallingConvention calling_convention;
+ // Need A0 to hold base + offset.
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ if (field_type == Primitive::kPrimLong) {
+ locations->SetInAt(1, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+ } else {
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ // Pass FP parameters in core registers.
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
+ }
+ } else {
+ if (Primitive::IsFloatingPointType(field_type)) {
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(1, Location::RequiresRegister());
+ }
+ }
+}
+
+void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
+ const FieldInfo& field_info,
+ uint32_t dex_pc) {
+ Primitive::Type type = field_info.GetFieldType();
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ StoreOperandType store_type = kStoreByte;
+ bool is_volatile = field_info.IsVolatile();
+
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ store_type = kStoreByte;
+ break;
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar:
+ store_type = kStoreHalfword;
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimNot:
+ store_type = kStoreWord;
+ break;
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ store_type = kStoreDoubleword;
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << type;
+ UNREACHABLE();
+ }
+
+ if (is_volatile) {
+ GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
+ }
+
+ if (is_volatile && store_type == kStoreDoubleword) {
+ InvokeRuntimeCallingConvention calling_convention;
+ __ Addiu32(locations->GetTemp(0).AsRegister<Register>(),
+ obj, field_info.GetFieldOffset().Uint32Value());
+ // Do an implicit null check.
+ __ Lw(ZERO, locations->GetTemp(0).AsRegister<Register>(), 0);
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ if (type == Primitive::kPrimDouble) {
+ // Pass FP parameters in core registers.
+ __ Mfc1(locations->GetTemp(1).AsRegister<Register>(),
+ locations->InAt(1).AsFpuRegister<FRegister>());
+ __ Mfhc1(locations->GetTemp(2).AsRegister<Register>(),
+ locations->InAt(1).AsFpuRegister<FRegister>());
+ }
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pA64Store),
+ instruction,
+ dex_pc,
+ nullptr,
+ IsDirectEntrypoint(kQuickA64Store));
+ CheckEntrypointTypes<kQuickA64Store, void, volatile int64_t *, int64_t>();
+ } else {
+ if (!Primitive::IsFloatingPointType(type)) {
+ Register src;
+ if (type == Primitive::kPrimLong) {
+ DCHECK(locations->InAt(1).IsRegisterPair());
+ src = locations->InAt(1).AsRegisterPairLow<Register>();
+ } else {
+ DCHECK(locations->InAt(1).IsRegister());
+ src = locations->InAt(1).AsRegister<Register>();
+ }
+ __ StoreToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
+ } else {
+ DCHECK(locations->InAt(1).IsFpuRegister());
+ FRegister src = locations->InAt(1).AsFpuRegister<FRegister>();
+ if (type == Primitive::kPrimFloat) {
+ __ StoreSToOffset(src, obj, field_info.GetFieldOffset().Uint32Value());
+ } else {
+ __ StoreDToOffset(src, obj, field_info.GetFieldOffset().Uint32Value());
+ }
+ }
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+
+ // TODO: memory barriers?
+ if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
+ DCHECK(locations->InAt(1).IsRegister());
+ Register src = locations->InAt(1).AsRegister<Register>();
+ codegen_->MarkGCCard(obj, src);
+ }
+
+ if (is_volatile) {
+ GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
+ }
+}
+
+void LocationsBuilderMIPS::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+ HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorMIPS::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+ HandleFieldGet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+ HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorMIPS::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+ HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitInstanceOf(HInstanceOf* instruction) {
+ LocationSummary::CallKind call_kind =
+ instruction->IsExactCheck() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // The output does overlap inputs.
+ // Note that TypeCheckSlowPathMIPS uses this register too.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Register cls = locations->InAt(1).AsRegister<Register>();
+ Register out = locations->Out().AsRegister<Register>();
+
+ MipsLabel done;
+
+ // Return 0 if `obj` is null.
+ // TODO: Avoid this check if we know `obj` is not null.
+ __ Move(out, ZERO);
+ __ Beqz(obj, &done);
+
+ // Compare the class of `obj` with `cls`.
+ __ LoadFromOffset(kLoadWord, out, obj, mirror::Object::ClassOffset().Int32Value());
+ if (instruction->IsExactCheck()) {
+ // Classes must be equal for the instanceof to succeed.
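+ // out = ((out ^ cls) < 1), i.e. 1 if the classes are equal, 0 otherwise.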
+ __ Xor(out, out, cls);
+ __ Sltiu(out, out, 1);
+ } else {
+ // If the classes are not equal, we go into a slow path.
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+ __ Bne(out, cls, slow_path->GetEntryLabel());
+ __ LoadConst32(out, 1);
+ __ Bind(slow_path->GetExitLabel());
+ }
+
+ __ Bind(&done);
+}
+
+void LocationsBuilderMIPS::VisitIntConstant(HIntConstant* constant) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::VisitNullConstant(HNullConstant* constant) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::HandleInvoke(HInvoke* invoke) {
+ InvokeDexCallingConventionVisitorMIPS calling_convention_visitor;
+ CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
+}
+
+void LocationsBuilderMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
+ HandleInvoke(invoke);
+ // The register T0 is required to be used for the hidden argument in
+ // art_quick_imt_conflict_trampoline, so add the hidden argument.
+ invoke->GetLocations()->AddTemp(Location::RegisterLocation(T0));
+}
+
+void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
+ // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
+ Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
+ uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ invoke->GetImtIndex() % mirror::Class::kImtSize, kMipsPointerSize).Uint32Value();
+ Location receiver = invoke->GetLocations()->InAt(0);
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
+
+ // Set the hidden argument.
+ __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
+ invoke->GetDexMethodIndex());
+
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
+ __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
+ } else {
+ __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
+ }
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ // temp = temp->GetImtEntryAt(method_offset);
+ __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
+ // T9 = temp->GetEntryPoint();
+ __ LoadFromOffset(kLoadWord, T9, temp, entry_point.Int32Value());
+ // T9();
+ __ Jalr(T9);
+ __ Nop();
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ // TODO: intrinsic function.
+ HandleInvoke(invoke);
+}
+
+void LocationsBuilderMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+ // When we do not run baseline, explicit clinit checks triggered by static
+ // invokes must have been pruned by art::PrepareForRegisterAllocation.
+ DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+
+ // TODO: intrinsic function.
+ HandleInvoke(invoke);
+}
+
+static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS* codegen ATTRIBUTE_UNUSED) {
+ if (invoke->GetLocations()->Intrinsified()) {
+ // TODO: intrinsic function.
+ return true;
+ }
+ return false;
+}
+
+void CodeGeneratorMIPS::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
+ // All registers are assumed to be correctly set up per the calling convention.
+
+ Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
+ switch (invoke->GetMethodLoadKind()) {
+ case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
+ // temp = thread->string_init_entrypoint
+ __ LoadFromOffset(kLoadWord,
+ temp.AsRegister<Register>(),
+ TR,
+ invoke->GetStringInitOffset());
+ break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
+ callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
+ break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
+ __ LoadConst32(temp.AsRegister<Register>(), invoke->GetMethodAddress());
+ break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
+ // TODO: Implement this type. (Needs literal support.) At the moment, the
+ // CompilerDriver will not direct the backend to use this type for MIPS.
+ LOG(FATAL) << "Unsupported!";
+ UNREACHABLE();
+ case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
+ // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
+ FALLTHROUGH_INTENDED;
+ case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
+ Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
+ Register reg = temp.AsRegister<Register>();
+ Register method_reg;
+ if (current_method.IsRegister()) {
+ method_reg = current_method.AsRegister<Register>();
+ } else {
+ // TODO: use the appropriate DCHECK() here if possible.
+ // DCHECK(invoke->GetLocations()->Intrinsified());
+ DCHECK(!current_method.IsValid());
+ method_reg = reg;
+ __ Lw(reg, SP, kCurrentMethodStackOffset);
+ }
+
+ // temp = temp->dex_cache_resolved_methods_;
+ __ LoadFromOffset(kLoadWord,
+ reg,
+ method_reg,
+ ArtMethod::DexCacheResolvedMethodsOffset(kMipsPointerSize).Int32Value());
+ // temp = temp[index_in_cache]
+ uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
+ __ LoadFromOffset(kLoadWord,
+ reg,
+ reg,
+ CodeGenerator::GetCachePointerOffset(index_in_cache));
+ break;
+ }
+ }
+
+ switch (invoke->GetCodePtrLocation()) {
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
+ __ Jalr(&frame_entry_label_, T9);
+ break;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
+ // T9 = invoke->GetDirectCodePtr();
+ __ LoadConst32(T9, invoke->GetDirectCodePtr());
+ // T9()
+ __ Jalr(T9);
+ __ Nop();
+ break;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
+ // TODO: Implement kCallPCRelative. For the moment, we fall back to kMethodCode.
+ FALLTHROUGH_INTENDED;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+ // TODO: Implement kDirectCodeFixup. For the moment, we fall back to kMethodCode.
+ FALLTHROUGH_INTENDED;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
+ // T9 = callee_method->entry_point_from_quick_compiled_code_;
+ __ LoadFromOffset(kLoadWord,
+ T9,
+ callee_method.AsRegister<Register>(),
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kMipsWordSize).Int32Value());
+ // T9()
+ __ Jalr(T9);
+ __ Nop();
+ break;
+ }
+ DCHECK(!IsLeafMethod());
+}
+
+void InstructionCodeGeneratorMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+ // When we do not run baseline, explicit clinit checks triggered by static
+ // invokes must have been pruned by art::PrepareForRegisterAllocation.
+ DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+
+ if (TryGenerateIntrinsicCode(invoke, codegen_)) {
+ return;
+ }
+
+ LocationSummary* locations = invoke->GetLocations();
+ codegen_->GenerateStaticOrDirectCall(invoke,
+ locations->HasTemps()
+ ? locations->GetTemp(0)
+ : Location::NoLocation());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void InstructionCodeGeneratorMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ // TODO: Try to generate intrinsics code.
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
+ size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+ invoke->GetVTableIndex(), kMipsPointerSize).SizeValue();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
+
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
+ __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
+ } else {
+ DCHECK(receiver.IsRegister());
+ __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
+ }
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ // temp = temp->GetMethodAt(method_offset);
+ __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
+ // T9 = temp->GetEntryPoint();
+ __ LoadFromOffset(kLoadWord, T9, temp, entry_point.Int32Value());
+ // T9();
+ __ Jalr(T9);
+ __ Nop();
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
+ LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) {
+ LocationSummary* locations = cls->GetLocations();
+ Register out = locations->Out().AsRegister<Register>();
+ Register current_method = locations->InAt(0).AsRegister<Register>();
+ if (cls->IsReferrersClass()) {
+ DCHECK(!cls->CanCallRuntime());
+ DCHECK(!cls->MustGenerateClinitCheck());
+ __ LoadFromOffset(kLoadWord, out, current_method,
+ ArtMethod::DeclaringClassOffset().Int32Value());
+ } else {
+ DCHECK(cls->CanCallRuntime());
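+ // out = current_method->dex_cache_resolved_types_[type_index]; a null entry
+ // means the class is not yet resolved and the slow path is taken.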
+ __ LoadFromOffset(kLoadWord, out, current_method,
+ ArtMethod::DexCacheResolvedTypesOffset(kMipsPointerSize).Int32Value());
+ __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS(
+ cls,
+ cls,
+ cls->GetDexPc(),
+ cls->MustGenerateClinitCheck());
+ codegen_->AddSlowPath(slow_path);
+ __ Beqz(out, slow_path->GetEntryLabel());
+ if (cls->MustGenerateClinitCheck()) {
+ GenerateClassInitializationCheck(slow_path, out);
+ } else {
+ __ Bind(slow_path->GetExitLabel());
+ }
+ }
+}
+
+static int32_t GetExceptionTlsOffset() {
+ return Thread::ExceptionOffset<kMipsWordSize>().Int32Value();
+}
+
+void LocationsBuilderMIPS::VisitLoadException(HLoadException* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS::VisitLoadException(HLoadException* load) {
+ Register out = load->GetLocations()->Out().AsRegister<Register>();
+ __ LoadFromOffset(kLoadWord, out, TR, GetExceptionTlsOffset());
+}
+
+void LocationsBuilderMIPS::VisitClearException(HClearException* clear) {
+ new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+}
+
+void InstructionCodeGeneratorMIPS::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
+ __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
+}
+
+void LocationsBuilderMIPS::VisitLoadLocal(HLoadLocal* load) {
+ load->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
+ // Nothing to do, this is driven by the code generator.
+}
+
+void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) {
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load);
+ codegen_->AddSlowPath(slow_path);
+
+ LocationSummary* locations = load->GetLocations();
+ Register out = locations->Out().AsRegister<Register>();
+ Register current_method = locations->InAt(0).AsRegister<Register>();
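+ // out = current_method->declaring_class_->dex_cache_strings_[string_index];
+ // a null entry means the string is not yet resolved and the slow path is taken.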
+ __ LoadFromOffset(kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
+ __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
+ __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
+ __ Beqz(out, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderMIPS::VisitLocal(HLocal* local) {
+ local->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitLocal(HLocal* local) {
+ DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
+}
+
+void LocationsBuilderMIPS::VisitLongConstant(HLongConstant* constant) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::VisitMonitorOperation(HMonitorOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorMIPS::VisitMonitorOperation(HMonitorOperation* instruction) {
+ if (instruction->IsEnter()) {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLockObject),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickLockObject));
+ CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
+ } else {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pUnlockObject),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickUnlockObject));
+ CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
+ }
+}
+
+void LocationsBuilderMIPS::VisitMul(HMul* mul) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitMul(HMul* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+ bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
+
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register lhs = locations->InAt(0).AsRegister<Register>();
+ Register rhs = locations->InAt(1).AsRegister<Register>();
+
+ if (isR6) {
+ __ MulR6(dst, lhs, rhs);
+ } else {
+ __ MulR2(dst, lhs, rhs);
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+ Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+ Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
+ Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>();
+ Register rhs_low = locations->InAt(1).AsRegisterPairLow<Register>();
+
+ // Extra checks to protect against problems caused by the existence of the A1_A2 pair.
+ // The algorithm is wrong if dst_high is either lhs_low or rhs_low:
+ // (e.g. lhs=a0_a1, rhs=a2_a3 and dst=a1_a2).
+ DCHECK_NE(dst_high, lhs_low);
+ DCHECK_NE(dst_high, rhs_low);
+
+ // A_B * C_D
+ // dst_hi: [ low(A*D) + low(B*C) + hi(B*D) ]
+ // dst_lo: [ low(B*D) ]
+ // Note: R2 and R6 MUL produce the low 32 bits of the multiplication result.
+
+ if (isR6) {
+ __ MulR6(TMP, lhs_high, rhs_low);
+ __ MulR6(dst_high, lhs_low, rhs_high);
+ __ Addu(dst_high, dst_high, TMP);
+ __ MuhuR6(TMP, lhs_low, rhs_low);
+ __ Addu(dst_high, dst_high, TMP);
+ __ MulR6(dst_low, lhs_low, rhs_low);
+ } else {
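+ // R2: multu computes the full 64-bit lhs_low * rhs_low in HI:LO;
+ // mfhi/mflo then retrieve hi(B*D) and low(B*D).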
+ __ MulR2(TMP, lhs_high, rhs_low);
+ __ MulR2(dst_high, lhs_low, rhs_high);
+ __ Addu(dst_high, dst_high, TMP);
+ __ MultuR2(lhs_low, rhs_low);
+ __ Mfhi(TMP);
+ __ Addu(dst_high, dst_high, TMP);
+ __ Mflo(dst_low);
+ }
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
+ FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
+ if (type == Primitive::kPrimFloat) {
+ __ MulS(dst, lhs, rhs);
+ } else {
+ __ MulD(dst, lhs, rhs);
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected mul type " << type;
+ }
+}
+
+void LocationsBuilderMIPS::VisitNeg(HNeg* neg) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ switch (neg->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitNeg(HNeg* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register src = locations->InAt(0).AsRegister<Register>();
+ __ Subu(dst, ZERO, src);
+ break;
+ }
+ case Primitive::kPrimLong: {
+ Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+ Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+ Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Register src_low = locations->InAt(0).AsRegisterPairLow<Register>();
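+ // Negate the 64-bit value: negate the low word, derive the borrow (1 if the
+ // negated low word is non-zero), then negate the high word and subtract the borrow.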
+ __ Subu(dst_low, ZERO, src_low);
+ __ Sltu(TMP, ZERO, dst_low);
+ __ Subu(dst_high, ZERO, src_high);
+ __ Subu(dst_high, dst_high, TMP);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
+ if (type == Primitive::kPrimFloat) {
+ __ NegS(dst, src);
+ } else {
+ __ NegD(dst, src);
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected neg type " << type;
+ }
+}
+
+void LocationsBuilderMIPS::VisitNewArray(HNewArray* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+}
+
+void InstructionCodeGeneratorMIPS::VisitNewArray(HNewArray* instruction) {
+ InvokeRuntimeCallingConvention calling_convention;
+ Register current_method_register = calling_convention.GetRegisterAt(2);
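+ // The allocation entrypoint also takes the caller's ArtMethod*; load it from the stack.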
+ __ Lw(current_method_register, SP, kCurrentMethodStackOffset);
+ // Move a uint16_t value to a register.
+ __ LoadConst32(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
+ codegen_->InvokeRuntime(
+ GetThreadOffset<kMipsWordSize>(instruction->GetEntrypoint()).Int32Value(),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickAllocArrayWithAccessCheck));
+ CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
+ void*, uint32_t, int32_t, ArtMethod*>();
+}
+
+void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+}
+
+void InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) {
+ InvokeRuntimeCallingConvention calling_convention;
+ Register current_method_register = calling_convention.GetRegisterAt(1);
+ __ Lw(current_method_register, SP, kCurrentMethodStackOffset);
+ // Move a uint16_t value to a register.
+ __ LoadConst32(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
+ codegen_->InvokeRuntime(
+ GetThreadOffset<kMipsWordSize>(instruction->GetEntrypoint()).Int32Value(),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickAllocObjectWithAccessCheck));
+ CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+}
+
+void LocationsBuilderMIPS::VisitNot(HNot* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorMIPS::VisitNot(HNot* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register src = locations->InAt(0).AsRegister<Register>();
+ __ Nor(dst, src, ZERO);
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+ Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+ Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Register src_low = locations->InAt(0).AsRegisterPairLow<Register>();
+ __ Nor(dst_high, src_high, ZERO);
+ __ Nor(dst_low, src_low, ZERO);
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
+ }
+}
+
+void LocationsBuilderMIPS::VisitBooleanNot(HBooleanNot* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorMIPS::VisitBooleanNot(HBooleanNot* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ __ Xori(locations->Out().AsRegister<Register>(),
+ locations->InAt(0).AsRegister<Register>(),
+ 1);
+}
+
+void LocationsBuilderMIPS::VisitNullCheck(HNullCheck* instruction) {
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorMIPS::GenerateImplicitNullCheck(HNullCheck* instruction) {
+ if (codegen_->CanMoveNullCheckToUser(instruction)) {
+ return;
+ }
+ Location obj = instruction->GetLocations()->InAt(0);
+
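+ // Probe the object with a load into ZERO; if `obj` is null this faults and the
+ // fault handler raises the NullPointerException.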
+ __ Lw(ZERO, obj.AsRegister<Register>(), 0);
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+}
+
+void InstructionCodeGeneratorMIPS::GenerateExplicitNullCheck(HNullCheck* instruction) {
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ Location obj = instruction->GetLocations()->InAt(0);
+
+ __ Beqz(obj.AsRegister<Register>(), slow_path->GetEntryLabel());
+}
+
+void InstructionCodeGeneratorMIPS::VisitNullCheck(HNullCheck* instruction) {
+ if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
+ GenerateImplicitNullCheck(instruction);
+ } else {
+ GenerateExplicitNullCheck(instruction);
+ }
+}
+
+void LocationsBuilderMIPS::VisitOr(HOr* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitOr(HOr* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorMIPS::VisitParallelMove(HParallelMove* instruction) {
+ codegen_->GetMoveResolver()->EmitNativeCode(instruction);
+}
+
+void LocationsBuilderMIPS::VisitParameterValue(HParameterValue* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
+ if (location.IsStackSlot()) {
+ location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+ } else if (location.IsDoubleStackSlot()) {
+ location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+ }
+ locations->SetOut(location);
+}
+
+void InstructionCodeGeneratorMIPS::VisitParameterValue(HParameterValue* instruction
+ ATTRIBUTE_UNUSED) {
+ // Nothing to do, the parameter is already at its location.
+}
+
+void LocationsBuilderMIPS::VisitCurrentMethod(HCurrentMethod* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
+}
+
+void InstructionCodeGeneratorMIPS::VisitCurrentMethod(HCurrentMethod* instruction
+ ATTRIBUTE_UNUSED) {
+ // Nothing to do, the method is already at its location.
+}
+
+void LocationsBuilderMIPS::VisitPhi(HPhi* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
+ locations->SetInAt(i, Location::Any());
+ }
+ locations->SetOut(Location::Any());
+}
+
+void InstructionCodeGeneratorMIPS::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void LocationsBuilderMIPS::VisitRem(HRem* rem) {
+ Primitive::Type type = rem->GetResultType();
+ LocationSummary::CallKind call_kind =
+ (type == Primitive::kPrimInt) ? LocationSummary::kNoCall : LocationSummary::kCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+
+ switch (type) {
+ case Primitive::kPrimInt:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimLong: {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(1, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+ locations->SetOut(calling_convention.GetReturnLocation(type));
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
+ locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
+ locations->SetOut(calling_convention.GetReturnLocation(type));
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected rem type " << type;
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitRem(HRem* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+ bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
+
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register lhs = locations->InAt(0).AsRegister<Register>();
+ Register rhs = locations->InAt(1).AsRegister<Register>();
+ if (isR6) {
+ __ ModR6(dst, lhs, rhs);
+ } else {
+ __ ModR2(dst, lhs, rhs);
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickLmod));
+ CheckEntrypointTypes<kQuickLmod, int64_t, int64_t, int64_t>();
+ break;
+ }
+ case Primitive::kPrimFloat: {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf),
+ instruction, instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickFmodf));
+ CheckEntrypointTypes<kQuickFmodf, float, float, float>();
+ break;
+ }
+ case Primitive::kPrimDouble: {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod),
+ instruction, instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickFmod));
+ CheckEntrypointTypes<kQuickFmod, double, double, double>();
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected rem type " << type;
+ }
+}
+
+void LocationsBuilderMIPS::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+ memory_barrier->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+ GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
+}
+
+void LocationsBuilderMIPS::VisitReturn(HReturn* ret) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
+ Primitive::Type return_type = ret->InputAt(0)->GetType();
+ locations->SetInAt(0, MipsReturnLocation(return_type));
+}
+
+void InstructionCodeGeneratorMIPS::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
+ codegen_->GenerateFrameExit();
+}
+
+void LocationsBuilderMIPS::VisitReturnVoid(HReturnVoid* ret) {
+ ret->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
+ codegen_->GenerateFrameExit();
+}
+
+void LocationsBuilderMIPS::VisitShl(HShl* shl) {
+ HandleShift(shl);
+}
+
+void InstructionCodeGeneratorMIPS::VisitShl(HShl* shl) {
+ HandleShift(shl);
+}
+
+void LocationsBuilderMIPS::VisitShr(HShr* shr) {
+ HandleShift(shr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitShr(HShr* shr) {
+ HandleShift(shr);
+}
+
+void LocationsBuilderMIPS::VisitStoreLocal(HStoreLocal* store) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
+ Primitive::Type field_type = store->InputAt(1)->GetType();
+ switch (field_type) {
+ case Primitive::kPrimNot:
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
+ break;
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented local type " << field_type;
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
+}
+
+void LocationsBuilderMIPS::VisitSub(HSub* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitSub(HSub* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+ HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorMIPS::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+ HandleFieldGet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+ HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorMIPS::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+ HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(instruction,
+ instruction->GetFieldType(),
+ calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderMIPS::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(instruction,
+ instruction->GetFieldType(),
+ calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderMIPS::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(instruction,
+ instruction->GetFieldType(),
+ calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderMIPS::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(instruction,
+ instruction->GetFieldType(),
+ calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+}
+
+void InstructionCodeGeneratorMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
+ HBasicBlock* block = instruction->GetBlock();
+ if (block->GetLoopInformation() != nullptr) {
+ DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
+ // The back edge will generate the suspend check.
+ return;
+ }
+ if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
+ // The goto will generate the suspend check.
+ return;
+ }
+ GenerateSuspendCheck(instruction, nullptr);
+}
+
+void LocationsBuilderMIPS::VisitTemporary(HTemporary* temp) {
+ temp->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
+ // Nothing to do, this is driven by the code generator.
+}
+
+void LocationsBuilderMIPS::VisitThrow(HThrow* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorMIPS::VisitThrow(HThrow* instruction) {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickDeliverException));
+ CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
+}
+
+void LocationsBuilderMIPS::VisitTypeConversion(HTypeConversion* conversion) {
+ Primitive::Type input_type = conversion->GetInputType();
+ Primitive::Type result_type = conversion->GetResultType();
+ DCHECK_NE(input_type, result_type);
+
+ if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
+ (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
+ LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
+ }
+
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ if ((Primitive::IsFloatingPointType(result_type) && input_type == Primitive::kPrimLong) ||
+ (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type))) {
+ call_kind = LocationSummary::kCall;
+ }
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
+
+ if (call_kind == LocationSummary::kNoCall) {
+ if (Primitive::IsFloatingPointType(input_type)) {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+
+ if (Primitive::IsFloatingPointType(result_type)) {
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ } else {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+ } else {
+ InvokeRuntimeCallingConvention calling_convention;
+
+ if (Primitive::IsFloatingPointType(input_type)) {
+ locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
+ } else {
+ DCHECK_EQ(input_type, Primitive::kPrimLong);
+ locations->SetInAt(0, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+ }
+
+ locations->SetOut(calling_convention.GetReturnLocation(result_type));
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations = conversion->GetLocations();
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ bool has_sign_extension = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
+
+ DCHECK_NE(input_type, result_type);
+
+ if (result_type == Primitive::kPrimLong && Primitive::IsIntegralType(input_type)) {
+ Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+ Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+ Register src = locations->InAt(0).AsRegister<Register>();
+
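+ // Sign-extend the 32-bit source into a register pair: the low word is a copy
+ // of the source and the high word is filled with its sign bit.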
+ __ Move(dst_low, src);
+ __ Sra(dst_high, src, 31);
+ } else if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register src = (input_type == Primitive::kPrimLong)
+ ? locations->InAt(0).AsRegisterPairLow<Register>()
+ : locations->InAt(0).AsRegister<Register>();
+
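+ // Integral-to-integral conversions: char is zero-extended to 16 bits, byte and
+ // short are sign-extended (SEB/SEH on MIPS32r2+, a shift pair otherwise), and
+ // int simply takes the (low) source word.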
+ switch (result_type) {
+ case Primitive::kPrimChar:
+ __ Andi(dst, src, 0xFFFF);
+ break;
+ case Primitive::kPrimByte:
+ if (has_sign_extension) {
+ __ Seb(dst, src);
+ } else {
+ __ Sll(dst, src, 24);
+ __ Sra(dst, dst, 24);
+ }
+ break;
+ case Primitive::kPrimShort:
+ if (has_sign_extension) {
+ __ Seh(dst, src);
+ } else {
+ __ Sll(dst, src, 16);
+ __ Sra(dst, dst, 16);
+ }
+ break;
+ case Primitive::kPrimInt:
+ __ Move(dst, src);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
+ if (input_type != Primitive::kPrimLong) {
+ Register src = locations->InAt(0).AsRegister<Register>();
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ __ Mtc1(src, FTMP);
+ if (result_type == Primitive::kPrimFloat) {
+ __ Cvtsw(dst, FTMP);
+ } else {
+ __ Cvtdw(dst, FTMP);
+ }
+ } else {
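+ // Conversions from 64-bit integers are not done inline; they go through the
+ // pL2f/pL2d quick entrypoints instead.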
+ int32_t entry_offset = (result_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pL2f)
+ : QUICK_ENTRY_POINT(pL2d);
+ bool direct = (result_type == Primitive::kPrimFloat) ? IsDirectEntrypoint(kQuickL2f)
+ : IsDirectEntrypoint(kQuickL2d);
+ codegen_->InvokeRuntime(entry_offset,
+ conversion,
+ conversion->GetDexPc(),
+ nullptr,
+ direct);
+ if (result_type == Primitive::kPrimFloat) {
+ CheckEntrypointTypes<kQuickL2f, float, int64_t>();
+ } else {
+ CheckEntrypointTypes<kQuickL2d, double, int64_t>();
+ }
+ }
+ } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
+ CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
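+ // Floating-point to integral conversions go through the truncating quick
+ // entrypoints: F2iz/D2iz for int results, F2l/D2l for long results.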
+ int32_t entry_offset;
+ bool direct;
+ if (result_type != Primitive::kPrimLong) {
+ entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2iz)
+ : QUICK_ENTRY_POINT(pD2iz);
+ direct = (input_type == Primitive::kPrimFloat) ? IsDirectEntrypoint(kQuickF2iz)
+ : IsDirectEntrypoint(kQuickD2iz);
+ } else {
+ entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2l)
+ : QUICK_ENTRY_POINT(pD2l);
+ direct = (input_type == Primitive::kPrimFloat) ? IsDirectEntrypoint(kQuickF2l)
+ : IsDirectEntrypoint(kQuickD2l);
+ }
+ codegen_->InvokeRuntime(entry_offset,
+ conversion,
+ conversion->GetDexPc(),
+ nullptr,
+ direct);
+ if (result_type != Primitive::kPrimLong) {
+ if (input_type == Primitive::kPrimFloat) {
+ CheckEntrypointTypes<kQuickF2iz, int32_t, float>();
+ } else {
+ CheckEntrypointTypes<kQuickD2iz, int32_t, double>();
+ }
+ } else {
+ if (input_type == Primitive::kPrimFloat) {
+ CheckEntrypointTypes<kQuickF2l, int64_t, float>();
+ } else {
+ CheckEntrypointTypes<kQuickD2l, int64_t, double>();
+ }
+ }
+ } else if (Primitive::IsFloatingPointType(result_type) &&
+ Primitive::IsFloatingPointType(input_type)) {
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
+ if (result_type == Primitive::kPrimFloat) {
+ __ Cvtsd(dst, src);
+ } else {
+ __ Cvtds(dst, src);
+ }
+ } else {
+ LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
+void LocationsBuilderMIPS::VisitUShr(HUShr* ushr) {
+ HandleShift(ushr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitUShr(HUShr* ushr) {
+ HandleShift(ushr);
+}
+
+void LocationsBuilderMIPS::VisitXor(HXor* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitXor(HXor* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
+ // Nothing to do, this should be removed during prepare for register allocator.
+ LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorMIPS::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
+ // Nothing to do, this should be removed during prepare for register allocator.
+ LOG(FATAL) << "Unreachable";
+}
+
+void LocationsBuilderMIPS::VisitEqual(HEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitEqual(HEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitNotEqual(HNotEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitNotEqual(HNotEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitLessThan(HLessThan* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitLessThan(HLessThan* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitGreaterThan(HGreaterThan* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitGreaterThan(HGreaterThan* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitFakeString(HFakeString* instruction) {
+ DCHECK(codegen_->IsBaseline());
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
+}
+
+void InstructionCodeGeneratorMIPS::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
+ DCHECK(codegen_->IsBaseline());
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
+ int32_t lower_bound = switch_instr->GetStartValue();
+ int32_t num_entries = switch_instr->GetNumEntries();
+ LocationSummary* locations = switch_instr->GetLocations();
+ Register value_reg = locations->InAt(0).AsRegister<Register>();
+ HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+
+ // Create a set of compare/jumps.
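+ // Each case value in [lower_bound, lower_bound + num_entries) is compared
+ // against the switch value: zero uses BEQZ directly, other values are first
+ // materialized into TMP.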
+ const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
+ for (int32_t i = 0; i < num_entries; ++i) {
+ int32_t case_value = lower_bound + i;
+ MipsLabel* successor_label = codegen_->GetLabelOf(successors[i]);
+ if (case_value == 0) {
+ __ Beqz(value_reg, successor_label);
+ } else {
+ __ LoadConst32(TMP, case_value);
+ __ Beq(value_reg, TMP, successor_label);
+ }
+ }
+
+ // If no case matched, branch to the default block (unless it is the fall-through successor).
+ if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
+ __ B(codegen_->GetLabelOf(default_block));
+ }
+}
+
+void LocationsBuilderMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
+ // The trampoline uses the same calling convention as a regular dex call,
+ // except that arg0 (A0 on MIPS) holds the method_idx instead of the target
+ // Method*.
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
+ codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
+}
+
+#undef __
+#undef QUICK_ENTRY_POINT
+
+} // namespace mips
+} // namespace art
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
new file mode 100644
index 0000000..a571e76
--- /dev/null
+++ b/compiler/optimizing/code_generator_mips.h
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS_H_
+#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS_H_
+
+#include "code_generator.h"
+#include "dex/compiler_enums.h"
+#include "driver/compiler_options.h"
+#include "nodes.h"
+#include "parallel_move_resolver.h"
+#include "utils/mips/assembler_mips.h"
+
+namespace art {
+namespace mips {
+
+// InvokeDexCallingConvention registers
+
+static constexpr Register kParameterCoreRegisters[] =
+ { A1, A2, A3 };
+static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
+
+static constexpr FRegister kParameterFpuRegisters[] =
+ { F12, F14 };
+static constexpr size_t kParameterFpuRegistersLength = arraysize(kParameterFpuRegisters);
+
+
+// InvokeRuntimeCallingConvention registers
+
+static constexpr Register kRuntimeParameterCoreRegisters[] =
+ { A0, A1, A2, A3 };
+static constexpr size_t kRuntimeParameterCoreRegistersLength =
+ arraysize(kRuntimeParameterCoreRegisters);
+
+static constexpr FRegister kRuntimeParameterFpuRegisters[] =
+ { F12, F14 };
+static constexpr size_t kRuntimeParameterFpuRegistersLength =
+ arraysize(kRuntimeParameterFpuRegisters);
+
+
+static constexpr Register kCoreCalleeSaves[] =
+ { S0, S1, S2, S3, S4, S5, S6, S7, FP, RA };
+static constexpr FRegister kFpuCalleeSaves[] =
+ { F20, F22, F24, F26, F28, F30 };
+
+
+class CodeGeneratorMIPS;
+
+class InvokeDexCallingConvention : public CallingConvention<Register, FRegister> {
+ public:
+ InvokeDexCallingConvention()
+ : CallingConvention(kParameterCoreRegisters,
+ kParameterCoreRegistersLength,
+ kParameterFpuRegisters,
+ kParameterFpuRegistersLength,
+ kMipsPointerSize) {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
+};
+
+class InvokeDexCallingConventionVisitorMIPS : public InvokeDexCallingConventionVisitor {
+ public:
+ InvokeDexCallingConventionVisitorMIPS() {}
+ virtual ~InvokeDexCallingConventionVisitorMIPS() {}
+
+ Location GetNextLocation(Primitive::Type type) OVERRIDE;
+ Location GetReturnLocation(Primitive::Type type) const OVERRIDE;
+ Location GetMethodLocation() const OVERRIDE;
+
+ private:
+ InvokeDexCallingConvention calling_convention;
+
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorMIPS);
+};
+
+class InvokeRuntimeCallingConvention : public CallingConvention<Register, FRegister> {
+ public:
+ InvokeRuntimeCallingConvention()
+ : CallingConvention(kRuntimeParameterCoreRegisters,
+ kRuntimeParameterCoreRegistersLength,
+ kRuntimeParameterFpuRegisters,
+ kRuntimeParameterFpuRegistersLength,
+ kMipsPointerSize) {}
+
+ Location GetReturnLocation(Primitive::Type return_type);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
+};
+
+class FieldAccessCallingConventionMIPS : public FieldAccessCallingConvention {
+ public:
+ FieldAccessCallingConventionMIPS() {}
+
+ Location GetObjectLocation() const OVERRIDE {
+ return Location::RegisterLocation(A1);
+ }
+ Location GetFieldIndexLocation() const OVERRIDE {
+ return Location::RegisterLocation(A0);
+ }
+ Location GetReturnLocation(Primitive::Type type) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterPairLocation(V0, V1)
+ : Location::RegisterLocation(V0);
+ }
+ Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterPairLocation(A2, A3)
+ : (is_instance ? Location::RegisterLocation(A2) : Location::RegisterLocation(A1));
+ }
+ Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return Location::FpuRegisterLocation(F0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionMIPS);
+};
+
+class ParallelMoveResolverMIPS : public ParallelMoveResolverWithSwap {
+ public:
+ ParallelMoveResolverMIPS(ArenaAllocator* allocator, CodeGeneratorMIPS* codegen)
+ : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
+
+ void EmitMove(size_t index) OVERRIDE;
+ void EmitSwap(size_t index) OVERRIDE;
+ void SpillScratch(int reg) OVERRIDE;
+ void RestoreScratch(int reg) OVERRIDE;
+
+ void Exchange(int index1, int index2, bool double_slot);
+
+ MipsAssembler* GetAssembler() const;
+
+ private:
+ CodeGeneratorMIPS* const codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverMIPS);
+};
+
+class SlowPathCodeMIPS : public SlowPathCode {
+ public:
+ SlowPathCodeMIPS() : entry_label_(), exit_label_() {}
+
+ MipsLabel* GetEntryLabel() { return &entry_label_; }
+ MipsLabel* GetExitLabel() { return &exit_label_; }
+
+ private:
+ MipsLabel entry_label_;
+ MipsLabel exit_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowPathCodeMIPS);
+};
+
+class LocationsBuilderMIPS : public HGraphVisitor {
+ public:
+ LocationsBuilderMIPS(HGraph* graph, CodeGeneratorMIPS* codegen)
+ : HGraphVisitor(graph), codegen_(codegen) {}
+
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
+ void Visit##name(H##name* instr) OVERRIDE;
+
+ FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION)
+
+#undef DECLARE_VISIT_INSTRUCTION
+
+ void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
+ << " (id " << instruction->GetId() << ")";
+ }
+
+ private:
+ void HandleInvoke(HInvoke* invoke);
+ void HandleBinaryOp(HBinaryOperation* operation);
+ void HandleShift(HBinaryOperation* operation);
+ void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
+ void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
+
+ InvokeDexCallingConventionVisitorMIPS parameter_visitor_;
+
+ CodeGeneratorMIPS* const codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(LocationsBuilderMIPS);
+};
+
+class InstructionCodeGeneratorMIPS : public HGraphVisitor {
+ public:
+ InstructionCodeGeneratorMIPS(HGraph* graph, CodeGeneratorMIPS* codegen);
+
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
+ void Visit##name(H##name* instr) OVERRIDE;
+
+ FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION)
+
+#undef DECLARE_VISIT_INSTRUCTION
+
+ void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
+ << " (id " << instruction->GetId() << ")";
+ }
+
+ MipsAssembler* GetAssembler() const { return assembler_; }
+
+ private:
+ void GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path, Register class_reg);
+ void GenerateMemoryBarrier(MemBarrierKind kind);
+ void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
+ void HandleBinaryOp(HBinaryOperation* operation);
+ void HandleShift(HBinaryOperation* operation);
+ void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc);
+ void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc);
+ void GenerateImplicitNullCheck(HNullCheck* instruction);
+ void GenerateExplicitNullCheck(HNullCheck* instruction);
+ void GenerateTestAndBranch(HInstruction* instruction,
+ MipsLabel* true_target,
+ MipsLabel* false_target,
+ MipsLabel* always_true_target);
+ void HandleGoto(HInstruction* got, HBasicBlock* successor);
+
+ MipsAssembler* const assembler_;
+ CodeGeneratorMIPS* const codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorMIPS);
+};
+
+class CodeGeneratorMIPS : public CodeGenerator {
+ public:
+ CodeGeneratorMIPS(HGraph* graph,
+ const MipsInstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats = nullptr);
+ virtual ~CodeGeneratorMIPS() {}
+
+ void GenerateFrameEntry() OVERRIDE;
+ void GenerateFrameExit() OVERRIDE;
+
+ void Bind(HBasicBlock* block) OVERRIDE;
+
+ void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+ void Move32(Location destination, Location source);
+ void Move64(Location destination, Location source);
+ void MoveConstant(Location location, HConstant* c);
+
+ size_t GetWordSize() const OVERRIDE { return kMipsWordSize; }
+
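+ // FP spill slots are doubleword-sized so that double-precision registers fit.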
+ size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return kMipsDoublewordSize; }
+
+ uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ return assembler_.GetLabelLocation(GetLabelOf(block));
+ }
+
+ HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
+ HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
+ MipsAssembler* GetAssembler() OVERRIDE { return &assembler_; }
+ const MipsAssembler& GetAssembler() const OVERRIDE { return assembler_; }
+
+ void MarkGCCard(Register object, Register value);
+
+ // Register allocation.
+
+ void SetupBlockedRegisters(bool is_baseline) const OVERRIDE;
+ // AllocateFreeRegister() is only used when allocating registers locally
+ // during CompileBaseline().
+ Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+
+ Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id);
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id);
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id);
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id);
+
+ void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+
+ // Blocks all register pairs made out of blocked core registers.
+ void UpdateBlockedPairRegisters() const;
+
+ InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kMips; }
+
+ const MipsInstructionSetFeatures& GetInstructionSetFeatures() const {
+ return isa_features_;
+ }
+
+ MipsLabel* GetLabelOf(HBasicBlock* block) const {
+ return CommonGetLabelOf<MipsLabel>(block_labels_, block);
+ }
+
+ void Initialize() OVERRIDE {
+ block_labels_ = CommonInitializeLabels<MipsLabel>();
+ }
+
+ void Finalize(CodeAllocator* allocator) OVERRIDE;
+
+ // Code generation helpers.
+
+ void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+
+ void MoveConstant(Location destination, int32_t value);
+
+ void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
+ // Generate code to invoke a runtime entry point.
+ void InvokeRuntime(QuickEntrypointEnum entrypoint,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path) OVERRIDE;
+
+ void InvokeRuntime(int32_t offset,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path,
+ bool is_direct_entrypoint);
+
+ ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
+
+ bool NeedsTwoRegisters(Primitive::Type type) const {
+ return type == Primitive::kPrimLong;
+ }
+
+ void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
+ void GenerateVirtualCall(HInvokeVirtual* invoke ATTRIBUTE_UNUSED,
+ Location temp ATTRIBUTE_UNUSED) OVERRIDE {
+ UNIMPLEMENTED(FATAL) << "Not implemented on MIPS";
+ }
+
+ void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
+ Primitive::Type type ATTRIBUTE_UNUSED) OVERRIDE {
+ UNIMPLEMENTED(FATAL) << "Not implemented on MIPS";
+ }
+
+ private:
+ // Labels for each block that will be compiled.
+ MipsLabel* block_labels_;
+ MipsLabel frame_entry_label_;
+ LocationsBuilderMIPS location_builder_;
+ InstructionCodeGeneratorMIPS instruction_visitor_;
+ ParallelMoveResolverMIPS move_resolver_;
+ MipsAssembler assembler_;
+ const MipsInstructionSetFeatures& isa_features_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeGeneratorMIPS);
+};
+
+} // namespace mips
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS_H_
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index f561c97..5f78285 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -342,8 +342,7 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
- : locations->Out();
+ Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) : locations->Out();
uint32_t dex_pc = instruction_->GetDexPc();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 7799437..df3fc0d 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -217,9 +217,6 @@
Mips64Assembler* GetAssembler() const { return assembler_; }
private:
- // Generate code for the given suspend check. If not null, `successor`
- // is the block to branch to if the suspend check is not needed, and after
- // the suspend call.
void GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path, GpuRegister class_reg);
void GenerateMemoryBarrier(MemBarrierKind kind);
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index fe5af2f..57de41f 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -20,6 +20,8 @@
#include "arch/arm/instruction_set_features_arm.h"
#include "arch/arm/registers_arm.h"
#include "arch/arm64/instruction_set_features_arm64.h"
+#include "arch/mips/instruction_set_features_mips.h"
+#include "arch/mips/registers_mips.h"
#include "arch/mips64/instruction_set_features_mips64.h"
#include "arch/mips64/registers_mips64.h"
#include "arch/x86/instruction_set_features_x86.h"
@@ -29,6 +31,7 @@
#include "builder.h"
#include "code_generator_arm.h"
#include "code_generator_arm64.h"
+#include "code_generator_mips.h"
#include "code_generator_mips64.h"
#include "code_generator_x86.h"
#include "code_generator_x86_64.h"
@@ -43,6 +46,7 @@
#include "ssa_liveness_analysis.h"
#include "utils.h"
#include "utils/arm/managed_register_arm.h"
+#include "utils/mips/managed_register_mips.h"
#include "utils/mips64/managed_register_mips64.h"
#include "utils/x86/managed_register_x86.h"
@@ -177,6 +181,14 @@
Run(allocator, codegenARM64, has_result, expected);
}
+ std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
+ MipsInstructionSetFeatures::FromCppDefines());
+ mips::CodeGeneratorMIPS codegenMIPS(graph, *features_mips.get(), compiler_options);
+ codegenMIPS.CompileBaseline(&allocator, true);
+ if (kRuntimeISA == kMips) {
+ Run(allocator, codegenMIPS, has_result, expected);
+ }
+
std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
Mips64InstructionSetFeatures::FromCppDefines());
mips64::CodeGeneratorMIPS64 codegenMIPS64(graph, *features_mips64.get(), compiler_options);
@@ -234,6 +246,11 @@
X86_64InstructionSetFeatures::FromCppDefines());
x86_64::CodeGeneratorX86_64 codegenX86_64(graph, *features_x86_64.get(), compiler_options);
RunCodeOptimized(&codegenX86_64, graph, hook_before_codegen, has_result, expected);
+ } else if (kRuntimeISA == kMips) {
+ std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
+ MipsInstructionSetFeatures::FromCppDefines());
+ mips::CodeGeneratorMIPS codegenMIPS(graph, *features_mips.get(), compiler_options);
+ RunCodeOptimized(&codegenMIPS, graph, hook_before_codegen, has_result, expected);
} else if (kRuntimeISA == kMips64) {
std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
Mips64InstructionSetFeatures::FromCppDefines());
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 4abe5e9..e1a8c9c 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -203,19 +203,23 @@
int64_t value = CodeGenerator::GetInt64ValueOf(constant);
- if (instr->IsAdd() || instr->IsSub() || instr->IsCondition() ||
- instr->IsCompare() || instr->IsBoundsCheck()) {
+ if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
+ // Uses logical operations.
+ return vixl::Assembler::IsImmLogical(value, vixl::kXRegSize);
+ } else if (instr->IsNeg()) {
+ // Uses mov -immediate.
+ return vixl::Assembler::IsImmMovn(value, vixl::kXRegSize);
+ } else {
+ DCHECK(instr->IsAdd() ||
+ instr->IsArm64IntermediateAddress() ||
+ instr->IsBoundsCheck() ||
+ instr->IsCompare() ||
+ instr->IsCondition() ||
+ instr->IsSub());
// Uses aliases of ADD/SUB instructions.
// If `value` does not fit but `-value` does, VIXL will automatically use
// the 'opposite' instruction.
return vixl::Assembler::IsImmAddSub(value) || vixl::Assembler::IsImmAddSub(-value);
- } else if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
- // Uses logical operations.
- return vixl::Assembler::IsImmLogical(value, vixl::kXRegSize);
- } else {
- DCHECK(instr->IsNeg());
- // Uses mov -immediate.
- return vixl::Assembler::IsImmMovn(value, vixl::kXRegSize);
}
}
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index b2e222f..2feb75c 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -569,7 +569,7 @@
Instruction::ADD_INT_LIT16 | 1 << 8 | 0 << 12, 5,
Instruction::GOTO | 4 << 8,
Instruction::ADD_INT_LIT16 | 0 << 8 | 2 << 12, 4,
- static_cast<uint16_t>(Instruction::GOTO | -5 << 8),
+ static_cast<uint16_t>(Instruction::GOTO | 0xFFFFFFFB << 8),
Instruction::ADD_INT_LIT16 | 2 << 8 | 1 << 12, 8,
Instruction::RETURN | 2 << 8);
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index cf0a4ac..2c6a1ef 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -140,7 +140,7 @@
Instruction::ADD_INT_LIT16 | 1 << 8 | 0 << 12, 3,
Instruction::GOTO | 4 << 8,
Instruction::ADD_INT_LIT16 | 0 << 8 | 2 << 12, 2,
- static_cast<uint16_t>(Instruction::GOTO | -5 << 8),
+ static_cast<uint16_t>(Instruction::GOTO | 0xFFFFFFFB << 8),
Instruction::ADD_INT_LIT16 | 2 << 8 | 1 << 12, 4,
Instruction::RETURN_VOID);
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 0a1758a..c36de84 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -16,11 +16,11 @@
#include "gvn.h"
+#include "base/arena_bit_vector.h"
#include "base/arena_containers.h"
#include "base/bit_vector-inl.h"
#include "side_effects_analysis.h"
#include "utils.h"
-#include "utils/arena_bit_vector.h"
namespace art {
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index 56f2718..de60cf2 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -34,7 +34,10 @@
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
+ 0,
+ 0,
+ Primitive::kPrimNot);
entry->AddInstruction(parameter);
HBasicBlock* block = new (&allocator) HBasicBlock(graph);
@@ -46,6 +49,7 @@
MemberOffset(42),
false,
kUnknownFieldIndex,
+ kUnknownClassDefIndex,
graph->GetDexFile(),
dex_cache,
0));
@@ -54,6 +58,7 @@
MemberOffset(42),
false,
kUnknownFieldIndex,
+ kUnknownClassDefIndex,
graph->GetDexFile(),
dex_cache,
0));
@@ -63,6 +68,7 @@
MemberOffset(43),
false,
kUnknownFieldIndex,
+ kUnknownClassDefIndex,
graph->GetDexFile(),
dex_cache,
0));
@@ -74,6 +80,7 @@
MemberOffset(42),
false,
kUnknownFieldIndex,
+ kUnknownClassDefIndex,
graph->GetDexFile(),
dex_cache,
0));
@@ -82,6 +89,7 @@
MemberOffset(42),
false,
kUnknownFieldIndex,
+ kUnknownClassDefIndex,
graph->GetDexFile(),
dex_cache,
0));
@@ -111,7 +119,10 @@
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
+ 0,
+ 0,
+ Primitive::kPrimNot);
entry->AddInstruction(parameter);
HBasicBlock* block = new (&allocator) HBasicBlock(graph);
@@ -122,6 +133,7 @@
MemberOffset(42),
false,
kUnknownFieldIndex,
+ kUnknownClassDefIndex,
graph->GetDexFile(),
dex_cache,
0));
@@ -144,6 +156,7 @@
MemberOffset(42),
false,
kUnknownFieldIndex,
+ kUnknownClassDefIndex,
graph->GetDexFile(),
dex_cache,
0));
@@ -153,6 +166,7 @@
MemberOffset(42),
false,
kUnknownFieldIndex,
+ kUnknownClassDefIndex,
graph->GetDexFile(),
dex_cache,
0));
@@ -162,6 +176,7 @@
MemberOffset(42),
false,
kUnknownFieldIndex,
+ kUnknownClassDefIndex,
graph->GetDexFile(),
dex_cache,
0));
@@ -188,7 +203,10 @@
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
+ 0,
+ 0,
+ Primitive::kPrimNot);
entry->AddInstruction(parameter);
HBasicBlock* block = new (&allocator) HBasicBlock(graph);
@@ -199,6 +217,7 @@
MemberOffset(42),
false,
kUnknownFieldIndex,
+ kUnknownClassDefIndex,
graph->GetDexFile(),
dex_cache,
0));
@@ -221,6 +240,7 @@
MemberOffset(42),
false,
kUnknownFieldIndex,
+ kUnknownClassDefIndex,
graph->GetDexFile(),
dex_cache,
0));
@@ -235,6 +255,7 @@
MemberOffset(42),
false,
kUnknownFieldIndex,
+ kUnknownClassDefIndex,
graph->GetDexFile(),
dex_cache,
0));
@@ -244,6 +265,7 @@
MemberOffset(42),
false,
kUnknownFieldIndex,
+ kUnknownClassDefIndex,
graph->GetDexFile(),
dex_cache,
0));
@@ -255,6 +277,7 @@
MemberOffset(42),
false,
kUnknownFieldIndex,
+ kUnknownClassDefIndex,
graph->GetDexFile(),
dex_cache,
0));
@@ -328,7 +351,10 @@
inner_loop_body->AddSuccessor(inner_loop_header);
inner_loop_exit->AddSuccessor(outer_loop_header);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimBoolean);
+ HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
+ 0,
+ 0,
+ Primitive::kPrimBoolean);
entry->AddInstruction(parameter);
entry->AddInstruction(new (&allocator) HGoto());
outer_loop_header->AddInstruction(new (&allocator) HIf(parameter));
@@ -352,6 +378,7 @@
MemberOffset(42),
false,
kUnknownFieldIndex,
+ kUnknownClassDefIndex,
graph->GetDexFile(),
dex_cache,
0));
@@ -376,6 +403,7 @@
MemberOffset(42),
false,
kUnknownFieldIndex,
+ kUnknownClassDefIndex,
graph->GetDexFile(),
dex_cache,
0),
@@ -401,6 +429,7 @@
MemberOffset(42),
false,
kUnknownFieldIndex,
+ kUnknownClassDefIndex,
graph->GetDexFile(),
dex_cache,
0),
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index fd1e334..f16da2a 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -77,7 +77,8 @@
graph_->SetExitBlock(exit_);
// Provide entry and exit instructions.
- parameter_ = new (&allocator_) HParameterValue(0, Primitive::kPrimNot, true);
+ parameter_ = new (&allocator_) HParameterValue(
+ graph_->GetDexFile(), 0, 0, Primitive::kPrimNot, true);
entry_->AddInstruction(parameter_);
constant0_ = graph_->GetIntConstant(0);
constant1_ = graph_->GetIntConstant(1);
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index 56f661e..8fbc59f 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -415,7 +415,8 @@
}
TEST_F(InductionVarRangeTest, FindRangeSymbolicTripCount) {
- HInstruction* parameter = new (&allocator_) HParameterValue(0, Primitive::kPrimInt);
+ HInstruction* parameter = new (&allocator_) HParameterValue(
+ graph_->GetDexFile(), 0, 0, Primitive::kPrimInt);
entry_block_->AddInstruction(parameter);
BuildLoop(parameter);
PerformInductionVarAnalysis();
@@ -433,7 +434,8 @@
}
TEST_F(InductionVarRangeTest, CodeGeneration) {
- HInstruction* parameter = new (&allocator_) HParameterValue(0, Primitive::kPrimInt);
+ HInstruction* parameter = new (&allocator_) HParameterValue(
+ graph_->GetDexFile(), 0, 0, Primitive::kPrimInt);
entry_block_->AddInstruction(parameter);
BuildLoop(parameter);
PerformInductionVarAnalysis();
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index f3b5f08..e2aca30 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -495,6 +495,9 @@
number_of_inlined_instructions_ += number_of_instructions;
HInstruction* return_replacement = callee_graph->InlineInto(graph_, invoke_instruction);
+ if (return_replacement != nullptr) {
+ DCHECK_EQ(graph_, return_replacement->GetBlock()->GetGraph());
+ }
// When merging the graph we might create a new NullConstant in the caller graph which does
// not have the chance to be typed. We assign the correct type here so that we can keep the
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 7814eb9..b97dc1a 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -625,9 +625,9 @@
// Try to fold an HCompare into this HCondition.
// This simplification is currently supported on x86, x86_64, ARM and ARM64.
- // TODO: Implement it for MIPS64.
+ // TODO: Implement it for MIPS and MIPS64.
InstructionSet instruction_set = GetGraph()->GetInstructionSet();
- if (instruction_set == kMips64) {
+ if (instruction_set == kMips || instruction_set == kMips64) {
return;
}
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index 4b2d36f..eb79f46 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -16,8 +16,65 @@
#include "instruction_simplifier_arm64.h"
+#include "mirror/array-inl.h"
+
namespace art {
namespace arm64 {
+void InstructionSimplifierArm64Visitor::TryExtractArrayAccessAddress(HInstruction* access,
+ HInstruction* array,
+ HInstruction* index,
+ int access_size) {
+ if (index->IsConstant() ||
+ (index->IsBoundsCheck() && index->AsBoundsCheck()->GetIndex()->IsConstant())) {
+ // When the index is a constant, all the addressing can be folded into the
+ // memory access instruction, so do not split the access.
+ return;
+ }
+ if (access->IsArraySet() &&
+ access->AsArraySet()->GetValue()->GetType() == Primitive::kPrimNot) {
+ // The access may require a runtime call or the original array pointer.
+ return;
+ }
+
+ // Proceed to extract the base address computation.
+ ArenaAllocator* arena = GetGraph()->GetArena();
+
+ HIntConstant* offset =
+ GetGraph()->GetIntConstant(mirror::Array::DataOffset(access_size).Uint32Value());
+ HArm64IntermediateAddress* address =
+ new (arena) HArm64IntermediateAddress(array, offset, kNoDexPc);
+ access->GetBlock()->InsertInstructionBefore(address, access);
+ access->ReplaceInput(address, 0);
+ // Both instructions must depend on GC to prevent any GC-triggering
+ // instruction from being inserted between the two.
+ access->AddSideEffects(SideEffects::DependsOnGC());
+ DCHECK(address->GetSideEffects().Includes(SideEffects::DependsOnGC()));
+ DCHECK(access->GetSideEffects().Includes(SideEffects::DependsOnGC()));
+ // TODO: Code generation for HArrayGet and HArraySet will check whether the input address
+ // is an HArm64IntermediateAddress and generate appropriate code.
+ // We would like to replace the `HArrayGet` and `HArraySet` with custom instructions (maybe
+ // `HArm64Load` and `HArm64Store`). We defer these changes because these new instructions would
+ // not bring any advantages yet.
+ // Also see the comments in
+ // `InstructionCodeGeneratorARM64::VisitArrayGet()` and
+ // `InstructionCodeGeneratorARM64::VisitArraySet()`.
+ RecordSimplification();
+}
+
+void InstructionSimplifierArm64Visitor::VisitArrayGet(HArrayGet* instruction) {
+ TryExtractArrayAccessAddress(instruction,
+ instruction->GetArray(),
+ instruction->GetIndex(),
+ Primitive::ComponentSize(instruction->GetType()));
+}
+
+void InstructionSimplifierArm64Visitor::VisitArraySet(HArraySet* instruction) {
+ TryExtractArrayAccessAddress(instruction,
+ instruction->GetArray(),
+ instruction->GetIndex(),
+ Primitive::ComponentSize(instruction->GetComponentType()));
+}
+
} // namespace arm64
} // namespace art
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index d7f4eae..4b697db 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -35,6 +35,14 @@
}
}
+ void TryExtractArrayAccessAddress(HInstruction* access,
+ HInstruction* array,
+ HInstruction* index,
+ int access_size);
+
+ void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
+ void VisitArraySet(HArraySet* instruction) OVERRIDE;
+
OptimizingCompilerStats* stats_;
};
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 58e479a..0a5acc3 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -961,6 +961,14 @@
CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
}
void IntrinsicLocationsBuilderARM::VisitUnsafeCASObject(HInvoke* invoke) {
+ // The UnsafeCASObject intrinsic does not always work when heap
+ // poisoning is enabled (it breaks run-test 004-UnsafeTest); turn it
+ // off temporarily as a quick fix.
+ // TODO(rpl): Fix it and turn it back on.
+ if (kPoisonHeapReferences) {
+ return;
+ }
+
CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
}
void IntrinsicCodeGeneratorARM::VisitUnsafeCASInt(HInvoke* invoke) {
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 4da94ee..059abf0 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1087,6 +1087,14 @@
CreateIntIntIntIntIntToInt(arena_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeCASObject(HInvoke* invoke) {
+ // The UnsafeCASObject intrinsic does not always work when heap
+ // poisoning is enabled (it breaks run-test 004-UnsafeTest); turn it
+ // off temporarily as a quick fix.
+ // TODO(rpl): Fix it and turn it back on.
+ if (kPoisonHeapReferences) {
+ return;
+ }
+
CreateIntIntIntIntIntToInt(arena_, invoke);
}
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index e83aebb..040bf6a 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -45,7 +45,7 @@
X86Assembler* IntrinsicCodeGeneratorX86::GetAssembler() {
- return reinterpret_cast<X86Assembler*>(codegen_->GetAssembler());
+ return down_cast<X86Assembler*>(codegen_->GetAssembler());
}
ArenaAllocator* IntrinsicCodeGeneratorX86::GetAllocator() {
@@ -1728,7 +1728,7 @@
Primitive::Type type,
bool is_volatile,
CodeGeneratorX86* codegen) {
- X86Assembler* assembler = reinterpret_cast<X86Assembler*>(codegen->GetAssembler());
+ X86Assembler* assembler = down_cast<X86Assembler*>(codegen->GetAssembler());
Register base = locations->InAt(1).AsRegister<Register>();
Register offset = locations->InAt(2).AsRegisterPairLow<Register>();
Location value_loc = locations->InAt(3);
@@ -1822,7 +1822,7 @@
locations->SetOut(Location::RegisterLocation(EAX));
if (type == Primitive::kPrimNot) {
// Need temp registers for card-marking.
- locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister()); // Possibly used for reference poisoning too.
// Need a byte register for marking.
locations->AddTemp(Location::RegisterLocation(ECX));
}
@@ -1841,8 +1841,7 @@
}
static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86* codegen) {
- X86Assembler* assembler =
- reinterpret_cast<X86Assembler*>(codegen->GetAssembler());
+ X86Assembler* assembler = down_cast<X86Assembler*>(codegen->GetAssembler());
LocationSummary* locations = invoke->GetLocations();
Register base = locations->InAt(1).AsRegister<Register>();
@@ -1850,47 +1849,92 @@
Location out = locations->Out();
DCHECK_EQ(out.AsRegister<Register>(), EAX);
- if (type == Primitive::kPrimLong) {
- DCHECK_EQ(locations->InAt(3).AsRegisterPairLow<Register>(), EAX);
- DCHECK_EQ(locations->InAt(3).AsRegisterPairHigh<Register>(), EDX);
- DCHECK_EQ(locations->InAt(4).AsRegisterPairLow<Register>(), EBX);
- DCHECK_EQ(locations->InAt(4).AsRegisterPairHigh<Register>(), ECX);
- __ LockCmpxchg8b(Address(base, offset, TIMES_1, 0));
- } else {
- // Integer or object.
+ if (type == Primitive::kPrimNot) {
Register expected = locations->InAt(3).AsRegister<Register>();
+ // Ensure `expected` is in EAX (required by the CMPXCHG instruction).
DCHECK_EQ(expected, EAX);
Register value = locations->InAt(4).AsRegister<Register>();
- if (type == Primitive::kPrimNot) {
- // Mark card for object assuming new value is stored.
- bool value_can_be_null = true; // TODO: Worth finding out this information?
- codegen->MarkGCCard(locations->GetTemp(0).AsRegister<Register>(),
- locations->GetTemp(1).AsRegister<Register>(),
- base,
- value,
- value_can_be_null);
- if (kPoisonHeapReferences) {
- __ PoisonHeapReference(expected);
- __ PoisonHeapReference(value);
+ // Mark card for object assuming new value is stored.
+ bool value_can_be_null = true; // TODO: Worth finding out this information?
+ codegen->MarkGCCard(locations->GetTemp(0).AsRegister<Register>(),
+ locations->GetTemp(1).AsRegister<Register>(),
+ base,
+ value,
+ value_can_be_null);
+
+ bool base_equals_value = (base == value);
+ if (kPoisonHeapReferences) {
+ if (base_equals_value) {
+ // If `base` and `value` are the same register location, move
+ // `value` to a temporary register. This way, poisoning
+ // `value` won't invalidate `base`.
+ value = locations->GetTemp(0).AsRegister<Register>();
+ __ movl(value, base);
}
+
+ // Check that the register allocator did not assign the location
+ // of `expected` (EAX) to `value` nor to `base`, so that heap
+ // poisoning (when enabled) works as intended below.
+ // - If `value` were equal to `expected`, both references would
+ // be poisoned twice, meaning they would not be poisoned at
+ // all, as heap poisoning uses address negation.
+ // - If `base` were equal to `expected`, poisoning `expected`
+ // would invalidate `base`.
+ DCHECK_NE(value, expected);
+ DCHECK_NE(base, expected);
+
+ __ PoisonHeapReference(expected);
+ __ PoisonHeapReference(value);
}
__ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value);
- }
- // locked cmpxchg has full barrier semantics, and we don't need scheduling
- // barriers at this time.
+ // locked cmpxchg has full barrier semantics, and we don't need
+ // scheduling barriers at this time.
- // Convert ZF into the boolean result.
- __ setb(kZero, out.AsRegister<Register>());
- __ movzxb(out.AsRegister<Register>(), out.AsRegister<ByteRegister>());
+ // Convert ZF into the boolean result.
+ __ setb(kZero, out.AsRegister<Register>());
+ __ movzxb(out.AsRegister<Register>(), out.AsRegister<ByteRegister>());
- if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
- Register value = locations->InAt(4).AsRegister<Register>();
- __ UnpoisonHeapReference(value);
- // Do not unpoison the reference contained in register `expected`,
- // as it is the same as register `out`.
+ if (kPoisonHeapReferences) {
+ if (base_equals_value) {
+ // `value` has been moved to a temporary register, no need to
+ // unpoison it.
+ } else {
+ // Ensure `value` is different from `out`, so that unpoisoning
+ // the former does not invalidate the latter.
+ DCHECK_NE(value, out.AsRegister<Register>());
+ __ UnpoisonHeapReference(value);
+ }
+ // Do not unpoison the reference contained in register
+ // `expected`, as it is the same as register `out` (EAX).
+ }
+ } else {
+ if (type == Primitive::kPrimInt) {
+ // Ensure the expected value is in EAX (required by the CMPXCHG
+ // instruction).
+ DCHECK_EQ(locations->InAt(3).AsRegister<Register>(), EAX);
+ __ LockCmpxchgl(Address(base, offset, TIMES_1, 0),
+ locations->InAt(4).AsRegister<Register>());
+ } else if (type == Primitive::kPrimLong) {
+ // Ensure the expected value is in EAX:EDX and that the new
+ // value is in EBX:ECX (required by the CMPXCHG8B instruction).
+ DCHECK_EQ(locations->InAt(3).AsRegisterPairLow<Register>(), EAX);
+ DCHECK_EQ(locations->InAt(3).AsRegisterPairHigh<Register>(), EDX);
+ DCHECK_EQ(locations->InAt(4).AsRegisterPairLow<Register>(), EBX);
+ DCHECK_EQ(locations->InAt(4).AsRegisterPairHigh<Register>(), ECX);
+ __ LockCmpxchg8b(Address(base, offset, TIMES_1, 0));
+ } else {
+ LOG(FATAL) << "Unexpected CAS type " << type;
+ }
+
+ // locked cmpxchg has full barrier semantics, and we don't need
+ // scheduling barriers at this time.
+
+ // Convert ZF into the boolean result.
+ __ setb(kZero, out.AsRegister<Register>());
+ __ movzxb(out.AsRegister<Register>(), out.AsRegister<ByteRegister>());
}
}
@@ -1928,8 +1972,7 @@
}
void IntrinsicCodeGeneratorX86::VisitIntegerReverse(HInvoke* invoke) {
- X86Assembler* assembler =
- reinterpret_cast<X86Assembler*>(codegen_->GetAssembler());
+ X86Assembler* assembler = down_cast<X86Assembler*>(codegen_->GetAssembler());
LocationSummary* locations = invoke->GetLocations();
Register reg = locations->InAt(0).AsRegister<Register>();
@@ -1960,8 +2003,7 @@
}
void IntrinsicCodeGeneratorX86::VisitLongReverse(HInvoke* invoke) {
- X86Assembler* assembler =
- reinterpret_cast<X86Assembler*>(codegen_->GetAssembler());
+ X86Assembler* assembler = down_cast<X86Assembler*>(codegen_->GetAssembler());
LocationSummary* locations = invoke->GetLocations();
Register reg_low = locations->InAt(0).AsRegisterPairLow<Register>();
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index e0d88a9..14c65c9 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -41,7 +41,7 @@
X86_64Assembler* IntrinsicCodeGeneratorX86_64::GetAssembler() {
- return reinterpret_cast<X86_64Assembler*>(codegen_->GetAssembler());
+ return down_cast<X86_64Assembler*>(codegen_->GetAssembler());
}
ArenaAllocator* IntrinsicCodeGeneratorX86_64::GetAllocator() {
@@ -1822,7 +1822,7 @@
// memory model.
static void GenUnsafePut(LocationSummary* locations, Primitive::Type type, bool is_volatile,
CodeGeneratorX86_64* codegen) {
- X86_64Assembler* assembler = reinterpret_cast<X86_64Assembler*>(codegen->GetAssembler());
+ X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen->GetAssembler());
CpuRegister base = locations->InAt(1).AsRegister<CpuRegister>();
CpuRegister offset = locations->InAt(2).AsRegister<CpuRegister>();
CpuRegister value = locations->InAt(3).AsRegister<CpuRegister>();
@@ -1895,7 +1895,7 @@
locations->SetOut(Location::RequiresRegister());
if (type == Primitive::kPrimNot) {
// Need temp registers for card-marking.
- locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister()); // Possibly used for reference poisoning too.
locations->AddTemp(Location::RequiresRegister());
}
}
@@ -1913,49 +1913,91 @@
}
static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86_64* codegen) {
- X86_64Assembler* assembler =
- reinterpret_cast<X86_64Assembler*>(codegen->GetAssembler());
+ X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen->GetAssembler());
LocationSummary* locations = invoke->GetLocations();
CpuRegister base = locations->InAt(1).AsRegister<CpuRegister>();
CpuRegister offset = locations->InAt(2).AsRegister<CpuRegister>();
CpuRegister expected = locations->InAt(3).AsRegister<CpuRegister>();
+ // Ensure `expected` is in RAX (required by the CMPXCHG instruction).
DCHECK_EQ(expected.AsRegister(), RAX);
CpuRegister value = locations->InAt(4).AsRegister<CpuRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
- if (type == Primitive::kPrimLong) {
- __ LockCmpxchgq(Address(base, offset, TIMES_1, 0), value);
- } else {
- // Integer or object.
- if (type == Primitive::kPrimNot) {
- // Mark card for object assuming new value is stored.
- bool value_can_be_null = true; // TODO: Worth finding out this information?
- codegen->MarkGCCard(locations->GetTemp(0).AsRegister<CpuRegister>(),
- locations->GetTemp(1).AsRegister<CpuRegister>(),
- base,
- value,
- value_can_be_null);
+ if (type == Primitive::kPrimNot) {
+ // Mark card for object assuming new value is stored.
+ bool value_can_be_null = true; // TODO: Worth finding out this information?
+ codegen->MarkGCCard(locations->GetTemp(0).AsRegister<CpuRegister>(),
+ locations->GetTemp(1).AsRegister<CpuRegister>(),
+ base,
+ value,
+ value_can_be_null);
- if (kPoisonHeapReferences) {
- __ PoisonHeapReference(expected);
- __ PoisonHeapReference(value);
+ bool base_equals_value = (base.AsRegister() == value.AsRegister());
+ Register value_reg = value.AsRegister();
+ if (kPoisonHeapReferences) {
+ if (base_equals_value) {
+ // If `base` and `value` are the same register location, move
+ // `value_reg` to a temporary register. This way, poisoning
+ // `value_reg` won't invalidate `base`.
+ value_reg = locations->GetTemp(0).AsRegister<CpuRegister>().AsRegister();
+ __ movl(CpuRegister(value_reg), base);
}
+
+ // Check that the register allocator did not assign the location
+ // of `expected` (RAX) to `value` nor to `base`, so that heap
+ // poisoning (when enabled) works as intended below.
+ // - If `value` were equal to `expected`, both references would
+ // be poisoned twice, meaning they would not be poisoned at
+ // all, as heap poisoning uses address negation.
+ // - If `base` were equal to `expected`, poisoning `expected`
+ // would invalidate `base`.
+ DCHECK_NE(value_reg, expected.AsRegister());
+ DCHECK_NE(base.AsRegister(), expected.AsRegister());
+
+ __ PoisonHeapReference(expected);
+ __ PoisonHeapReference(CpuRegister(value_reg));
}
- __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value);
- }
+ __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), CpuRegister(value_reg));
- // locked cmpxchg has full barrier semantics, and we don't need scheduling
- // barriers at this time.
+ // locked cmpxchg has full barrier semantics, and we don't need
+ // scheduling barriers at this time.
- // Convert ZF into the boolean result.
- __ setcc(kZero, out);
- __ movzxb(out, out);
+ // Convert ZF into the boolean result.
+ __ setcc(kZero, out);
+ __ movzxb(out, out);
- if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
- __ UnpoisonHeapReference(value);
- __ UnpoisonHeapReference(expected);
+ if (kPoisonHeapReferences) {
+ if (base_equals_value) {
+ // `value_reg` has been moved to a temporary register, no need
+ // to unpoison it.
+ } else {
+ // Ensure `value` is different from `out`, so that unpoisoning
+ // the former does not invalidate the latter.
+ DCHECK_NE(value_reg, out.AsRegister());
+ __ UnpoisonHeapReference(CpuRegister(value_reg));
+ }
+ // Ensure `expected` is different from `out`, so that unpoisoning
+ // the former does not invalidate the latter.
+ DCHECK_NE(expected.AsRegister(), out.AsRegister());
+ __ UnpoisonHeapReference(expected);
+ }
+ } else {
+ if (type == Primitive::kPrimInt) {
+ __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value);
+ } else if (type == Primitive::kPrimLong) {
+ __ LockCmpxchgq(Address(base, offset, TIMES_1, 0), value);
+ } else {
+ LOG(FATAL) << "Unexpected CAS type " << type;
+ }
+
+ // locked cmpxchg has full barrier semantics, and we don't need
+ // scheduling barriers at this time.
+
+ // Convert ZF into the boolean result.
+ __ setcc(kZero, out);
+ __ movzxb(out, out);
}
}
@@ -1993,8 +2035,7 @@
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerReverse(HInvoke* invoke) {
- X86_64Assembler* assembler =
- reinterpret_cast<X86_64Assembler*>(codegen_->GetAssembler());
+ X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen_->GetAssembler());
LocationSummary* locations = invoke->GetLocations();
CpuRegister reg = locations->InAt(0).AsRegister<CpuRegister>();
@@ -2038,8 +2079,7 @@
}
void IntrinsicCodeGeneratorX86_64::VisitLongReverse(HInvoke* invoke) {
- X86_64Assembler* assembler =
- reinterpret_cast<X86_64Assembler*>(codegen_->GetAssembler());
+ X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen_->GetAssembler());
LocationSummary* locations = invoke->GetLocations();
CpuRegister reg = locations->InAt(0).AsRegister<CpuRegister>();
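A note on the heap-poisoning DCHECKs in the CAS sequence above: the claim that poisoning the same register twice cancels out follows from poisoning being reference negation. Below is a minimal standalone sketch (not ART code; it assumes poisoning is plain 32-bit two's-complement negation, as the comment in the patch states) showing the operation is its own inverse, which is why `value`, `expected` and `base` must live in distinct registers.

// Standalone illustration only; models a poisoned heap reference as the 32-bit negation
// of the original reference, per the "heap poisoning uses address negation" comment above.
#include <cassert>
#include <cstdint>

static uint32_t Poison(uint32_t ref) { return 0u - ref; }  // also serves as Unpoison.

int main() {
  const uint32_t ref = 0x12345678u;
  assert(Poison(ref) != ref);          // a single poisoning changes the register contents,
  assert(Poison(Poison(ref)) == ref);  // but poisoning twice restores the original value,
  // so if `value` and `expected` shared a register it would effectively stay unpoisoned.
  return 0;
}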
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
index 558892d..47457de 100644
--- a/compiler/optimizing/licm_test.cc
+++ b/compiler/optimizing/licm_test.cc
@@ -61,7 +61,7 @@
loop_body_->AddSuccessor(loop_header_);
// Provide boiler-plate instructions.
- parameter_ = new (&allocator_) HParameterValue(0, Primitive::kPrimNot);
+ parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry_->AddInstruction(parameter_);
constant_ = graph_->GetIntConstant(42);
loop_preheader_->AddInstruction(new (&allocator_) HGoto());
@@ -104,13 +104,19 @@
// Populate the loop with instructions: set/get field with different types.
NullHandle<mirror::DexCache> dex_cache;
- HInstruction* get_field = new (&allocator_) HInstanceFieldGet(
- parameter_, Primitive::kPrimLong, MemberOffset(10),
- false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache, 0);
+ HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_,
+ Primitive::kPrimLong,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ dex_cache,
+ 0);
loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
parameter_, constant_, Primitive::kPrimInt, MemberOffset(20),
- false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache, 0);
+ false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), dex_cache, 0);
loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
EXPECT_EQ(get_field->GetBlock(), loop_body_);
@@ -125,13 +131,26 @@
// Populate the loop with instructions: set/get field with same types.
NullHandle<mirror::DexCache> dex_cache;
- HInstruction* get_field = new (&allocator_) HInstanceFieldGet(
- parameter_, Primitive::kPrimLong, MemberOffset(10),
- false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache, 0);
+ HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_,
+ Primitive::kPrimLong,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ dex_cache,
+ 0);
loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
- HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
- parameter_, get_field, Primitive::kPrimLong, MemberOffset(10),
- false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache, 0);
+ HInstruction* set_field = new (&allocator_) HInstanceFieldSet(parameter_,
+ get_field,
+ Primitive::kPrimLong,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ dex_cache,
+ 0);
loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
EXPECT_EQ(get_field->GetBlock(), loop_body_);
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
new file mode 100644
index 0000000..90f28e5
--- /dev/null
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -0,0 +1,913 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "load_store_elimination.h"
+#include "side_effects_analysis.h"
+
+#include <iostream>
+
+namespace art {
+
+class ReferenceInfo;
+
+// A cap for the number of heap locations to prevent pathological time/space consumption.
+// The number of heap locations for most of the methods stays below this threshold.
+constexpr size_t kMaxNumberOfHeapLocations = 32;
+
+// A ReferenceInfo contains additional info about a reference such as
+// whether it's a singleton, returned, etc.
+class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
+ public:
+ ReferenceInfo(HInstruction* reference, size_t pos) : reference_(reference), position_(pos) {
+ is_singleton_ = true;
+ is_singleton_and_not_returned_ = true;
+ if (!reference_->IsNewInstance() && !reference_->IsNewArray()) {
+ // For references not allocated in the method, don't assume anything.
+ is_singleton_ = false;
+ is_singleton_and_not_returned_ = false;
+ return;
+ }
+
+ // Visit all uses to determine if this reference can spread into the heap,
+ // a method call, etc.
+ for (HUseIterator<HInstruction*> use_it(reference_->GetUses());
+ !use_it.Done();
+ use_it.Advance()) {
+ HInstruction* use = use_it.Current()->GetUser();
+ DCHECK(!use->IsNullCheck()) << "NullCheck should have been eliminated";
+ if (use->IsBoundType()) {
+ // BoundType shouldn't normally be necessary for a NewInstance.
+ // Just be conservative for the uncommon cases.
+ is_singleton_ = false;
+ is_singleton_and_not_returned_ = false;
+ return;
+ }
+ if (use->IsPhi() || use->IsInvoke() ||
+ (use->IsInstanceFieldSet() && (reference_ == use->InputAt(1))) ||
+ (use->IsUnresolvedInstanceFieldSet() && (reference_ == use->InputAt(1))) ||
+ (use->IsStaticFieldSet() && (reference_ == use->InputAt(1))) ||
+ (use->IsUnresolvedStaticFieldSet() && (reference_ == use->InputAt(1))) ||
+ (use->IsArraySet() && (reference_ == use->InputAt(2)))) {
+ // reference_ is merged to a phi, passed to a callee, or stored to heap.
+ // reference_ isn't the only name that can refer to its value anymore.
+ is_singleton_ = false;
+ is_singleton_and_not_returned_ = false;
+ return;
+ }
+ if (use->IsReturn()) {
+ is_singleton_and_not_returned_ = false;
+ }
+ }
+ }
+
+ HInstruction* GetReference() const {
+ return reference_;
+ }
+
+ size_t GetPosition() const {
+ return position_;
+ }
+
+  // Returns true if reference_ is the only name that can refer to its value during
+  // the lifetime of the method, so it's guaranteed not to have any alias in
+  // the method (including its callees).
+ bool IsSingleton() const {
+ return is_singleton_;
+ }
+
+ // Returns true if reference_ is a singleton and not returned to the caller.
+ // The allocation and stores into reference_ may be eliminated for such cases.
+ bool IsSingletonAndNotReturned() const {
+ return is_singleton_and_not_returned_;
+ }
+
+ private:
+ HInstruction* const reference_;
+ const size_t position_; // position in HeapLocationCollector's ref_info_array_.
+ bool is_singleton_; // can only be referred to by a single name in the method.
+ bool is_singleton_and_not_returned_; // reference_ is singleton and not returned to caller.
+
+ DISALLOW_COPY_AND_ASSIGN(ReferenceInfo);
+};
+
+// A heap location is a reference-offset/index pair that a value can be loaded from
+// or stored to.
+class HeapLocation : public ArenaObject<kArenaAllocMisc> {
+ public:
+ static constexpr size_t kInvalidFieldOffset = -1;
+
+ // TODO: more fine-grained array types.
+ static constexpr int16_t kDeclaringClassDefIndexForArrays = -1;
+
+ HeapLocation(ReferenceInfo* ref_info,
+ size_t offset,
+ HInstruction* index,
+ int16_t declaring_class_def_index)
+ : ref_info_(ref_info),
+ offset_(offset),
+ index_(index),
+ declaring_class_def_index_(declaring_class_def_index),
+ may_become_unknown_(true) {
+ DCHECK(ref_info != nullptr);
+ DCHECK((offset == kInvalidFieldOffset && index != nullptr) ||
+ (offset != kInvalidFieldOffset && index == nullptr));
+
+ if (ref_info->IsSingletonAndNotReturned()) {
+      // We try to track stores to singletons that aren't returned so those stores can be
+      // eliminated, since values in a singleton's fields cannot be killed due to aliasing.
+      // Those values can still be killed due to merging, since we don't build phis for merging
+      // heap values. SetMayBecomeUnknown(true) may be called later once such a merge becomes possible.
+ may_become_unknown_ = false;
+ }
+ }
+
+ ReferenceInfo* GetReferenceInfo() const { return ref_info_; }
+ size_t GetOffset() const { return offset_; }
+ HInstruction* GetIndex() const { return index_; }
+
+  // Returns the dex class-def index of the declaring class.
+  // It's kDeclaringClassDefIndexForArrays for an array element.
+ int16_t GetDeclaringClassDefIndex() const {
+ return declaring_class_def_index_;
+ }
+
+ bool IsArrayElement() const {
+ return index_ != nullptr;
+ }
+
+ // Returns true if this heap location's value may become unknown after it's
+ // set to a value, due to merge of values, or killed due to aliasing.
+ bool MayBecomeUnknown() const {
+ return may_become_unknown_;
+ }
+ void SetMayBecomeUnknown(bool val) {
+ may_become_unknown_ = val;
+ }
+
+ private:
+ ReferenceInfo* const ref_info_; // reference for instance/static field or array access.
+ const size_t offset_; // offset of static/instance field.
+ HInstruction* const index_; // index of an array element.
+ const int16_t declaring_class_def_index_; // declaring class's def's dex index.
+ bool may_become_unknown_; // value may become kUnknownHeapValue.
+
+ DISALLOW_COPY_AND_ASSIGN(HeapLocation);
+};
+
+static HInstruction* HuntForOriginalReference(HInstruction* ref) {
+ DCHECK(ref != nullptr);
+ while (ref->IsNullCheck() || ref->IsBoundType()) {
+ ref = ref->InputAt(0);
+ }
+ return ref;
+}
+
+// A HeapLocationCollector collects all relevant heap locations and keeps
+// an aliasing matrix for all locations.
+class HeapLocationCollector : public HGraphVisitor {
+ public:
+ static constexpr size_t kHeapLocationNotFound = -1;
+  // Start with a single uint32_t word. That's enough bits for a pair-wise
+  // aliasing matrix of 8 heap locations.
+ static constexpr uint32_t kInitialAliasingMatrixBitVectorSize = 32;
+
+ explicit HeapLocationCollector(HGraph* graph)
+ : HGraphVisitor(graph),
+ ref_info_array_(graph->GetArena()->Adapter(kArenaAllocLSE)),
+ heap_locations_(graph->GetArena()->Adapter(kArenaAllocLSE)),
+ aliasing_matrix_(graph->GetArena(), kInitialAliasingMatrixBitVectorSize, true),
+ has_heap_stores_(false),
+ has_volatile_(false),
+ has_monitor_operations_(false),
+ may_deoptimize_(false) {}
+
+ size_t GetNumberOfHeapLocations() const {
+ return heap_locations_.size();
+ }
+
+ HeapLocation* GetHeapLocation(size_t index) const {
+ return heap_locations_[index];
+ }
+
+ ReferenceInfo* FindReferenceInfoOf(HInstruction* ref) const {
+ for (size_t i = 0; i < ref_info_array_.size(); i++) {
+ ReferenceInfo* ref_info = ref_info_array_[i];
+ if (ref_info->GetReference() == ref) {
+ DCHECK_EQ(i, ref_info->GetPosition());
+ return ref_info;
+ }
+ }
+ return nullptr;
+ }
+
+ bool HasHeapStores() const {
+ return has_heap_stores_;
+ }
+
+ bool HasVolatile() const {
+ return has_volatile_;
+ }
+
+ bool HasMonitorOps() const {
+ return has_monitor_operations_;
+ }
+
+  // Returns whether this method may be deoptimized.
+  // Currently we don't have metadata support for deoptimizing
+  // a method that eliminates allocations/stores.
+ bool MayDeoptimize() const {
+ return may_deoptimize_;
+ }
+
+ // Find and return the heap location index in heap_locations_.
+ size_t FindHeapLocationIndex(ReferenceInfo* ref_info,
+ size_t offset,
+ HInstruction* index,
+ int16_t declaring_class_def_index) const {
+ for (size_t i = 0; i < heap_locations_.size(); i++) {
+ HeapLocation* loc = heap_locations_[i];
+ if (loc->GetReferenceInfo() == ref_info &&
+ loc->GetOffset() == offset &&
+ loc->GetIndex() == index &&
+ loc->GetDeclaringClassDefIndex() == declaring_class_def_index) {
+ return i;
+ }
+ }
+ return kHeapLocationNotFound;
+ }
+
+ // Returns true if heap_locations_[index1] and heap_locations_[index2] may alias.
+ bool MayAlias(size_t index1, size_t index2) const {
+ if (index1 < index2) {
+ return aliasing_matrix_.IsBitSet(AliasingMatrixPosition(index1, index2));
+ } else if (index1 > index2) {
+ return aliasing_matrix_.IsBitSet(AliasingMatrixPosition(index2, index1));
+ } else {
+ DCHECK(false) << "index1 and index2 are expected to be different";
+ return true;
+ }
+ }
+
+ void BuildAliasingMatrix() {
+ const size_t number_of_locations = heap_locations_.size();
+ if (number_of_locations == 0) {
+ return;
+ }
+ size_t pos = 0;
+ // Compute aliasing info between every pair of different heap locations.
+ // Save the result in a matrix represented as a BitVector.
+ for (size_t i = 0; i < number_of_locations - 1; i++) {
+ for (size_t j = i + 1; j < number_of_locations; j++) {
+ if (ComputeMayAlias(i, j)) {
+ aliasing_matrix_.SetBit(CheckedAliasingMatrixPosition(i, j, pos));
+ }
+ pos++;
+ }
+ }
+ }
+
+ private:
+ // An allocation cannot alias with a name which already exists at the point
+ // of the allocation, such as a parameter or a load happening before the allocation.
+ bool MayAliasWithPreexistenceChecking(ReferenceInfo* ref_info1, ReferenceInfo* ref_info2) const {
+ if (ref_info1->GetReference()->IsNewInstance() || ref_info1->GetReference()->IsNewArray()) {
+      // Any reference that can alias with the allocation must appear after it in the same
+      // block or in the block's successors. In reverse post order, those instructions
+      // will be visited after the allocation.
+ return ref_info2->GetPosition() >= ref_info1->GetPosition();
+ }
+ return true;
+ }
+
+ bool CanReferencesAlias(ReferenceInfo* ref_info1, ReferenceInfo* ref_info2) const {
+ if (ref_info1 == ref_info2) {
+ return true;
+ } else if (ref_info1->IsSingleton()) {
+ return false;
+ } else if (ref_info2->IsSingleton()) {
+ return false;
+ } else if (!MayAliasWithPreexistenceChecking(ref_info1, ref_info2) ||
+ !MayAliasWithPreexistenceChecking(ref_info2, ref_info1)) {
+ return false;
+ }
+ return true;
+ }
+
+ // `index1` and `index2` are indices in the array of collected heap locations.
+ // Returns the position in the bit vector that tracks whether the two heap
+ // locations may alias.
+ size_t AliasingMatrixPosition(size_t index1, size_t index2) const {
+ DCHECK(index2 > index1);
+ const size_t number_of_locations = heap_locations_.size();
+ // It's (num_of_locations - 1) + ... + (num_of_locations - index1) + (index2 - index1 - 1).
+ return (number_of_locations * index1 - (1 + index1) * index1 / 2 + (index2 - index1 - 1));
+ }
+
+ // An additional position is passed in to make sure the calculated position is correct.
+ size_t CheckedAliasingMatrixPosition(size_t index1, size_t index2, size_t position) {
+ size_t calculated_position = AliasingMatrixPosition(index1, index2);
+ DCHECK_EQ(calculated_position, position);
+ return calculated_position;
+ }
+
+ // Compute if two locations may alias to each other.
+ bool ComputeMayAlias(size_t index1, size_t index2) const {
+ HeapLocation* loc1 = heap_locations_[index1];
+ HeapLocation* loc2 = heap_locations_[index2];
+ if (loc1->GetOffset() != loc2->GetOffset()) {
+ // Either two different instance fields, or one is an instance
+ // field and the other is an array element.
+ return false;
+ }
+ if (loc1->GetDeclaringClassDefIndex() != loc2->GetDeclaringClassDefIndex()) {
+ // Different types.
+ return false;
+ }
+ if (!CanReferencesAlias(loc1->GetReferenceInfo(), loc2->GetReferenceInfo())) {
+ return false;
+ }
+ if (loc1->IsArrayElement() && loc2->IsArrayElement()) {
+ HInstruction* array_index1 = loc1->GetIndex();
+ HInstruction* array_index2 = loc2->GetIndex();
+ DCHECK(array_index1 != nullptr);
+ DCHECK(array_index2 != nullptr);
+ if (array_index1->IsIntConstant() &&
+ array_index2->IsIntConstant() &&
+ array_index1->AsIntConstant()->GetValue() != array_index2->AsIntConstant()->GetValue()) {
+ // Different constant indices do not alias.
+ return false;
+ }
+ }
+ return true;
+ }
+
+ ReferenceInfo* GetOrCreateReferenceInfo(HInstruction* ref) {
+ ReferenceInfo* ref_info = FindReferenceInfoOf(ref);
+ if (ref_info == nullptr) {
+ size_t pos = ref_info_array_.size();
+ ref_info = new (GetGraph()->GetArena()) ReferenceInfo(ref, pos);
+ ref_info_array_.push_back(ref_info);
+ }
+ return ref_info;
+ }
+
+ HeapLocation* GetOrCreateHeapLocation(HInstruction* ref,
+ size_t offset,
+ HInstruction* index,
+ int16_t declaring_class_def_index) {
+ HInstruction* original_ref = HuntForOriginalReference(ref);
+ ReferenceInfo* ref_info = GetOrCreateReferenceInfo(original_ref);
+ size_t heap_location_idx = FindHeapLocationIndex(
+ ref_info, offset, index, declaring_class_def_index);
+ if (heap_location_idx == kHeapLocationNotFound) {
+ HeapLocation* heap_loc = new (GetGraph()->GetArena())
+ HeapLocation(ref_info, offset, index, declaring_class_def_index);
+ heap_locations_.push_back(heap_loc);
+ return heap_loc;
+ }
+ return heap_locations_[heap_location_idx];
+ }
+
+ void VisitFieldAccess(HInstruction* field_access,
+ HInstruction* ref,
+ const FieldInfo& field_info,
+ bool is_store) {
+ if (field_info.IsVolatile()) {
+ has_volatile_ = true;
+ }
+ const uint16_t declaring_class_def_index = field_info.GetDeclaringClassDefIndex();
+ const size_t offset = field_info.GetFieldOffset().SizeValue();
+ HeapLocation* location = GetOrCreateHeapLocation(ref, offset, nullptr, declaring_class_def_index);
+    // A store of a value may be eliminated if all future loads of that value can be eliminated.
+    // For a value that's stored into a singleton field, the value will not be killed due
+    // to aliasing. However, if the value is set in a block that doesn't post-dominate the
+    // definition, the value may be killed later due to merging. Until we have post-dominance
+    // info, we conservatively check that the store is in the same block as the definition.
+ if (is_store &&
+ location->GetReferenceInfo()->IsSingletonAndNotReturned() &&
+ field_access->GetBlock() != ref->GetBlock()) {
+ location->SetMayBecomeUnknown(true);
+ }
+ }
+
+ void VisitArrayAccess(HInstruction* array, HInstruction* index) {
+ GetOrCreateHeapLocation(array, HeapLocation::kInvalidFieldOffset,
+ index, HeapLocation::kDeclaringClassDefIndexForArrays);
+ }
+
+ void VisitInstanceFieldGet(HInstanceFieldGet* instruction) OVERRIDE {
+ VisitFieldAccess(instruction, instruction->InputAt(0), instruction->GetFieldInfo(), false);
+ }
+
+ void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
+ VisitFieldAccess(instruction, instruction->InputAt(0), instruction->GetFieldInfo(), true);
+ has_heap_stores_ = true;
+ }
+
+ void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE {
+ VisitFieldAccess(instruction, instruction->InputAt(0), instruction->GetFieldInfo(), false);
+ }
+
+ void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
+ VisitFieldAccess(instruction, instruction->InputAt(0), instruction->GetFieldInfo(), true);
+ has_heap_stores_ = true;
+ }
+
+ // We intentionally don't collect HUnresolvedInstanceField/HUnresolvedStaticField accesses
+ // since we cannot accurately track the fields.
+
+ void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
+ VisitArrayAccess(instruction->InputAt(0), instruction->InputAt(1));
+ }
+
+ void VisitArraySet(HArraySet* instruction) OVERRIDE {
+ VisitArrayAccess(instruction->InputAt(0), instruction->InputAt(1));
+ has_heap_stores_ = true;
+ }
+
+ void VisitNewInstance(HNewInstance* new_instance) OVERRIDE {
+ // Any references appearing in the ref_info_array_ so far cannot alias with new_instance.
+ GetOrCreateReferenceInfo(new_instance);
+ }
+
+ void VisitDeoptimize(HDeoptimize* instruction ATTRIBUTE_UNUSED) OVERRIDE {
+ may_deoptimize_ = true;
+ }
+
+ void VisitMonitorOperation(HMonitorOperation* monitor ATTRIBUTE_UNUSED) OVERRIDE {
+ has_monitor_operations_ = true;
+ }
+
+ ArenaVector<ReferenceInfo*> ref_info_array_; // All references used for heap accesses.
+ ArenaVector<HeapLocation*> heap_locations_; // All heap locations.
+ ArenaBitVector aliasing_matrix_; // aliasing info between each pair of locations.
+  bool has_heap_stores_;    // If there are no heap stores, LSE acts as GVN with better
+ // alias analysis and won't be as effective.
+ bool has_volatile_; // If there are volatile field accesses.
+ bool has_monitor_operations_; // If there are monitor operations.
+ bool may_deoptimize_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapLocationCollector);
+};
+
+// An unknown heap value. Loads with such a value in the heap location cannot be eliminated.
+static HInstruction* const kUnknownHeapValue =
+ reinterpret_cast<HInstruction*>(static_cast<uintptr_t>(-1));
+// Default heap value after an allocation.
+static HInstruction* const kDefaultHeapValue =
+ reinterpret_cast<HInstruction*>(static_cast<uintptr_t>(-2));
+
+class LSEVisitor : public HGraphVisitor {
+ public:
+ LSEVisitor(HGraph* graph,
+ const HeapLocationCollector& heap_locations_collector,
+ const SideEffectsAnalysis& side_effects)
+ : HGraphVisitor(graph),
+ heap_location_collector_(heap_locations_collector),
+ side_effects_(side_effects),
+ heap_values_for_(graph->GetBlocks().size(),
+ ArenaVector<HInstruction*>(heap_locations_collector.
+ GetNumberOfHeapLocations(),
+ kUnknownHeapValue,
+ graph->GetArena()->Adapter(kArenaAllocLSE)),
+ graph->GetArena()->Adapter(kArenaAllocLSE)),
+ removed_instructions_(graph->GetArena()->Adapter(kArenaAllocLSE)),
+ substitute_instructions_(graph->GetArena()->Adapter(kArenaAllocLSE)),
+ singleton_new_instances_(graph->GetArena()->Adapter(kArenaAllocLSE)) {
+ }
+
+ void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ int block_id = block->GetBlockId();
+ ArenaVector<HInstruction*>& heap_values = heap_values_for_[block_id];
+ // TODO: try to reuse the heap_values array from one predecessor if possible.
+ if (block->IsLoopHeader()) {
+ // We do a single pass in reverse post order. For loops, use the side effects as a hint
+ // to see if the heap values should be killed.
+ if (side_effects_.GetLoopEffects(block).DoesAnyWrite()) {
+ // Leave all values as kUnknownHeapValue.
+ } else {
+ // Inherit the values from pre-header.
+ HBasicBlock* pre_header = block->GetLoopInformation()->GetPreHeader();
+ ArenaVector<HInstruction*>& pre_header_heap_values =
+ heap_values_for_[pre_header->GetBlockId()];
+ for (size_t i = 0; i < heap_values.size(); i++) {
+ heap_values[i] = pre_header_heap_values[i];
+ }
+ }
+ } else {
+ MergePredecessorValues(block);
+ }
+ HGraphVisitor::VisitBasicBlock(block);
+ }
+
+ // Remove recorded instructions that should be eliminated.
+ void RemoveInstructions() {
+ size_t size = removed_instructions_.size();
+ DCHECK_EQ(size, substitute_instructions_.size());
+ for (size_t i = 0; i < size; i++) {
+ HInstruction* instruction = removed_instructions_[i];
+ DCHECK(instruction != nullptr);
+ HInstruction* substitute = substitute_instructions_[i];
+ if (substitute != nullptr) {
+        // Keep tracing the substitute until we reach one that's not removed.
+ HInstruction* sub_sub = FindSubstitute(substitute);
+ while (sub_sub != substitute) {
+ substitute = sub_sub;
+ sub_sub = FindSubstitute(substitute);
+ }
+ instruction->ReplaceWith(substitute);
+ }
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+ // TODO: remove unnecessary allocations.
+ // Eliminate instructions in singleton_new_instances_ that:
+ // - don't have uses,
+ // - don't have finalizers,
+ // - are instantiable and accessible,
+ // - have no/separate clinit check.
+ }
+
+ private:
+ void MergePredecessorValues(HBasicBlock* block) {
+ const ArenaVector<HBasicBlock*>& predecessors = block->GetPredecessors();
+ if (predecessors.size() == 0) {
+ return;
+ }
+ ArenaVector<HInstruction*>& heap_values = heap_values_for_[block->GetBlockId()];
+ for (size_t i = 0; i < heap_values.size(); i++) {
+ HInstruction* value = heap_values_for_[predecessors[0]->GetBlockId()][i];
+ if (value != kUnknownHeapValue) {
+ for (size_t j = 1; j < predecessors.size(); j++) {
+ if (heap_values_for_[predecessors[j]->GetBlockId()][i] != value) {
+ value = kUnknownHeapValue;
+ break;
+ }
+ }
+ }
+ heap_values[i] = value;
+ }
+ }
+
+  // `instruction` is being removed. Try to see if the null check on it
+  // can be removed as well. This can happen when the same value is set in both
+  // branches but not in a dominator. For example:
+ // int[] a = foo();
+ // if () {
+ // a[0] = 2;
+ // } else {
+ // a[0] = 2;
+ // }
+ // // a[0] can now be replaced with constant 2, and the null check on it can be removed.
+ void TryRemovingNullCheck(HInstruction* instruction) {
+ HInstruction* prev = instruction->GetPrevious();
+ if ((prev != nullptr) && prev->IsNullCheck() && (prev == instruction->InputAt(0))) {
+ // Previous instruction is a null check for this instruction. Remove the null check.
+ prev->ReplaceWith(prev->InputAt(0));
+ prev->GetBlock()->RemoveInstruction(prev);
+ }
+ }
+
+ HInstruction* GetDefaultValue(Primitive::Type type) {
+ switch (type) {
+ case Primitive::kPrimNot:
+ return GetGraph()->GetNullConstant();
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ return GetGraph()->GetIntConstant(0);
+ case Primitive::kPrimLong:
+ return GetGraph()->GetLongConstant(0);
+ case Primitive::kPrimFloat:
+ return GetGraph()->GetFloatConstant(0);
+ case Primitive::kPrimDouble:
+ return GetGraph()->GetDoubleConstant(0);
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ void VisitGetLocation(HInstruction* instruction,
+ HInstruction* ref,
+ size_t offset,
+ HInstruction* index,
+ int16_t declaring_class_def_index) {
+ HInstruction* original_ref = HuntForOriginalReference(ref);
+ ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(original_ref);
+ size_t idx = heap_location_collector_.FindHeapLocationIndex(
+ ref_info, offset, index, declaring_class_def_index);
+ DCHECK_NE(idx, HeapLocationCollector::kHeapLocationNotFound);
+ ArenaVector<HInstruction*>& heap_values =
+ heap_values_for_[instruction->GetBlock()->GetBlockId()];
+ HInstruction* heap_value = heap_values[idx];
+ if (heap_value == kDefaultHeapValue) {
+ HInstruction* constant = GetDefaultValue(instruction->GetType());
+ removed_instructions_.push_back(instruction);
+ substitute_instructions_.push_back(constant);
+ heap_values[idx] = constant;
+ return;
+ }
+ if ((heap_value != kUnknownHeapValue) &&
+ // Keep the load due to possible I/F, J/D array aliasing.
+ // See b/22538329 for details.
+ (heap_value->GetType() == instruction->GetType())) {
+ removed_instructions_.push_back(instruction);
+ substitute_instructions_.push_back(heap_value);
+ TryRemovingNullCheck(instruction);
+ return;
+ }
+
+ if (heap_value == kUnknownHeapValue) {
+ // Put the load as the value into the HeapLocation.
+ // This acts like GVN but with better aliasing analysis.
+ heap_values[idx] = instruction;
+ }
+ }
+
+ bool Equal(HInstruction* heap_value, HInstruction* value) {
+ if (heap_value == value) {
+ return true;
+ }
+ if (heap_value == kDefaultHeapValue && GetDefaultValue(value->GetType()) == value) {
+ return true;
+ }
+ return false;
+ }
+
+ void VisitSetLocation(HInstruction* instruction,
+ HInstruction* ref,
+ size_t offset,
+ HInstruction* index,
+ int16_t declaring_class_def_index,
+ HInstruction* value) {
+ HInstruction* original_ref = HuntForOriginalReference(ref);
+ ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(original_ref);
+ size_t idx = heap_location_collector_.FindHeapLocationIndex(
+ ref_info, offset, index, declaring_class_def_index);
+ DCHECK_NE(idx, HeapLocationCollector::kHeapLocationNotFound);
+ ArenaVector<HInstruction*>& heap_values =
+ heap_values_for_[instruction->GetBlock()->GetBlockId()];
+ HInstruction* heap_value = heap_values[idx];
+ bool redundant_store = false;
+ if (Equal(heap_value, value)) {
+ // Store into the heap location with the same value.
+ redundant_store = true;
+ } else if (index != nullptr) {
+      // For an array element, don't eliminate the store since it can easily be aliased
+      // by a non-constant index.
+ } else if (!heap_location_collector_.MayDeoptimize() &&
+ ref_info->IsSingletonAndNotReturned() &&
+ !heap_location_collector_.GetHeapLocation(idx)->MayBecomeUnknown()) {
+      // This is a store into a field of a singleton that's not returned, and the value
+      // cannot be killed due to merging. The store is redundant since future loads will
+      // get the value set by this instruction.
+ Primitive::Type type = Primitive::kPrimVoid;
+ if (instruction->IsInstanceFieldSet()) {
+ type = instruction->AsInstanceFieldSet()->GetFieldInfo().GetFieldType();
+ } else if (instruction->IsStaticFieldSet()) {
+ type = instruction->AsStaticFieldSet()->GetFieldInfo().GetFieldType();
+ } else {
+ DCHECK(false) << "Must be an instance/static field set instruction.";
+ }
+ if (value->GetType() != type) {
+ // I/F, J/D aliasing should not happen for fields.
+ DCHECK(Primitive::IsIntegralType(value->GetType()));
+ DCHECK(!Primitive::Is64BitType(value->GetType()));
+ DCHECK(Primitive::IsIntegralType(type));
+ DCHECK(!Primitive::Is64BitType(type));
+ // Keep the store since the corresponding load isn't eliminated due to different types.
+ // TODO: handle the different int types so that we can eliminate this store.
+ redundant_store = false;
+ } else {
+ redundant_store = true;
+ }
+ // TODO: eliminate the store if the singleton object is not finalizable.
+ redundant_store = false;
+ }
+ if (redundant_store) {
+ removed_instructions_.push_back(instruction);
+ substitute_instructions_.push_back(nullptr);
+ TryRemovingNullCheck(instruction);
+ }
+
+ heap_values[idx] = value;
+ // This store may kill values in other heap locations due to aliasing.
+ for (size_t i = 0; i < heap_values.size(); i++) {
+ if (heap_values[i] == value) {
+ // Same value should be kept even if aliasing happens.
+ continue;
+ }
+ if (heap_values[i] == kUnknownHeapValue) {
+ // Value is already unknown, no need for aliasing check.
+ continue;
+ }
+ if (heap_location_collector_.MayAlias(i, idx)) {
+ // Kill heap locations that may alias.
+ heap_values[i] = kUnknownHeapValue;
+ }
+ }
+ }
+
+ void VisitInstanceFieldGet(HInstanceFieldGet* instruction) OVERRIDE {
+ HInstruction* obj = instruction->InputAt(0);
+ size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
+ int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
+ VisitGetLocation(instruction, obj, offset, nullptr, declaring_class_def_index);
+ }
+
+ void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
+ HInstruction* obj = instruction->InputAt(0);
+ size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
+ int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
+ HInstruction* value = instruction->InputAt(1);
+ VisitSetLocation(instruction, obj, offset, nullptr, declaring_class_def_index, value);
+ }
+
+ void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE {
+ HInstruction* cls = instruction->InputAt(0);
+ size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
+ int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
+ VisitGetLocation(instruction, cls, offset, nullptr, declaring_class_def_index);
+ }
+
+ void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
+ HInstruction* cls = instruction->InputAt(0);
+ size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
+ int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
+ HInstruction* value = instruction->InputAt(1);
+ VisitSetLocation(instruction, cls, offset, nullptr, declaring_class_def_index, value);
+ }
+
+ void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
+ HInstruction* array = instruction->InputAt(0);
+ HInstruction* index = instruction->InputAt(1);
+ VisitGetLocation(instruction,
+ array,
+ HeapLocation::kInvalidFieldOffset,
+ index,
+ HeapLocation::kDeclaringClassDefIndexForArrays);
+ }
+
+ void VisitArraySet(HArraySet* instruction) OVERRIDE {
+ HInstruction* array = instruction->InputAt(0);
+ HInstruction* index = instruction->InputAt(1);
+ HInstruction* value = instruction->InputAt(2);
+ VisitSetLocation(instruction,
+ array,
+ HeapLocation::kInvalidFieldOffset,
+ index,
+ HeapLocation::kDeclaringClassDefIndexForArrays,
+ value);
+ }
+
+ void HandleInvoke(HInstruction* invoke) {
+ ArenaVector<HInstruction*>& heap_values =
+ heap_values_for_[invoke->GetBlock()->GetBlockId()];
+ for (size_t i = 0; i < heap_values.size(); i++) {
+ ReferenceInfo* ref_info = heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo();
+ if (ref_info->IsSingleton()) {
+ // Singleton references cannot be seen by the callee.
+ } else {
+ heap_values[i] = kUnknownHeapValue;
+ }
+ }
+ }
+
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void VisitInvokeUnresolved(HInvokeUnresolved* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void VisitClinitCheck(HClinitCheck* clinit) OVERRIDE {
+ HandleInvoke(clinit);
+ }
+
+ void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) OVERRIDE {
+ // Conservatively treat it as an invocation.
+ HandleInvoke(instruction);
+ }
+
+ void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) OVERRIDE {
+ // Conservatively treat it as an invocation.
+ HandleInvoke(instruction);
+ }
+
+ void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) OVERRIDE {
+ // Conservatively treat it as an invocation.
+ HandleInvoke(instruction);
+ }
+
+ void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) OVERRIDE {
+ // Conservatively treat it as an invocation.
+ HandleInvoke(instruction);
+ }
+
+ void VisitNewInstance(HNewInstance* new_instance) OVERRIDE {
+ ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(new_instance);
+ if (ref_info == nullptr) {
+ // new_instance isn't used for field accesses. No need to process it.
+ return;
+ }
+ if (!heap_location_collector_.MayDeoptimize() &&
+ ref_info->IsSingletonAndNotReturned()) {
+ // The allocation might be eliminated.
+ singleton_new_instances_.push_back(new_instance);
+ }
+ ArenaVector<HInstruction*>& heap_values =
+ heap_values_for_[new_instance->GetBlock()->GetBlockId()];
+ for (size_t i = 0; i < heap_values.size(); i++) {
+ HInstruction* ref =
+ heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo()->GetReference();
+ size_t offset = heap_location_collector_.GetHeapLocation(i)->GetOffset();
+ if (ref == new_instance && offset >= mirror::kObjectHeaderSize) {
+ // Instance fields except the header fields are set to default heap values.
+ heap_values[i] = kDefaultHeapValue;
+ }
+ }
+ }
+
+ // Find an instruction's substitute if it should be removed.
+ // Return the same instruction if it should not be removed.
+ HInstruction* FindSubstitute(HInstruction* instruction) {
+ size_t size = removed_instructions_.size();
+ for (size_t i = 0; i < size; i++) {
+ if (removed_instructions_[i] == instruction) {
+ return substitute_instructions_[i];
+ }
+ }
+ return instruction;
+ }
+
+ const HeapLocationCollector& heap_location_collector_;
+ const SideEffectsAnalysis& side_effects_;
+
+ // One array of heap values for each block.
+ ArenaVector<ArenaVector<HInstruction*>> heap_values_for_;
+
+ // We record the instructions that should be eliminated but may be
+ // used by heap locations. They'll be removed in the end.
+ ArenaVector<HInstruction*> removed_instructions_;
+ ArenaVector<HInstruction*> substitute_instructions_;
+ ArenaVector<HInstruction*> singleton_new_instances_;
+
+ DISALLOW_COPY_AND_ASSIGN(LSEVisitor);
+};
+
+void LoadStoreElimination::Run() {
+ if (graph_->IsDebuggable()) {
+ // Debugger may set heap values or trigger deoptimization of callers.
+ // Skip this optimization.
+ return;
+ }
+ HeapLocationCollector heap_location_collector(graph_);
+ for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
+ heap_location_collector.VisitBasicBlock(it.Current());
+ }
+ if (heap_location_collector.GetNumberOfHeapLocations() > kMaxNumberOfHeapLocations) {
+ // Bail out if there are too many heap locations to deal with.
+ return;
+ }
+ if (!heap_location_collector.HasHeapStores()) {
+ // Without heap stores, this pass would act mostly as GVN on heap accesses.
+ return;
+ }
+ if (heap_location_collector.HasVolatile() || heap_location_collector.HasMonitorOps()) {
+ // Don't do load/store elimination if the method has volatile field accesses or
+ // monitor operations, for now.
+ // TODO: do it right.
+ return;
+ }
+ heap_location_collector.BuildAliasingMatrix();
+ LSEVisitor lse_visitor(graph_, heap_location_collector, side_effects_);
+ for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
+ lse_visitor.VisitBasicBlock(it.Current());
+ }
+ lse_visitor.RemoveInstructions();
+}
+
+} // namespace art
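For readers checking the AliasingMatrixPosition() arithmetic in the new pass, here is a small standalone sketch (not part of the patch) that verifies the closed-form bit position against the sequential pair enumeration used by BuildAliasingMatrix().

#include <cassert>
#include <cstddef>

// Same closed form as AliasingMatrixPosition(): rows i = 0..n-2 contribute
// (n-1-i) pairs each, and (i, j) is the (j - i - 1)-th pair within row i.
static size_t Position(size_t n, size_t i, size_t j) {
  return n * i - (1 + i) * i / 2 + (j - i - 1);
}

int main() {
  const size_t n = 8;  // 8 heap locations -> 28 pairs, fitting the initial 32-bit vector.
  size_t pos = 0;
  for (size_t i = 0; i + 1 < n; ++i) {
    for (size_t j = i + 1; j < n; ++j) {
      assert(Position(n, i, j) == pos);  // matches the running counter in BuildAliasingMatrix().
      ++pos;
    }
  }
  return 0;
}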
diff --git a/compiler/optimizing/load_store_elimination.h b/compiler/optimizing/load_store_elimination.h
new file mode 100644
index 0000000..1d9e5c8
--- /dev/null
+++ b/compiler/optimizing/load_store_elimination.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_LOAD_STORE_ELIMINATION_H_
+#define ART_COMPILER_OPTIMIZING_LOAD_STORE_ELIMINATION_H_
+
+#include "optimization.h"
+
+namespace art {
+
+class SideEffectsAnalysis;
+
+class LoadStoreElimination : public HOptimization {
+ public:
+ LoadStoreElimination(HGraph* graph, const SideEffectsAnalysis& side_effects)
+ : HOptimization(graph, kLoadStoreEliminationPassName),
+ side_effects_(side_effects) {}
+
+ void Run() OVERRIDE;
+
+ static constexpr const char* kLoadStoreEliminationPassName = "load_store_elimination";
+
+ private:
+ const SideEffectsAnalysis& side_effects_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadStoreElimination);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_LOAD_STORE_ELIMINATION_H_
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index ed401b6..98c3096 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1591,7 +1591,6 @@
// Replace the invoke with the return value of the inlined graph.
if (last->IsReturn()) {
return_value = last->InputAt(0);
- invoke->ReplaceWith(return_value);
} else {
DCHECK(last->IsReturnVoid());
}
@@ -1639,10 +1638,6 @@
}
}
- if (return_value != nullptr) {
- invoke->ReplaceWith(return_value);
- }
-
// Update the meta information surrounding blocks:
// (1) the graph they are now in,
// (2) the reverse post order of that graph,
@@ -1712,20 +1707,21 @@
size_t parameter_index = 0;
for (HInstructionIterator it(entry_block_->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
+ HInstruction* replacement = nullptr;
if (current->IsNullConstant()) {
- current->ReplaceWith(outer_graph->GetNullConstant(current->GetDexPc()));
+ replacement = outer_graph->GetNullConstant(current->GetDexPc());
} else if (current->IsIntConstant()) {
- current->ReplaceWith(outer_graph->GetIntConstant(
- current->AsIntConstant()->GetValue(), current->GetDexPc()));
+ replacement = outer_graph->GetIntConstant(
+ current->AsIntConstant()->GetValue(), current->GetDexPc());
} else if (current->IsLongConstant()) {
- current->ReplaceWith(outer_graph->GetLongConstant(
- current->AsLongConstant()->GetValue(), current->GetDexPc()));
+ replacement = outer_graph->GetLongConstant(
+ current->AsLongConstant()->GetValue(), current->GetDexPc());
} else if (current->IsFloatConstant()) {
- current->ReplaceWith(outer_graph->GetFloatConstant(
- current->AsFloatConstant()->GetValue(), current->GetDexPc()));
+ replacement = outer_graph->GetFloatConstant(
+ current->AsFloatConstant()->GetValue(), current->GetDexPc());
} else if (current->IsDoubleConstant()) {
- current->ReplaceWith(outer_graph->GetDoubleConstant(
- current->AsDoubleConstant()->GetValue(), current->GetDexPc()));
+ replacement = outer_graph->GetDoubleConstant(
+ current->AsDoubleConstant()->GetValue(), current->GetDexPc());
} else if (current->IsParameterValue()) {
if (kIsDebugBuild
&& invoke->IsInvokeStaticOrDirect()
@@ -1735,13 +1731,25 @@
size_t last_input_index = invoke->InputCount() - 1;
DCHECK(parameter_index != last_input_index);
}
- current->ReplaceWith(invoke->InputAt(parameter_index++));
+ replacement = invoke->InputAt(parameter_index++);
} else if (current->IsCurrentMethod()) {
- current->ReplaceWith(outer_graph->GetCurrentMethod());
+ replacement = outer_graph->GetCurrentMethod();
} else {
DCHECK(current->IsGoto() || current->IsSuspendCheck());
entry_block_->RemoveInstruction(current);
}
+ if (replacement != nullptr) {
+ current->ReplaceWith(replacement);
+      // If `current` is the return value, update the latter as well.
+ if (current == return_value) {
+ DCHECK_EQ(entry_block_, return_value->GetBlock());
+ return_value = replacement;
+ }
+ }
+ }
+
+ if (return_value != nullptr) {
+ invoke->ReplaceWith(return_value);
}
// Finally remove the invoke from the caller.
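The bookkeeping in the patched loop above boils down to this: if the inlined return value is itself one of the entry-block instructions being remapped into the outer graph, keep `return_value` pointing at the replacement before wiring up the invoke. A toy sketch of that pattern, using a plain map instead of the HInstruction graph (the names here are illustrative only):

#include <cassert>
#include <map>
#include <string>

int main() {
  // Hypothetical stand-ins for inner-graph instructions and their outer-graph replacements.
  std::string return_value = "inner_int_constant_42";
  const std::map<std::string, std::string> replacements = {
      {"inner_int_constant_42", "outer_int_constant_42"}};

  // Mirror of the patched loop: when the instruction being replaced is the return value,
  // update return_value too, so the final invoke->ReplaceWith() sees the outer-graph node.
  auto it = replacements.find(return_value);
  if (it != replacements.end()) {
    return_value = it->second;
  }
  assert(return_value == "outer_int_constant_42");
  return 0;
}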
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 939e62c..7aa933d 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -21,6 +21,7 @@
#include <array>
#include <type_traits>
+#include "base/arena_bit_vector.h"
#include "base/arena_containers.h"
#include "base/arena_object.h"
#include "base/stl_util.h"
@@ -34,7 +35,6 @@
#include "mirror/class.h"
#include "offsets.h"
#include "primitive.h"
-#include "utils/arena_bit_vector.h"
namespace art {
@@ -75,6 +75,7 @@
static constexpr uint64_t kMaxLongShiftValue = 0x3f;
static constexpr uint32_t kUnknownFieldIndex = static_cast<uint32_t>(-1);
+static constexpr uint16_t kUnknownClassDefIndex = static_cast<uint16_t>(-1);
static constexpr InvokeType kInvalidInvokeType = static_cast<InvokeType>(-1);
@@ -1079,14 +1080,25 @@
#define FOR_EACH_CONCRETE_INSTRUCTION_ARM(M)
+#ifndef ART_ENABLE_CODEGEN_arm64
#define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)
+#else
+#define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M) \
+ M(Arm64IntermediateAddress, Instruction)
+#endif
+
+#define FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M)
#define FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M)
+#ifndef ART_ENABLE_CODEGEN_x86
+#define FOR_EACH_CONCRETE_INSTRUCTION_X86(M)
+#else
#define FOR_EACH_CONCRETE_INSTRUCTION_X86(M) \
M(X86ComputeBaseMethodAddress, Instruction) \
M(X86LoadFromConstantTable, Instruction) \
M(X86PackedSwitch, Instruction)
+#endif
#define FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
@@ -1094,6 +1106,7 @@
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(M) \
FOR_EACH_CONCRETE_INSTRUCTION_ARM(M) \
FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M) \
+ FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M) \
FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M) \
FOR_EACH_CONCRETE_INSTRUCTION_X86(M) \
FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
@@ -1370,6 +1383,10 @@
return SideEffects(flags_ & ~other.flags_);
}
+ void Add(SideEffects other) {
+ flags_ |= other.flags_;
+ }
+
bool Includes(SideEffects other) const {
return (other.flags_ & flags_) == other.flags_;
}
@@ -1943,6 +1960,7 @@
}
SideEffects GetSideEffects() const { return side_effects_; }
+ void AddSideEffects(SideEffects other) { side_effects_.Add(other); }
size_t GetLifetimePosition() const { return lifetime_position_; }
void SetLifetimePosition(size_t position) { lifetime_position_ = position; }
@@ -2012,7 +2030,7 @@
// order of blocks where this instruction's live interval start.
size_t lifetime_position_;
- const SideEffects side_effects_;
+ SideEffects side_effects_;
// TODO: for primitive types this should be marked as invalid.
ReferenceTypeInfo reference_type_info_;
@@ -4059,24 +4077,31 @@
// the calling convention.
class HParameterValue : public HExpression<0> {
public:
- HParameterValue(uint8_t index,
+ HParameterValue(const DexFile& dex_file,
+ uint16_t type_index,
+ uint8_t index,
Primitive::Type parameter_type,
bool is_this = false)
: HExpression(parameter_type, SideEffects::None(), kNoDexPc),
+ dex_file_(dex_file),
+ type_index_(type_index),
index_(index),
is_this_(is_this),
can_be_null_(!is_this) {}
+ const DexFile& GetDexFile() const { return dex_file_; }
+ uint16_t GetTypeIndex() const { return type_index_; }
uint8_t GetIndex() const { return index_; }
+ bool IsThis() const { return is_this_; }
bool CanBeNull() const OVERRIDE { return can_be_null_; }
void SetCanBeNull(bool can_be_null) { can_be_null_ = can_be_null; }
- bool IsThis() const { return is_this_; }
-
DECLARE_INSTRUCTION(ParameterValue);
private:
+ const DexFile& dex_file_;
+ const uint16_t type_index_;
// The index of this parameter in the parameters list. Must be less
// than HGraph::number_of_in_vregs_.
const uint8_t index_;
@@ -4158,7 +4183,7 @@
Primitive::Type GetInputType() const { return GetInput()->GetType(); }
Primitive::Type GetResultType() const { return GetType(); }
- // Required by the x86 and ARM code generators when producing calls
+ // Required by the x86, ARM, MIPS and MIPS64 code generators when producing calls
// to the runtime.
bool CanBeMoved() const OVERRIDE { return true; }
@@ -4309,18 +4334,21 @@
Primitive::Type field_type,
bool is_volatile,
uint32_t index,
+ uint16_t declaring_class_def_index,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache)
: field_offset_(field_offset),
field_type_(field_type),
is_volatile_(is_volatile),
index_(index),
+ declaring_class_def_index_(declaring_class_def_index),
dex_file_(dex_file),
dex_cache_(dex_cache) {}
MemberOffset GetFieldOffset() const { return field_offset_; }
Primitive::Type GetFieldType() const { return field_type_; }
uint32_t GetFieldIndex() const { return index_; }
+  uint16_t GetDeclaringClassDefIndex() const { return declaring_class_def_index_; }
const DexFile& GetDexFile() const { return dex_file_; }
bool IsVolatile() const { return is_volatile_; }
Handle<mirror::DexCache> GetDexCache() const { return dex_cache_; }
@@ -4330,6 +4358,7 @@
const Primitive::Type field_type_;
const bool is_volatile_;
const uint32_t index_;
+ const uint16_t declaring_class_def_index_;
const DexFile& dex_file_;
const Handle<mirror::DexCache> dex_cache_;
};
@@ -4341,13 +4370,20 @@
MemberOffset field_offset,
bool is_volatile,
uint32_t field_idx,
+ uint16_t declaring_class_def_index,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
- : HExpression(
- field_type,
- SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
- field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache) {
+ : HExpression(field_type,
+ SideEffects::FieldReadOfType(field_type, is_volatile),
+ dex_pc),
+ field_info_(field_offset,
+ field_type,
+ is_volatile,
+ field_idx,
+ declaring_class_def_index,
+ dex_file,
+ dex_cache) {
SetRawInputAt(0, value);
}
@@ -4387,12 +4423,19 @@
MemberOffset field_offset,
bool is_volatile,
uint32_t field_idx,
+ uint16_t declaring_class_def_index,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
- : HTemplateInstruction(
- SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
- field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache),
+ : HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile),
+ dex_pc),
+ field_info_(field_offset,
+ field_type,
+ is_volatile,
+ field_idx,
+ declaring_class_def_index,
+ dex_file,
+ dex_cache),
value_can_be_null_(true) {
SetRawInputAt(0, object);
SetRawInputAt(1, value);
@@ -4424,8 +4467,11 @@
HArrayGet(HInstruction* array,
HInstruction* index,
Primitive::Type type,
- uint32_t dex_pc)
- : HExpression(type, SideEffects::ArrayReadOfType(type), dex_pc) {
+ uint32_t dex_pc,
+ SideEffects additional_side_effects = SideEffects::None())
+ : HExpression(type,
+ SideEffects::ArrayReadOfType(type).Union(additional_side_effects),
+ dex_pc) {
SetRawInputAt(0, array);
SetRawInputAt(1, index);
}
@@ -4460,10 +4506,13 @@
HInstruction* index,
HInstruction* value,
Primitive::Type expected_component_type,
- uint32_t dex_pc)
+ uint32_t dex_pc,
+ SideEffects additional_side_effects = SideEffects::None())
: HTemplateInstruction(
SideEffects::ArrayWriteOfType(expected_component_type).Union(
- SideEffectsForArchRuntimeCalls(value->GetType())), dex_pc),
+ SideEffectsForArchRuntimeCalls(value->GetType())).Union(
+ additional_side_effects),
+ dex_pc),
expected_component_type_(expected_component_type),
needs_type_check_(value->GetType() == Primitive::kPrimNot),
value_can_be_null_(true),
@@ -4518,6 +4567,10 @@
: expected_component_type_;
}
+ Primitive::Type GetRawExpectedComponentType() const {
+ return expected_component_type_;
+ }
+
static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type value_type) {
return (value_type == Primitive::kPrimNot) ? SideEffects::CanTriggerGC() : SideEffects::None();
}
@@ -4576,6 +4629,7 @@
bool CanThrow() const OVERRIDE { return true; }
+ HInstruction* GetIndex() const { return InputAt(0); }
DECLARE_INSTRUCTION(BoundsCheck);
@@ -4807,13 +4861,20 @@
MemberOffset field_offset,
bool is_volatile,
uint32_t field_idx,
+ uint16_t declaring_class_def_index,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
- : HExpression(
- field_type,
- SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
- field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache) {
+ : HExpression(field_type,
+ SideEffects::FieldReadOfType(field_type, is_volatile),
+ dex_pc),
+ field_info_(field_offset,
+ field_type,
+ is_volatile,
+ field_idx,
+ declaring_class_def_index,
+ dex_file,
+ dex_cache) {
SetRawInputAt(0, cls);
}
@@ -4850,12 +4911,19 @@
MemberOffset field_offset,
bool is_volatile,
uint32_t field_idx,
+ uint16_t declaring_class_def_index,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
- : HTemplateInstruction(
- SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
- field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache),
+ : HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile),
+ dex_pc),
+ field_info_(field_offset,
+ field_type,
+ is_volatile,
+ field_idx,
+ declaring_class_def_index,
+ dex_file,
+ dex_cache),
value_can_be_null_(true) {
SetRawInputAt(0, cls);
SetRawInputAt(1, value);
@@ -5389,6 +5457,9 @@
} // namespace art
+#ifdef ART_ENABLE_CODEGEN_arm64
+#include "nodes_arm64.h"
+#endif
#ifdef ART_ENABLE_CODEGEN_x86
#include "nodes_x86.h"
#endif
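The new SideEffects::Add() above is an in-place bitwise union of the flag set, which is what lets HInstruction::AddSideEffects() and the `additional_side_effects` parameters of HArrayGet/HArraySet attach extra dependencies after construction. A minimal sketch of that behaviour (not ART code; the bit positions are made up for illustration):

#include <cassert>
#include <cstdint>

class SideEffectsSketch {
 public:
  explicit SideEffectsSketch(uint64_t flags) : flags_(flags) {}
  void Add(SideEffectsSketch other) { flags_ |= other.flags_; }  // in-place union, as in the patch.
  bool Includes(SideEffectsSketch other) const { return (other.flags_ & flags_) == other.flags_; }

 private:
  uint64_t flags_;
};

int main() {
  SideEffectsSketch array_read(uint64_t{1} << 3);      // hypothetical "array read" bit.
  SideEffectsSketch depends_on_gc(uint64_t{1} << 40);  // hypothetical "depends on GC" bit.
  array_read.Add(depends_on_gc);
  assert(array_read.Includes(depends_on_gc));          // the extra dependency is now recorded.
  return 0;
}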
diff --git a/compiler/optimizing/nodes_arm64.h b/compiler/optimizing/nodes_arm64.h
new file mode 100644
index 0000000..885d3a2
--- /dev/null
+++ b/compiler/optimizing/nodes_arm64.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_NODES_ARM64_H_
+#define ART_COMPILER_OPTIMIZING_NODES_ARM64_H_
+
+namespace art {
+
+// This instruction computes an intermediate address pointing in the 'middle' of an object. The
+// result pointer cannot be handled by GC, so extra care is taken to make sure that this value is
+// never used across anything that can trigger GC.
+class HArm64IntermediateAddress : public HExpression<2> {
+ public:
+ HArm64IntermediateAddress(HInstruction* base_address, HInstruction* offset, uint32_t dex_pc)
+ : HExpression(Primitive::kPrimNot, SideEffects::DependsOnGC(), dex_pc) {
+ SetRawInputAt(0, base_address);
+ SetRawInputAt(1, offset);
+ }
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; }
+
+ HInstruction* GetBaseAddress() const { return InputAt(0); }
+ HInstruction* GetOffset() const { return InputAt(1); }
+
+ DECLARE_INSTRUCTION(Arm64IntermediateAddress);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HArm64IntermediateAddress);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_NODES_ARM64_H_
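To make the HArm64IntermediateAddress comment concrete: the node represents `base + offset` computed once, with element accesses then applying only the scaled index, presumably so the add can be shared between accesses. The sketch below is purely conceptual (not ART code; the zero data offset and the raw pointer arithmetic are illustrative assumptions) and shows why such a mid-object pointer is only safe while the GC cannot move the object, which is why the node is marked SideEffects::DependsOnGC().

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  uint32_t array[4] = {10, 20, 30, 40};
  const uintptr_t base = reinterpret_cast<uintptr_t>(array);
  const uintptr_t data_offset = 0;                     // hypothetical; real arrays have a header.
  const uintptr_t intermediate = base + data_offset;   // the HArm64IntermediateAddress value.
  const size_t index = 2;
  // If the GC moved `array` between these two lines, `intermediate` would dangle.
  uint32_t value = *reinterpret_cast<const uint32_t*>(intermediate + index * sizeof(uint32_t));
  assert(value == 30);
  return 0;
}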
diff --git a/compiler/optimizing/nodes_test.cc b/compiler/optimizing/nodes_test.cc
index 8eeac56..764f5fe 100644
--- a/compiler/optimizing/nodes_test.cc
+++ b/compiler/optimizing/nodes_test.cc
@@ -34,7 +34,8 @@
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
entry->AddInstruction(new (&allocator) HGoto());
@@ -76,8 +77,10 @@
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
- HInstruction* parameter2 = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter1 = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
+ HInstruction* parameter2 = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
entry->AddInstruction(new (&allocator) HExit());
@@ -102,7 +105,8 @@
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
ASSERT_FALSE(parameter->HasUses());
@@ -122,7 +126,8 @@
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter1 = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
HInstruction* with_environment = new (&allocator) HNullCheck(parameter1, 0);
entry->AddInstruction(parameter1);
entry->AddInstruction(with_environment);
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 17a4743..d6f2543 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -58,6 +58,7 @@
#include "intrinsics.h"
#include "licm.h"
#include "jni/quick/jni_compiler.h"
+#include "load_store_elimination.h"
#include "nodes.h"
#include "prepare_for_register_allocation.h"
#include "reference_type_propagation.h"
@@ -361,6 +362,7 @@
return (instruction_set == kArm && !kArm32QuickCodeUseSoftFloat)
|| instruction_set == kArm64
|| (instruction_set == kThumb2 && !kArm32QuickCodeUseSoftFloat)
+ || instruction_set == kMips
|| instruction_set == kMips64
|| instruction_set == kX86
|| instruction_set == kX86_64;
@@ -460,6 +462,7 @@
SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects);
LICM* licm = new (arena) LICM(graph, *side_effects);
+ LoadStoreElimination* lse = new (arena) LoadStoreElimination(graph, *side_effects);
HInductionVarAnalysis* induction = new (arena) HInductionVarAnalysis(graph);
BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, induction);
ReferenceTypePropagation* type_propagation =
@@ -512,6 +515,7 @@
induction,
bce,
simplify3,
+ lse,
dce2,
// The codegen has a few assumptions that only the instruction simplifier
// can satisfy. For example, the code generator does not expect to see a
@@ -838,18 +842,26 @@
Handle<mirror::DexCache> dex_cache) const {
CompilerDriver* compiler_driver = GetCompilerDriver();
CompiledMethod* method = nullptr;
- const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
- DCHECK(!verified_method->HasRuntimeThrow());
- if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)
- || CanHandleVerificationFailure(verified_method)) {
- method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
- method_idx, jclass_loader, dex_file, dex_cache);
- } else {
- if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
- MaybeRecordStat(MethodCompilationStat::kNotCompiledVerifyAtRuntime);
+ if (Runtime::Current()->IsAotCompiler()) {
+ const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
+ DCHECK(!verified_method->HasRuntimeThrow());
+ if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)
+ || CanHandleVerificationFailure(verified_method)) {
+ method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
+ method_idx, jclass_loader, dex_file, dex_cache);
} else {
- MaybeRecordStat(MethodCompilationStat::kNotCompiledClassNotVerified);
+ if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledVerifyAtRuntime);
+ } else {
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledClassNotVerified);
+ }
}
+ } else {
+ // This is for the JIT compiler, which has already ensured the class is verified.
+ // We can go straight to compiling.
+ DCHECK(Runtime::Current()->UseJit());
+ method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
+ method_idx, jclass_loader, dex_file, dex_cache);
}
if (kIsDebugBuild &&
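
The hunk above splits OptimizingCompiler::Compile() into two paths: the AOT path still consults the verification results before calling TryCompile, while the JIT path (which only compiles methods in classes the runtime has already verified) goes straight to compilation. As a rough, standalone sketch of that control flow only — all names below are hypothetical stand-ins, not the real driver/runtime API:

#include <iostream>

// Hypothetical stand-ins for the state Compile() consults.
struct FakeEnvironment {
  bool is_aot_compiler;            // Runtime::Current()->IsAotCompiler()
  bool verified_without_failures;  // driver->IsMethodVerifiedWithoutFailures(...)
  bool soft_failures_only;         // CanHandleVerificationFailure(verified_method)
  bool verify_at_runtime;          // compiler option VerifyAtRuntime()
};

enum class Outcome { kCompiled, kSkippedVerifyAtRuntime, kSkippedClassNotVerified };

// Mirrors the shape of the new logic: the verification gate only applies to
// the AOT path; the JIT path compiles unconditionally.
Outcome DecideCompilation(const FakeEnvironment& env) {
  if (env.is_aot_compiler) {
    if (env.verified_without_failures || env.soft_failures_only) {
      return Outcome::kCompiled;  // would call TryCompile(...)
    }
    return env.verify_at_runtime ? Outcome::kSkippedVerifyAtRuntime
                                 : Outcome::kSkippedClassNotVerified;
  }
  // JIT: the class was verified before the method became hot, so compile directly.
  return Outcome::kCompiled;
}

int main() {
  FakeEnvironment jit{/*is_aot_compiler=*/false, false, false, false};
  std::cout << (DecideCompilation(jit) == Outcome::kCompiled) << "\n";  // prints 1
  return 0;
}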
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index a1feaf7..26a05da 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -428,12 +428,21 @@
UpdateReferenceTypeInfo(instr, instr->GetTypeIndex(), instr->GetDexFile(), /* is_exact */ true);
}
+static mirror::Class* GetClassFromDexCache(Thread* self, const DexFile& dex_file, uint16_t type_idx)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::DexCache* dex_cache =
+ Runtime::Current()->GetClassLinker()->FindDexCache(self, dex_file, false);
+ // Get type from dex cache assuming it was populated by the verifier.
+ return dex_cache->GetResolvedType(type_idx);
+}
+
void RTPVisitor::VisitParameterValue(HParameterValue* instr) {
ScopedObjectAccess soa(Thread::Current());
// We check if the existing type is valid: the inliner may have set it.
if (instr->GetType() == Primitive::kPrimNot && !instr->GetReferenceTypeInfo().IsValid()) {
- // TODO: parse the signature and add precise types for the parameters.
- SetClassAsTypeInfo(instr, nullptr, /* is_exact */ false);
+ mirror::Class* resolved_class =
+ GetClassFromDexCache(soa.Self(), instr->GetDexFile(), instr->GetTypeIndex());
+ SetClassAsTypeInfo(instr, resolved_class, /* is_exact */ false);
}
}
@@ -479,11 +488,9 @@
void RTPVisitor::VisitLoadClass(HLoadClass* instr) {
ScopedObjectAccess soa(Thread::Current());
- mirror::DexCache* dex_cache =
- Runtime::Current()->GetClassLinker()->FindDexCache(soa.Self(), instr->GetDexFile(), false);
// Get type from dex cache assuming it was populated by the verifier.
- mirror::Class* resolved_class = dex_cache->GetResolvedType(instr->GetTypeIndex());
- // TODO: investigating why we are still getting unresolved classes: b/22821472.
+ mirror::Class* resolved_class =
+ GetClassFromDexCache(soa.Self(), instr->GetDexFile(), instr->GetTypeIndex());
if (resolved_class != nullptr) {
instr->SetLoadedClassRTI(ReferenceTypeInfo::Create(
handles_->NewHandle(resolved_class), /* is_exact */ true));
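
The reference_type_propagation.cc hunk hoists the dex-cache lookup into a shared GetClassFromDexCache() helper, so that parameter values now receive a (possibly inexact) resolved class instead of a null one, and VisitLoadClass resolves its type through the same routine. A minimal sketch of that "look up once, share between visitors" shape, using placeholder types rather than the real ClassLinker/DexCache API:

#include <cstdint>
#include <string>
#include <unordered_map>

// Placeholder for a dex cache populated by the verifier: type index -> class name.
using FakeDexCache = std::unordered_map<uint16_t, std::string>;

// Shared helper, analogous to GetClassFromDexCache().
const std::string* LookupResolvedType(const FakeDexCache& cache, uint16_t type_idx) {
  auto it = cache.find(type_idx);
  return it == cache.end() ? nullptr : &it->second;
}

struct TypeInfo {
  const std::string* klass = nullptr;  // null means "unknown reference type"
  bool is_exact = false;
};

// Parameters now get a resolved class, but only as an upper bound (inexact).
TypeInfo VisitParameter(const FakeDexCache& cache, uint16_t type_idx) {
  return TypeInfo{LookupResolvedType(cache, type_idx), /*is_exact=*/false};
}

// LoadClass keeps its exact type, but goes through the same helper.
TypeInfo VisitLoadClass(const FakeDexCache& cache, uint16_t type_idx) {
  return TypeInfo{LookupResolvedType(cache, type_idx), /*is_exact=*/true};
}

int main() {
  FakeDexCache cache = {{7, "Ljava/lang/String;"}};
  TypeInfo param = VisitParameter(cache, 7);
  return (param.klass != nullptr && !param.is_exact) ? 0 : 1;
}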
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 6fc7772..ef22c81 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -85,12 +85,13 @@
bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED,
InstructionSet instruction_set) {
- return instruction_set == kArm64
- || instruction_set == kX86_64
+ return instruction_set == kArm
+ || instruction_set == kArm64
+ || instruction_set == kMips
|| instruction_set == kMips64
- || instruction_set == kArm
+ || instruction_set == kThumb2
|| instruction_set == kX86
- || instruction_set == kThumb2;
+ || instruction_set == kX86_64;
}
static bool ShouldProcess(bool processing_core_registers, LiveInterval* interval) {
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 1511606..080f970 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -475,7 +475,8 @@
NullHandle<mirror::DexCache> dex_cache;
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
HBasicBlock* block = new (allocator) HBasicBlock(graph);
@@ -487,6 +488,7 @@
MemberOffset(22),
false,
kUnknownFieldIndex,
+ kUnknownClassDefIndex,
graph->GetDexFile(),
dex_cache,
0);
@@ -513,6 +515,7 @@
MemberOffset(42),
false,
kUnknownFieldIndex,
+ kUnknownClassDefIndex,
graph->GetDexFile(),
dex_cache,
0);
@@ -521,6 +524,7 @@
MemberOffset(42),
false,
kUnknownFieldIndex,
+ kUnknownClassDefIndex,
graph->GetDexFile(),
dex_cache,
0);
@@ -624,7 +628,8 @@
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
HBasicBlock* block = new (allocator) HBasicBlock(graph);
@@ -636,6 +641,7 @@
MemberOffset(42),
false,
kUnknownFieldIndex,
+ kUnknownClassDefIndex,
graph->GetDexFile(),
dex_cache,
0);
@@ -698,7 +704,8 @@
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimInt);
+ HInstruction* parameter = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimInt);
entry->AddInstruction(parameter);
HInstruction* constant1 = graph->GetIntConstant(1);
@@ -768,8 +775,10 @@
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* first = new (allocator) HParameterValue(0, Primitive::kPrimInt);
- HInstruction* second = new (allocator) HParameterValue(0, Primitive::kPrimInt);
+ HInstruction* first = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimInt);
+ HInstruction* second = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimInt);
entry->AddInstruction(first);
entry->AddInstruction(second);
@@ -820,10 +829,14 @@
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* one = new (&allocator) HParameterValue(0, Primitive::kPrimInt);
- HInstruction* two = new (&allocator) HParameterValue(0, Primitive::kPrimInt);
- HInstruction* three = new (&allocator) HParameterValue(0, Primitive::kPrimInt);
- HInstruction* four = new (&allocator) HParameterValue(0, Primitive::kPrimInt);
+ HInstruction* one = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimInt);
+ HInstruction* two = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimInt);
+ HInstruction* three = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimInt);
+ HInstruction* four = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimInt);
entry->AddInstruction(one);
entry->AddInstruction(two);
entry->AddInstruction(three);
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index c4a3b28..560502f 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -15,8 +15,9 @@
*/
#include "stack_map.h"
+
+#include "base/arena_bit_vector.h"
#include "stack_map_stream.h"
-#include "utils/arena_bit_vector.h"
#include "gtest/gtest.h"
diff --git a/compiler/utils/arena_allocator_test.cc b/compiler/utils/arena_allocator_test.cc
index 7065527..7f67ef1 100644
--- a/compiler/utils/arena_allocator_test.cc
+++ b/compiler/utils/arena_allocator_test.cc
@@ -15,8 +15,8 @@
*/
#include "base/arena_allocator.h"
+#include "base/arena_bit_vector.h"
#include "gtest/gtest.h"
-#include "utils/arena_bit_vector.h"
namespace art {
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 16f29b0..4413906 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -391,10 +391,30 @@
DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Srl, 5, "srl ${reg1}, ${reg2}, {imm}"), "srl");
}
+TEST_F(AssemblerMIPS64Test, Rotr) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Rotr, 5, "rotr ${reg1}, ${reg2}, {imm}"), "rotr");
+}
+
TEST_F(AssemblerMIPS64Test, Sra) {
DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sra, 5, "sra ${reg1}, ${reg2}, {imm}"), "sra");
}
+TEST_F(AssemblerMIPS64Test, Sllv) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Sllv, "sllv ${reg1}, ${reg2}, ${reg3}"), "sllv");
+}
+
+TEST_F(AssemblerMIPS64Test, Srlv) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Srlv, "srlv ${reg1}, ${reg2}, ${reg3}"), "srlv");
+}
+
+TEST_F(AssemblerMIPS64Test, Rotrv) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Rotrv, "rotrv ${reg1}, ${reg2}, ${reg3}"), "rotrv");
+}
+
+TEST_F(AssemblerMIPS64Test, Srav) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Srav, "srav ${reg1}, ${reg2}, ${reg3}"), "srav");
+}
+
TEST_F(AssemblerMIPS64Test, Dsll) {
DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsll, 5, "dsll ${reg1}, ${reg2}, {imm}"), "dsll");
}
@@ -403,20 +423,33 @@
DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsrl, 5, "dsrl ${reg1}, ${reg2}, {imm}"), "dsrl");
}
+TEST_F(AssemblerMIPS64Test, Drotr) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Drotr, 5, "drotr ${reg1}, ${reg2}, {imm}"),
+ "drotr");
+}
+
TEST_F(AssemblerMIPS64Test, Dsra) {
DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsra, 5, "dsra ${reg1}, ${reg2}, {imm}"), "dsra");
}
TEST_F(AssemblerMIPS64Test, Dsll32) {
- DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsll32, 5, "dsll32 ${reg1}, ${reg2}, {imm}"), "dsll32");
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsll32, 5, "dsll32 ${reg1}, ${reg2}, {imm}"),
+ "dsll32");
}
TEST_F(AssemblerMIPS64Test, Dsrl32) {
- DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsrl32, 5, "dsrl32 ${reg1}, ${reg2}, {imm}"), "dsrl32");
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsrl32, 5, "dsrl32 ${reg1}, ${reg2}, {imm}"),
+ "dsrl32");
+}
+
+TEST_F(AssemblerMIPS64Test, Drotr32) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Drotr32, 5, "drotr32 ${reg1}, ${reg2}, {imm}"),
+ "drotr32");
}
TEST_F(AssemblerMIPS64Test, Dsra32) {
- DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsra32, 5, "dsra32 ${reg1}, ${reg2}, {imm}"), "dsra32");
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsra32, 5, "dsra32 ${reg1}, ${reg2}, {imm}"),
+ "dsra32");
}
TEST_F(AssemblerMIPS64Test, Sc) {
@@ -435,10 +468,6 @@
DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Lld, -9, "lld ${reg1}, {imm}(${reg2})"), "lld");
}
-TEST_F(AssemblerMIPS64Test, Rotr) {
- DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Rotr, 5, "rotr ${reg1}, ${reg2}, {imm}"), "rotr");
-}
-
TEST_F(AssemblerMIPS64Test, Seleqz) {
DriverStr(RepeatRRR(&mips64::Mips64Assembler::Seleqz, "seleqz ${reg1}, ${reg2}, ${reg3}"),
"seleqz");
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 17c5282..384b879 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -537,10 +537,18 @@
// the runtime.
LogCompletionTime();
- if (kIsDebugBuild || (RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
- delete runtime_; // See field declaration for why this is manual.
- delete driver_;
- delete verification_results_;
+ if (!kIsDebugBuild && !(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
+ // We want to just exit on non-debug builds, not bringing the runtime down
+ // in an orderly fashion. So release the following fields.
+ driver_.release();
+ image_writer_.release();
+ for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files_) {
+ dex_file.release();
+ }
+ oat_file_.release();
+ runtime_.release();
+ verification_results_.release();
+ key_value_store_.release();
}
}
@@ -1241,9 +1249,9 @@
runtime_options.push_back(std::make_pair(runtime_args_[i], nullptr));
}
- verification_results_ = new VerificationResults(compiler_options_.get());
+ verification_results_.reset(new VerificationResults(compiler_options_.get()));
callbacks_.reset(new QuickCompilerCallbacks(
- verification_results_,
+ verification_results_.get(),
&method_inliner_map_,
image_ ?
CompilerCallbacks::CallbackMode::kCompileBootImage :
@@ -1468,24 +1476,24 @@
class_loader = class_linker->CreatePathClassLoader(self, class_path_files);
}
- driver_ = new CompilerDriver(compiler_options_.get(),
- verification_results_,
- &method_inliner_map_,
- compiler_kind_,
- instruction_set_,
- instruction_set_features_.get(),
- image_,
- image_classes_.release(),
- compiled_classes_.release(),
- nullptr,
- thread_count_,
- dump_stats_,
- dump_passes_,
- dump_cfg_file_name_,
- dump_cfg_append_,
- compiler_phases_timings_.get(),
- swap_fd_,
- profile_file_);
+ driver_.reset(new CompilerDriver(compiler_options_.get(),
+ verification_results_.get(),
+ &method_inliner_map_,
+ compiler_kind_,
+ instruction_set_,
+ instruction_set_features_.get(),
+ image_,
+ image_classes_.release(),
+ compiled_classes_.release(),
+ nullptr,
+ thread_count_,
+ dump_stats_,
+ dump_passes_,
+ dump_cfg_file_name_,
+ dump_cfg_append_,
+ compiler_phases_timings_.get(),
+ swap_fd_,
+ profile_file_));
driver_->CompileAll(class_loader, dex_files_, timings_);
}
@@ -1587,7 +1595,7 @@
oat_writer.reset(new OatWriter(dex_files_, image_file_location_oat_checksum,
image_file_location_oat_data_begin,
image_patch_delta,
- driver_,
+ driver_.get(),
image_writer_.get(),
timings_,
key_value_store_.get()));
@@ -1776,22 +1784,21 @@
LOG(ERROR) << "Failed to create runtime";
return false;
}
- Runtime* runtime = Runtime::Current();
- runtime->SetInstructionSet(instruction_set_);
+ runtime_.reset(Runtime::Current());
+ runtime_->SetInstructionSet(instruction_set_);
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
- if (!runtime->HasCalleeSaveMethod(type)) {
- runtime->SetCalleeSaveMethod(runtime->CreateCalleeSaveMethod(), type);
+ if (!runtime_->HasCalleeSaveMethod(type)) {
+ runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type);
}
}
- runtime->GetClassLinker()->FixupDexCaches(runtime->GetResolutionMethod());
+ runtime_->GetClassLinker()->FixupDexCaches(runtime_->GetResolutionMethod());
// Initialize maps for unstarted runtime. This needs to be here, as running clinits needs this
// set up.
interpreter::UnstartedRuntime::Initialize();
- runtime->GetClassLinker()->RunRootClinits();
- runtime_ = runtime;
+ runtime_->GetClassLinker()->RunRootClinits();
return true;
}
@@ -1940,9 +1947,7 @@
std::unique_ptr<SafeMap<std::string, std::string> > key_value_store_;
- // Not a unique_ptr as we want to just exit on non-debug builds, not bringing the compiler down
- // in an orderly fashion. The destructor takes care of deleting this.
- VerificationResults* verification_results_;
+ std::unique_ptr<VerificationResults> verification_results_;
DexFileToMethodInlinerMap method_inliner_map_;
std::unique_ptr<QuickCompilerCallbacks> callbacks_;
@@ -1950,9 +1955,7 @@
// Ownership for the class path files.
std::vector<std::unique_ptr<const DexFile>> class_path_files_;
- // Not a unique_ptr as we want to just exit on non-debug builds, not bringing the runtime down
- // in an orderly fashion. The destructor takes care of deleting this.
- Runtime* runtime_;
+ std::unique_ptr<Runtime> runtime_;
size_t thread_count_;
uint64_t start_ns_;
@@ -1981,16 +1984,14 @@
std::unique_ptr<std::unordered_set<std::string>> compiled_classes_;
std::unique_ptr<std::unordered_set<std::string>> compiled_methods_;
bool image_;
- std::unique_ptr<ImageWriter> image_writer_;
bool is_host_;
std::string android_root_;
std::vector<const DexFile*> dex_files_;
std::vector<jobject> dex_caches_;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
- // Not a unique_ptr as we want to just exit on non-debug builds, not bringing the driver down
- // in an orderly fashion. The destructor takes care of deleting this.
- CompilerDriver* driver_;
+ std::unique_ptr<ImageWriter> image_writer_;
+ std::unique_ptr<CompilerDriver> driver_;
std::vector<std::string> verbose_methods_;
bool dump_stats_;
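
The dex2oat.cc change converts several raw owning pointers (driver_, runtime_, verification_results_, ...) to std::unique_ptr and, on non-debug builds without a leak checker, release()s them before exit so that no slow, orderly teardown runs, while debug and memory-tool builds still destroy everything properly. A compact sketch of that idiom, using a made-up resource type rather than the real dex2oat fields:

#include <memory>
#include <vector>

struct ExpensiveToTearDown {
  ~ExpensiveToTearDown() { /* imagine a slow but orderly shutdown here */ }
};

// Leak intentionally on release builds where the process is about to exit
// anyway; destroy properly when a debug build or leak checker needs to see
// clean teardown.
void ShutDown(std::vector<std::unique_ptr<ExpensiveToTearDown>>& owned,
              bool debug_or_leak_checker) {
  if (!debug_or_leak_checker) {
    for (auto& ptr : owned) {
      ptr.release();  // give up ownership without running the destructor
    }
  }
  // Otherwise the unique_ptrs destroy their objects when `owned` goes away.
}

int main() {
  std::vector<std::unique_ptr<ExpensiveToTearDown>> owned;
  owned.push_back(std::make_unique<ExpensiveToTearDown>());
  ShutDown(owned, /*debug_or_leak_checker=*/false);
  return 0;  // objects on the release path are leaked on purpose
}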
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index faa2d2d..c2f23aa 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -58,9 +58,10 @@
// 0, 1, movci
{ kRTypeMask, 2, "srl", "DTA", },
{ kRTypeMask, 3, "sra", "DTA", },
- { kRTypeMask, 4, "sllv", "DTS", },
- { kRTypeMask, 6, "srlv", "DTS", },
- { kRTypeMask, 7, "srav", "DTS", },
+ { kRTypeMask | (0x1f << 6), 4, "sllv", "DTS", },
+ { kRTypeMask | (0x1f << 6), 6, "srlv", "DTS", },
+ { kRTypeMask | (0x1f << 6), (1 << 6) | 6, "rotrv", "DTS", },
+ { kRTypeMask | (0x1f << 6), 7, "srav", "DTS", },
{ kRTypeMask, 8, "jr", "S", },
{ kRTypeMask | (0x1f << 11), 9 | (31 << 11), "jalr", "S", }, // rd = 31 is implicit.
{ kRTypeMask | (0x1f << 11), 9, "jr", "S", }, // rd = 0 is implicit.
@@ -74,9 +75,10 @@
{ kRTypeMask, 17, "mthi", "S", },
{ kRTypeMask, 18, "mflo", "D", },
{ kRTypeMask, 19, "mtlo", "S", },
- { kRTypeMask, 20, "dsllv", "DTS", },
- { kRTypeMask, 22, "dsrlv", "DTS", },
- { kRTypeMask, 23, "dsrav", "DTS", },
+ { kRTypeMask | (0x1f << 6), 20, "dsllv", "DTS", },
+ { kRTypeMask | (0x1f << 6), 22, "dsrlv", "DTS", },
+ { kRTypeMask | (0x1f << 6), (1 << 6) | 22, "drotrv", "DTS", },
+ { kRTypeMask | (0x1f << 6), 23, "dsrav", "DTS", },
{ kRTypeMask | (0x1f << 6), 24, "mult", "ST", },
{ kRTypeMask | (0x1f << 6), 25, "multu", "ST", },
{ kRTypeMask | (0x1f << 6), 26, "div", "ST", },
@@ -99,13 +101,14 @@
{ kRTypeMask, 46, "dsub", "DST", },
{ kRTypeMask, 47, "dsubu", "DST", },
// TODO: tge[u], tlt[u], teg, tne
- { kRTypeMask, 56, "dsll", "DTA", },
- { kRTypeMask, 58, "dsrl", "DTA", },
- { kRTypeMask, 59, "dsra", "DTA", },
- { kRTypeMask, 60, "dsll32", "DTA", },
- { kRTypeMask | (0x1f << 21), 62 | (1 << 21), "drotr32", "DTA", },
- { kRTypeMask, 62, "dsrl32", "DTA", },
- { kRTypeMask, 63, "dsra32", "DTA", },
+ { kRTypeMask | (0x1f << 21), 56, "dsll", "DTA", },
+ { kRTypeMask | (0x1f << 21), 58, "dsrl", "DTA", },
+ { kRTypeMask | (0x1f << 21), (1 << 21) | 58, "drotr", "DTA", },
+ { kRTypeMask | (0x1f << 21), 59, "dsra", "DTA", },
+ { kRTypeMask | (0x1f << 21), 60, "dsll32", "DTA", },
+ { kRTypeMask | (0x1f << 21), 62, "dsrl32", "DTA", },
+ { kRTypeMask | (0x1f << 21), (1 << 21) | 62, "drotr32", "DTA", },
+ { kRTypeMask | (0x1f << 21), 63, "dsra32", "DTA", },
// SPECIAL0
{ kSpecial0Mask | 0x7ff, (2 << 6) | 24, "mul", "DST" },
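
The disassembler rows above tighten the match masks: the variable shifts (sllv/srlv/srav and their 64-bit forms) now also require the sa field (bits 6..10) to hold a specific value, which lets rotrv/drotrv — same funct code as srlv/dsrlv but with bit 6 set — get rows of their own, and drotr/drotr32 are likewise distinguished from dsrl/dsrl32 by bit 21. A minimal mask/value matcher in the same spirit, with a two-row hypothetical table, not the real disassembler:

#include <cstdint>
#include <cstdio>

struct Row {
  uint32_t mask;
  uint32_t value;
  const char* name;
};

// Two R-type rows that share funct=6 (srlv) but differ in bit 6,
// mirroring the srlv/rotrv split in the real table.
constexpr uint32_t kRTypeMask = 0xfc00003f;  // opcode + funct fields
const Row kRows[] = {
  { kRTypeMask | (0x1f << 6), 6,            "srlv"  },
  { kRTypeMask | (0x1f << 6), (1 << 6) | 6, "rotrv" },
};

const char* Decode(uint32_t instruction) {
  for (const Row& row : kRows) {
    if ((instruction & row.mask) == row.value) {
      return row.name;
    }
  }
  return "unknown";
}

int main() {
  // srlv $t0, $t1, $t2  ->  rs=$t2(10), rt=$t1(9), rd=$t0(8), sa=0, funct=6
  uint32_t srlv = (10u << 21) | (9u << 16) | (8u << 11) | (0u << 6) | 6u;
  // rotrv is the same encoding with the low bit of the sa field set.
  uint32_t rotrv = srlv | (1u << 6);
  std::printf("%s %s\n", Decode(srlv), Decode(rotrv));  // prints: srlv rotrv
  return 0;
}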
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index dbf5365..ea61b43 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -26,7 +26,6 @@
#include <vector>
#include "arch/instruction_set_features.h"
-#include "art_code.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/unix_file/fd_file.h"
@@ -1967,10 +1966,13 @@
InstructionSetPointerSize(state->oat_dumper_->GetOatInstructionSet());
const void* quick_oat_code_begin = state->GetQuickOatCodeBegin(method);
const void* quick_oat_code_end = state->GetQuickOatCodeEnd(method);
- ArtCode art_code(method);
+ OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(
+ reinterpret_cast<uintptr_t>(quick_oat_code_begin) - sizeof(OatQuickMethodHeader));
if (method->IsNative()) {
- DCHECK(art_code.GetNativeGcMap(image_pointer_size) == nullptr) << PrettyMethod(method);
- DCHECK(art_code.GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method);
+ if (!Runtime::Current()->GetClassLinker()->IsQuickGenericJniStub(quick_oat_code_begin)) {
+ DCHECK(method_header->GetNativeGcMap() == nullptr) << PrettyMethod(method);
+ DCHECK(method_header->GetMappingTable() == nullptr) << PrettyMethod(method);
+ }
bool first_occurrence;
uint32_t quick_oat_code_size = state->GetQuickOatCodeSize(method);
state->ComputeOatSize(quick_oat_code_begin, &first_occurrence);
@@ -1984,8 +1986,6 @@
} else if (method->IsAbstract() || method->IsCalleeSaveMethod() ||
method->IsResolutionMethod() || method->IsImtConflictMethod() ||
method->IsImtUnimplementedMethod() || method->IsClassInitializer()) {
- DCHECK(art_code.GetNativeGcMap(image_pointer_size) == nullptr) << PrettyMethod(method);
- DCHECK(art_code.GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method);
} else {
const DexFile::CodeItem* code_item = method->GetCodeItem();
size_t dex_instruction_bytes = code_item->insns_size_in_code_units_ * 2;
@@ -1993,22 +1993,22 @@
bool first_occurrence;
size_t gc_map_bytes = state->ComputeOatSize(
- art_code.GetNativeGcMap(image_pointer_size), &first_occurrence);
+ method_header->GetNativeGcMap(), &first_occurrence);
if (first_occurrence) {
state->stats_.gc_map_bytes += gc_map_bytes;
}
size_t pc_mapping_table_bytes = state->ComputeOatSize(
- art_code.GetMappingTable(image_pointer_size), &first_occurrence);
+ method_header->GetMappingTable(), &first_occurrence);
if (first_occurrence) {
state->stats_.pc_mapping_table_bytes += pc_mapping_table_bytes;
}
size_t vmap_table_bytes = 0u;
- if (!art_code.IsOptimized(image_pointer_size)) {
+ if (!method_header->IsOptimized()) {
// Methods compiled with the optimizing compiler have no vmap table.
vmap_table_bytes = state->ComputeOatSize(
- art_code.GetVmapTable(image_pointer_size), &first_occurrence);
+ method_header->GetVmapTable(), &first_occurrence);
if (first_occurrence) {
state->stats_.vmap_table_bytes += vmap_table_bytes;
}
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 8fe3fa2..09d7311 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -19,13 +19,13 @@
include art/build/Android.common_build.mk
LIBART_COMMON_SRC_FILES := \
- art_code.cc \
art_field.cc \
art_method.cc \
atomic.cc.arm \
barrier.cc \
base/allocator.cc \
base/arena_allocator.cc \
+ base/arena_bit_vector.cc \
base/bit_vector.cc \
base/hex_dump.cc \
base/logging.cc \
@@ -156,6 +156,7 @@
oat_file.cc \
oat_file_assistant.cc \
oat_file_manager.cc \
+ oat_quick_method_header.cc \
object_lock.cc \
offsets.cc \
os_linux.cc \
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index e676a09..d6ba304 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -39,7 +39,7 @@
runtime->SetInstructionSet(isa);
ArtMethod* save_method = runtime->CreateCalleeSaveMethod();
runtime->SetCalleeSaveMethod(save_method, type);
- QuickMethodFrameInfo frame_info = ArtCode(save_method).GetQuickFrameInfo();
+ QuickMethodFrameInfo frame_info = runtime->GetRuntimeMethodFrameInfo(save_method);
EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
<< type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills="
<< frame_info.FpSpillMask() << std::dec;
diff --git a/runtime/arch/arm/context_arm.cc b/runtime/arch/arm/context_arm.cc
index d5c7846..9cbec1e 100644
--- a/runtime/arch/arm/context_arm.cc
+++ b/runtime/arch/arm/context_arm.cc
@@ -16,9 +16,9 @@
#include "context_arm.h"
-#include "art_method-inl.h"
#include "base/bit_utils.h"
#include "quick/quick_method_frame_info.h"
+#include "thread-inl.h"
namespace art {
namespace arm {
@@ -37,23 +37,21 @@
arg0_ = 0;
}
-void ArmContext::FillCalleeSaves(const StackVisitor& fr) {
- ArtCode art_code = fr.GetCurrentCode();
- const QuickMethodFrameInfo frame_info = art_code.GetQuickFrameInfo();
+void ArmContext::FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& frame_info) {
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
uint32_t core_regs = frame_info.CoreSpillMask();
DCHECK_EQ(0u, core_regs & (static_cast<uint32_t>(-1) << kNumberOfCoreRegisters));
for (uint32_t core_reg : HighToLowBits(core_regs)) {
- gprs_[core_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
+ gprs_[core_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()));
// FP registers come second, from the highest down to the lowest.
for (uint32_t fp_reg : HighToLowBits(frame_info.FpSpillMask())) {
- fprs_[fp_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
+ fprs_[fp_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()) + POPCOUNT(frame_info.FpSpillMask()));
diff --git a/runtime/arch/arm/context_arm.h b/runtime/arch/arm/context_arm.h
index ea31055..2623ee9 100644
--- a/runtime/arch/arm/context_arm.h
+++ b/runtime/arch/arm/context_arm.h
@@ -35,7 +35,7 @@
void Reset() OVERRIDE;
- void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
void SetSP(uintptr_t new_sp) OVERRIDE {
SetGPR(SP, new_sp);
diff --git a/runtime/arch/arm64/context_arm64.cc b/runtime/arch/arm64/context_arm64.cc
index cdc03fe..d5d1ec7 100644
--- a/runtime/arch/arm64/context_arm64.cc
+++ b/runtime/arch/arm64/context_arm64.cc
@@ -18,9 +18,9 @@
#include "context_arm64.h"
-#include "art_method-inl.h"
#include "base/bit_utils.h"
#include "quick/quick_method_frame_info.h"
+#include "thread-inl.h"
namespace art {
namespace arm64 {
@@ -39,21 +39,19 @@
arg0_ = 0;
}
-void Arm64Context::FillCalleeSaves(const StackVisitor& fr) {
- ArtCode code = fr.GetCurrentCode();
- const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
+void Arm64Context::FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& frame_info) {
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
for (uint32_t core_reg : HighToLowBits(frame_info.CoreSpillMask())) {
- gprs_[core_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
+ gprs_[core_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()));
// FP registers come second, from the highest down to the lowest.
for (uint32_t fp_reg : HighToLowBits(frame_info.FpSpillMask())) {
- fprs_[fp_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
+ fprs_[fp_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()) + POPCOUNT(frame_info.FpSpillMask()));
diff --git a/runtime/arch/arm64/context_arm64.h b/runtime/arch/arm64/context_arm64.h
index 11314e0..105e784 100644
--- a/runtime/arch/arm64/context_arm64.h
+++ b/runtime/arch/arm64/context_arm64.h
@@ -35,7 +35,7 @@
void Reset() OVERRIDE;
- void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
void SetSP(uintptr_t new_sp) OVERRIDE {
SetGPR(SP, new_sp);
diff --git a/runtime/arch/context.h b/runtime/arch/context.h
index 9af7c04..a500648 100644
--- a/runtime/arch/context.h
+++ b/runtime/arch/context.h
@@ -25,7 +25,7 @@
namespace art {
-class StackVisitor;
+class QuickMethodFrameInfo;
// Representation of a thread's context on the executing machine, used to implement long jumps in
// the quick stack frame layout.
@@ -39,10 +39,18 @@
// Re-initializes the registers for context re-use.
virtual void Reset() = 0;
+ static uintptr_t* CalleeSaveAddress(uint8_t* frame, int num, size_t frame_size) {
+ // Callee saves are held at the top of the frame
+ uint8_t* save_addr = frame + frame_size - ((num + 1) * sizeof(void*));
+#if defined(__i386__) || defined(__x86_64__)
+ save_addr -= sizeof(void*); // account for return address
+#endif
+ return reinterpret_cast<uintptr_t*>(save_addr);
+ }
+
// Reads values from callee saves in the given frame. The frame also holds
// the method that holds the layout.
- virtual void FillCalleeSaves(const StackVisitor& fr)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ virtual void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) = 0;
// Sets the stack pointer value.
virtual void SetSP(uintptr_t new_sp) = 0;
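
FillCalleeSaves now takes a raw frame pointer plus a QuickMethodFrameInfo, and the shared Context::CalleeSaveAddress() helper computes each spill slot from the top of the frame: slot `num` lives at frame + frame_size - (num + 1) * sizeof(void*), minus one extra slot on x86/x86-64 for the return address pushed by `call`. A worked example of that arithmetic, with the pointer size passed in explicitly so it runs the same on any host (the real helper uses the target's sizeof(void*)):

#include <cassert>
#include <cstdint>

uintptr_t CalleeSaveSlot(uintptr_t frame, int num, size_t frame_size,
                         size_t pointer_size, bool has_return_address_slot) {
  // Callee saves are held at the top of the frame.
  uintptr_t addr = frame + frame_size - (num + 1) * pointer_size;
  if (has_return_address_slot) {
    addr -= pointer_size;  // skip the slot occupied by the return address
  }
  return addr;
}

int main() {
  const uintptr_t frame = 0x1000;
  const size_t frame_size = 64;  // hypothetical frame size, 8-byte slots
  // Without a return-address slot (e.g. arm64), spill 0 is the last 8 bytes
  // of the frame: 0x1000 + 64 - 8 = 0x1038.
  assert(CalleeSaveSlot(frame, 0, frame_size, 8, false) == 0x1038);
  // On x86-64 the return address occupies that slot, so spill 0 moves down one.
  assert(CalleeSaveSlot(frame, 0, frame_size, 8, true) == 0x1030);
  return 0;
}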
diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
index dba62d9..4dedb33 100644
--- a/runtime/arch/mips/context_mips.cc
+++ b/runtime/arch/mips/context_mips.cc
@@ -16,7 +16,6 @@
#include "context_mips.h"
-#include "art_method-inl.h"
#include "base/bit_utils.h"
#include "quick/quick_method_frame_info.h"
@@ -37,21 +36,19 @@
arg0_ = 0;
}
-void MipsContext::FillCalleeSaves(const StackVisitor& fr) {
- ArtCode code = fr.GetCurrentCode();
- const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
+void MipsContext::FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& frame_info) {
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
for (uint32_t core_reg : HighToLowBits(frame_info.CoreSpillMask())) {
- gprs_[core_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
+ gprs_[core_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()));
// FP registers come second, from the highest down to the lowest.
for (uint32_t fp_reg : HighToLowBits(frame_info.FpSpillMask())) {
- fprs_[fp_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
+ fprs_[fp_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()) + POPCOUNT(frame_info.FpSpillMask()));
diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h
index 0affe53..f1e2905 100644
--- a/runtime/arch/mips/context_mips.h
+++ b/runtime/arch/mips/context_mips.h
@@ -34,7 +34,7 @@
void Reset() OVERRIDE;
- void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
void SetSP(uintptr_t new_sp) OVERRIDE {
SetGPR(SP, new_sp);
diff --git a/runtime/arch/mips64/context_mips64.cc b/runtime/arch/mips64/context_mips64.cc
index d808c9e..bd1ac3b 100644
--- a/runtime/arch/mips64/context_mips64.cc
+++ b/runtime/arch/mips64/context_mips64.cc
@@ -16,7 +16,6 @@
#include "context_mips64.h"
-#include "art_method-inl.h"
#include "base/bit_utils.h"
#include "quick/quick_method_frame_info.h"
@@ -37,21 +36,19 @@
arg0_ = 0;
}
-void Mips64Context::FillCalleeSaves(const StackVisitor& fr) {
- ArtCode code = fr.GetCurrentCode();
- const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
+void Mips64Context::FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& frame_info) {
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
for (uint32_t core_reg : HighToLowBits(frame_info.CoreSpillMask())) {
- gprs_[core_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
+ gprs_[core_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()));
// FP registers come second, from the highest down to the lowest.
for (uint32_t fp_reg : HighToLowBits(frame_info.FpSpillMask())) {
- fprs_[fp_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
+ fprs_[fp_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()) + POPCOUNT(frame_info.FpSpillMask()));
diff --git a/runtime/arch/mips64/context_mips64.h b/runtime/arch/mips64/context_mips64.h
index 84b1c9b..89fbf8f 100644
--- a/runtime/arch/mips64/context_mips64.h
+++ b/runtime/arch/mips64/context_mips64.h
@@ -34,7 +34,7 @@
void Reset() OVERRIDE;
- void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
void SetSP(uintptr_t new_sp) OVERRIDE {
SetGPR(SP, new_sp);
diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc
index 0d88dd0..077d2db 100644
--- a/runtime/arch/x86/context_x86.cc
+++ b/runtime/arch/x86/context_x86.cc
@@ -16,10 +16,8 @@
#include "context_x86.h"
-#include "art_code.h"
#include "base/bit_utils.h"
#include "quick/quick_method_frame_info.h"
-#include "stack.h"
namespace art {
namespace x86 {
@@ -37,9 +35,7 @@
arg0_ = 0;
}
-void X86Context::FillCalleeSaves(const StackVisitor& fr) {
- ArtCode code = fr.GetCurrentCode();
- const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
+void X86Context::FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& frame_info) {
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
@@ -47,7 +43,7 @@
frame_info.CoreSpillMask() & ~(static_cast<uint32_t>(-1) << kNumberOfCpuRegisters);
DCHECK_EQ(1, POPCOUNT(frame_info.CoreSpillMask() & ~core_regs)); // Return address spill.
for (uint32_t core_reg : HighToLowBits(core_regs)) {
- gprs_[core_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
+ gprs_[core_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()) - 1);
@@ -58,9 +54,9 @@
for (uint32_t fp_reg : HighToLowBits(fp_regs)) {
// Two void* per XMM register.
fprs_[2 * fp_reg] = reinterpret_cast<uint32_t*>(
- fr.CalleeSaveAddress(spill_pos + 1, frame_info.FrameSizeInBytes()));
+ CalleeSaveAddress(frame, spill_pos + 1, frame_info.FrameSizeInBytes()));
fprs_[2 * fp_reg + 1] = reinterpret_cast<uint32_t*>(
- fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes()));
+ CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes()));
spill_pos += 2;
}
DCHECK_EQ(spill_pos,
diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h
index 59beb12..f482d9f 100644
--- a/runtime/arch/x86/context_x86.h
+++ b/runtime/arch/x86/context_x86.h
@@ -34,7 +34,7 @@
void Reset() OVERRIDE;
- void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
void SetSP(uintptr_t new_sp) OVERRIDE {
SetGPR(ESP, new_sp);
diff --git a/runtime/arch/x86_64/context_x86_64.cc b/runtime/arch/x86_64/context_x86_64.cc
index 12c94bc..7c49e9c 100644
--- a/runtime/arch/x86_64/context_x86_64.cc
+++ b/runtime/arch/x86_64/context_x86_64.cc
@@ -16,10 +16,8 @@
#include "context_x86_64.h"
-#include "art_code.h"
#include "base/bit_utils.h"
#include "quick/quick_method_frame_info.h"
-#include "stack.h"
namespace art {
namespace x86_64 {
@@ -37,9 +35,7 @@
arg0_ = 0;
}
-void X86_64Context::FillCalleeSaves(const StackVisitor& fr) {
- ArtCode code = fr.GetCurrentCode();
- const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
+void X86_64Context::FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& frame_info) {
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
@@ -47,7 +43,7 @@
frame_info.CoreSpillMask() & ~(static_cast<uint32_t>(-1) << kNumberOfCpuRegisters);
DCHECK_EQ(1, POPCOUNT(frame_info.CoreSpillMask() & ~core_regs)); // Return address spill.
for (uint32_t core_reg : HighToLowBits(core_regs)) {
- gprs_[core_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
+ gprs_[core_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()) - 1);
@@ -57,7 +53,7 @@
DCHECK_EQ(0u, fp_regs & (static_cast<uint32_t>(-1) << kNumberOfFloatRegisters));
for (uint32_t fp_reg : HighToLowBits(fp_regs)) {
fprs_[fp_reg] = reinterpret_cast<uint64_t*>(
- fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes()));
+ CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes()));
++spill_pos;
}
DCHECK_EQ(spill_pos,
diff --git a/runtime/arch/x86_64/context_x86_64.h b/runtime/arch/x86_64/context_x86_64.h
index f05b7f0..46f2b63 100644
--- a/runtime/arch/x86_64/context_x86_64.h
+++ b/runtime/arch/x86_64/context_x86_64.h
@@ -34,7 +34,7 @@
void Reset() OVERRIDE;
- void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
void SetSP(uintptr_t new_sp) OVERRIDE {
SetGPR(RSP, new_sp);
diff --git a/runtime/art_code.cc b/runtime/art_code.cc
deleted file mode 100644
index ad0b170..0000000
--- a/runtime/art_code.cc
+++ /dev/null
@@ -1,333 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "art_code.h"
-
-#include "art_method.h"
-#include "art_method-inl.h"
-#include "class_linker.h"
-#include "entrypoints/runtime_asm_entrypoints.h"
-#include "handle_scope.h"
-#include "jit/jit.h"
-#include "jit/jit_code_cache.h"
-#include "mapping_table.h"
-#include "oat.h"
-#include "runtime.h"
-#include "utils.h"
-
-namespace art {
-
- // Converts a dex PC to a native PC.
-uintptr_t ArtCode::ToNativeQuickPc(const uint32_t dex_pc,
- bool is_for_catch_handler,
- bool abort_on_failure)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
- if (IsOptimized(sizeof(void*))) {
- // Optimized code does not have a mapping table. Search for the dex-to-pc
- // mapping in stack maps.
- CodeInfo code_info = GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
-
- // All stack maps are stored in the same CodeItem section, safepoint stack
- // maps first, then catch stack maps. We use `is_for_catch_handler` to select
- // the order of iteration.
- StackMap stack_map =
- LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc, encoding)
- : code_info.GetStackMapForDexPc(dex_pc, encoding);
- if (stack_map.IsValid()) {
- return reinterpret_cast<uintptr_t>(entry_point) + stack_map.GetNativePcOffset(encoding);
- }
- } else {
- MappingTable table((entry_point != nullptr) ? GetMappingTable(sizeof(void*)) : nullptr);
- if (table.TotalSize() == 0) {
- DCHECK_EQ(dex_pc, 0U);
- return 0; // Special no mapping/pc == 0 case
- }
- // Assume the caller wants a dex-to-pc mapping so check here first.
- typedef MappingTable::DexToPcIterator It;
- for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
- if (cur.DexPc() == dex_pc) {
- return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
- }
- }
- // Now check pc-to-dex mappings.
- typedef MappingTable::PcToDexIterator It2;
- for (It2 cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
- if (cur.DexPc() == dex_pc) {
- return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
- }
- }
- }
-
- if (abort_on_failure) {
- LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
- << " in " << PrettyMethod(method_);
- }
- return UINTPTR_MAX;
-}
-
-bool ArtCode::IsOptimized(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_) {
- // Temporary solution for detecting if a method has been optimized: the compiler
- // does not create a GC map. Instead, the vmap table contains the stack map
- // (as in stack_map.h).
- return !method_->IsNative()
- && method_->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size) != nullptr
- && GetQuickOatEntryPoint(pointer_size) != nullptr
- && GetNativeGcMap(pointer_size) == nullptr;
-}
-
-CodeInfo ArtCode::GetOptimizedCodeInfo() {
- DCHECK(IsOptimized(sizeof(void*)));
- const void* code_pointer = EntryPointToCodePointer(GetQuickOatEntryPoint(sizeof(void*)));
- DCHECK(code_pointer != nullptr);
- uint32_t offset =
- reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
- const void* data =
- reinterpret_cast<const void*>(reinterpret_cast<const uint8_t*>(code_pointer) - offset);
- return CodeInfo(data);
-}
-
-uintptr_t ArtCode::NativeQuickPcOffset(const uintptr_t pc) {
- const void* quick_entry_point = GetQuickOatEntryPoint(sizeof(void*));
- CHECK_NE(quick_entry_point, GetQuickToInterpreterBridge());
- CHECK_EQ(quick_entry_point,
- Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(method_, sizeof(void*)));
- return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
-}
-
-uint32_t ArtCode::ToDexPc(const uintptr_t pc, bool abort_on_failure) {
- const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
- uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
- if (IsOptimized(sizeof(void*))) {
- CodeInfo code_info = GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset, encoding);
- if (stack_map.IsValid()) {
- return stack_map.GetDexPc(encoding);
- }
- } else {
- MappingTable table(entry_point != nullptr ? GetMappingTable(sizeof(void*)) : nullptr);
- if (table.TotalSize() == 0) {
- // NOTE: Special methods (see Mir2Lir::GenSpecialCase()) have an empty mapping
- // but they have no suspend checks and, consequently, we never call ToDexPc() for them.
- DCHECK(method_->IsNative() || method_->IsCalleeSaveMethod() || method_->IsProxyMethod())
- << PrettyMethod(method_);
- return DexFile::kDexNoIndex; // Special no mapping case
- }
- // Assume the caller wants a pc-to-dex mapping so check here first.
- typedef MappingTable::PcToDexIterator It;
- for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
- if (cur.NativePcOffset() == sought_offset) {
- return cur.DexPc();
- }
- }
- // Now check dex-to-pc mappings.
- typedef MappingTable::DexToPcIterator It2;
- for (It2 cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
- if (cur.NativePcOffset() == sought_offset) {
- return cur.DexPc();
- }
- }
- }
- if (abort_on_failure) {
- LOG(FATAL) << "Failed to find Dex offset for PC offset " << reinterpret_cast<void*>(sought_offset)
- << "(PC " << reinterpret_cast<void*>(pc) << ", entry_point=" << entry_point
- << " current entry_point=" << GetQuickOatEntryPoint(sizeof(void*))
- << ") in " << PrettyMethod(method_);
- }
- return DexFile::kDexNoIndex;
-}
-
-const uint8_t* ArtCode::GetNativeGcMap(size_t pointer_size) {
- const void* code_pointer = EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
- if (code_pointer == nullptr) {
- return nullptr;
- }
- uint32_t offset =
- reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].gc_map_offset_;
- if (UNLIKELY(offset == 0u)) {
- return nullptr;
- }
- return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
-}
-
-const uint8_t* ArtCode::GetVmapTable(size_t pointer_size) {
- CHECK(!IsOptimized(pointer_size)) << "Unimplemented vmap table for optimized compiler";
- const void* code_pointer = EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
- if (code_pointer == nullptr) {
- return nullptr;
- }
- uint32_t offset =
- reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
- if (UNLIKELY(offset == 0u)) {
- return nullptr;
- }
- return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
-}
-
-const uint8_t* ArtCode::GetMappingTable(size_t pointer_size) {
- const void* code_pointer = EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
- if (code_pointer == nullptr) {
- return nullptr;
- }
- uint32_t offset =
- reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].mapping_table_offset_;
- if (UNLIKELY(offset == 0u)) {
- return nullptr;
- }
- return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
-}
-
-// Counts the number of references in the parameter list of the corresponding method.
-// Note: Thus does _not_ include "this" for non-static methods.
-static uint32_t GetNumberOfReferenceArgsWithoutReceiver(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- uint32_t shorty_len;
- const char* shorty = method->GetShorty(&shorty_len);
- uint32_t refs = 0;
- for (uint32_t i = 1; i < shorty_len ; ++i) {
- if (shorty[i] == 'L') {
- refs++;
- }
- }
- return refs;
-}
-
-QuickMethodFrameInfo ArtCode::GetQuickFrameInfo() {
- Runtime* runtime = Runtime::Current();
-
- if (UNLIKELY(method_->IsAbstract())) {
- return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
- }
-
- // This goes before IsProxyMethod since runtime methods have a null declaring class.
- if (UNLIKELY(method_->IsRuntimeMethod())) {
- return runtime->GetRuntimeMethodFrameInfo(method_);
- }
-
- // For Proxy method we add special handling for the direct method case (there is only one
- // direct method - constructor). Direct method is cloned from original
- // java.lang.reflect.Proxy class together with code and as a result it is executed as usual
- // quick compiled method without any stubs. So the frame info should be returned as it is a
- // quick method not a stub. However, if instrumentation stubs are installed, the
- // instrumentation->GetQuickCodeFor() returns the artQuickProxyInvokeHandler instead of an
- // oat code pointer, thus we have to add a special case here.
- if (UNLIKELY(method_->IsProxyMethod())) {
- if (method_->IsDirect()) {
- CHECK(method_->IsConstructor());
- const void* code_pointer =
- EntryPointToCodePointer(method_->GetEntryPointFromQuickCompiledCode());
- return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
- } else {
- return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
- }
- }
-
- const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method_, sizeof(void*));
- ClassLinker* class_linker = runtime->GetClassLinker();
- // On failure, instead of null we get the quick-generic-jni-trampoline for native method
- // indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
- // for non-native methods. And we really shouldn't see a failure for non-native methods here.
- DCHECK(!class_linker->IsQuickToInterpreterBridge(entry_point));
-
- if (class_linker->IsQuickGenericJniStub(entry_point)) {
- // Generic JNI frame.
- DCHECK(method_->IsNative());
- uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(method_) + 1;
- size_t scope_size = HandleScope::SizeOf(handle_refs);
- QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
-
- // Callee saves + handle scope + method ref + alignment
- // Note: -sizeof(void*) since callee-save frame stores a whole method pointer.
- size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() - sizeof(void*) +
- sizeof(ArtMethod*) + scope_size, kStackAlignment);
- return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
- }
-
- const void* code_pointer = EntryPointToCodePointer(entry_point);
- return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
-}
-
-void ArtCode::AssertPcIsWithinQuickCode(uintptr_t pc) {
- if (method_->IsNative() || method_->IsRuntimeMethod() || method_->IsProxyMethod()) {
- return;
- }
- if (pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
- return;
- }
- const void* code = method_->GetEntryPointFromQuickCompiledCode();
- if (code == GetQuickInstrumentationEntryPoint()) {
- return;
- }
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- if (class_linker->IsQuickToInterpreterBridge(code) ||
- class_linker->IsQuickResolutionStub(code)) {
- return;
- }
- // If we are the JIT then we may have just compiled the method after the
- // IsQuickToInterpreterBridge check.
- jit::Jit* const jit = Runtime::Current()->GetJit();
- if (jit != nullptr &&
- jit->GetCodeCache()->ContainsCodePtr(reinterpret_cast<const void*>(code))) {
- return;
- }
-
- uint32_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(
- EntryPointToCodePointer(code))[-1].code_size_;
- uintptr_t code_start = reinterpret_cast<uintptr_t>(code);
- CHECK(code_start <= pc && pc <= (code_start + code_size))
- << PrettyMethod(method_)
- << " pc=" << std::hex << pc
- << " code=" << code
- << " size=" << code_size;
-}
-
-bool ArtCode::PcIsWithinQuickCode(uintptr_t pc) {
- /*
- * During a stack walk, a return PC may point past-the-end of the code
- * in the case that the last instruction is a call that isn't expected to
- * return. Thus, we check <= code + GetCodeSize().
- *
- * NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
- */
- uintptr_t code = reinterpret_cast<uintptr_t>(EntryPointToCodePointer(
- method_->GetEntryPointFromQuickCompiledCode()));
- if (code == 0) {
- return pc == 0;
- }
- uintptr_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_;
- return code <= pc && pc <= (code + code_size);
-}
-
-const void* ArtCode::GetQuickOatEntryPoint(size_t pointer_size) {
- if (method_->IsAbstract() || method_->IsRuntimeMethod() || method_->IsProxyMethod()) {
- return nullptr;
- }
- Runtime* runtime = Runtime::Current();
- ClassLinker* class_linker = runtime->GetClassLinker();
- const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(method_, pointer_size);
- // On failure, instead of null we get the quick-generic-jni-trampoline for native method
- // indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
- // for non-native methods.
- if (class_linker->IsQuickToInterpreterBridge(code) ||
- class_linker->IsQuickGenericJniStub(code)) {
- return nullptr;
- }
- return code;
-}
-
-} // namespace art
diff --git a/runtime/art_code.h b/runtime/art_code.h
deleted file mode 100644
index 1d2d898..0000000
--- a/runtime/art_code.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ART_CODE_H_
-#define ART_RUNTIME_ART_CODE_H_
-
-#include "base/mutex.h"
-#include "offsets.h"
-#include "quick/quick_method_frame_info.h"
-#include "stack_map.h"
-
-namespace art {
-
-class ArtMethod;
-
-class ArtCode FINAL {
- public:
- explicit ArtCode(ArtMethod** method) : method_(*method) {}
- explicit ArtCode(ArtMethod* method) : method_(method) {}
- ArtCode() : method_(nullptr) {}
-
- // Converts a dex PC to a native PC.
- uintptr_t ToNativeQuickPc(const uint32_t dex_pc,
- bool is_for_catch_handler,
- bool abort_on_failure = true)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- bool IsOptimized(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
-
- CodeInfo GetOptimizedCodeInfo() SHARED_REQUIRES(Locks::mutator_lock_);
-
- uintptr_t NativeQuickPcOffset(const uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Converts a native PC to a dex PC.
- uint32_t ToDexPc(const uintptr_t pc, bool abort_on_failure = true)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Callers should wrap the uint8_t* in a GcMap instance for convenient access.
- const uint8_t* GetNativeGcMap(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
-
- const uint8_t* GetVmapTable(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
-
- const uint8_t* GetMappingTable(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
-
- QuickMethodFrameInfo GetQuickFrameInfo() SHARED_REQUIRES(Locks::mutator_lock_);
-
- FrameOffset GetReturnPcOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
- return FrameOffset(GetFrameSizeInBytes() - sizeof(void*));
- }
-
- template <bool kCheckFrameSize = true>
- uint32_t GetFrameSizeInBytes() SHARED_REQUIRES(Locks::mutator_lock_) {
- uint32_t result = GetQuickFrameInfo().FrameSizeInBytes();
- if (kCheckFrameSize) {
- DCHECK_LE(static_cast<size_t>(kStackAlignment), result);
- }
- return result;
- }
-
- const void* GetQuickOatEntryPoint(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
-
- void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
-
- bool PcIsWithinQuickCode(uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
-
- FrameOffset GetHandleScopeOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
- constexpr size_t handle_scope_offset = sizeof(ArtMethod*);
- DCHECK_LT(handle_scope_offset, GetFrameSizeInBytes());
- return FrameOffset(handle_scope_offset);
- }
-
- ArtMethod* GetMethod() const { return method_; }
-
- private:
- ArtMethod* method_;
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_ART_CODE_H_
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index f9d9077..f5befdf 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -384,4 +384,28 @@
return oat_method.GetVmapTable();
}
+const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) {
+ if (IsRuntimeMethod() || IsProxyMethod()) {
+ return nullptr;
+ }
+
+ Runtime* runtime = Runtime::Current();
+ const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*));
+ DCHECK(code != nullptr);
+
+ if (runtime->GetClassLinker()->IsQuickGenericJniStub(code)) {
+ // The generic JNI does not have any method header.
+ return nullptr;
+ }
+
+ code = EntryPointToCodePointer(code);
+ OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(
+ reinterpret_cast<uintptr_t>(code) - sizeof(OatQuickMethodHeader));
+
+ // TODO(ngeoffray): validate the pc. Note that unit tests can give unrelated pcs (for
+ // example arch_test).
+ UNUSED(pc);
+ return method_header;
+}
+
} // namespace art
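
ArtMethod::GetOatQuickMethodHeader() replaces much of the deleted ArtCode helper: the OatQuickMethodHeader is laid out immediately before the compiled code it describes, so the header is recovered by subtracting sizeof(OatQuickMethodHeader) from the code pointer (the oatdump.cc hunk earlier does the same subtraction). A toy layout demonstrating that "header precedes code" convention, with a simplified header struct of my own rather than the real one:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Simplified stand-in for OatQuickMethodHeader: metadata emitted directly
// before the machine code of a compiled method.
struct FakeMethodHeader {
  uint32_t mapping_table_offset;
  uint32_t vmap_table_offset;
  uint32_t code_size;
};

// Given a pointer to the first code byte, read the header that precedes it.
// (ART reinterpret_casts in place; memcpy is used here to keep the sketch
// strictly well-defined.)
FakeMethodHeader ReadHeaderBeforeCode(const uint8_t* code) {
  FakeMethodHeader header;
  std::memcpy(&header, code - sizeof(FakeMethodHeader), sizeof(header));
  return header;
}

int main() {
  // Emit [header][code...] contiguously, as an oat file would.
  std::vector<uint8_t> storage(sizeof(FakeMethodHeader) + 16);
  FakeMethodHeader header{0, 0, 16};
  std::memcpy(storage.data(), &header, sizeof(header));
  const uint8_t* code = storage.data() + sizeof(FakeMethodHeader);

  assert(ReadHeaderBeforeCode(code).code_size == 16);
  return 0;
}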
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 9743250..9f1495c 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -32,6 +32,7 @@
namespace art {
union JValue;
+class OatQuickMethodHeader;
class ProfilingInfo;
class ScopedObjectAccessAlreadyRunnable;
class StringPiece;
@@ -434,6 +435,11 @@
const uint8_t* GetQuickenedInfo() SHARED_REQUIRES(Locks::mutator_lock_);
+ // Returns the method header for the compiled code containing 'pc'. Note that runtime
+ // methods will return null for this method, as they are not oat based.
+ const OatQuickMethodHeader* GetOatQuickMethodHeader(uintptr_t pc)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
protected:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
// The class we are a part of.
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index f9960ac..ad255b8 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -19,6 +19,7 @@
#include <map>
#include <set>
+#include <unordered_map>
#include "atomic.h"
#include "base/macros.h"
@@ -150,19 +151,24 @@
template<class T, AllocatorTag kTag>
// C++ doesn't allow template typedefs. This is a workaround template typedef which is
// TrackingAllocatorImpl<T> if kEnableTrackingAllocator is true, std::allocator<T> otherwise.
-class TrackingAllocator : public TypeStaticIf<kEnableTrackingAllocator,
- TrackingAllocatorImpl<T, kTag>,
- std::allocator<T>>::type {
-};
+using TrackingAllocator = typename TypeStaticIf<kEnableTrackingAllocator,
+ TrackingAllocatorImpl<T, kTag>,
+ std::allocator<T>>::type;
template<class Key, class T, AllocatorTag kTag, class Compare = std::less<Key>>
-class AllocationTrackingMultiMap : public std::multimap<
- Key, T, Compare, TrackingAllocator<std::pair<Key, T>, kTag>> {
-};
+using AllocationTrackingMultiMap = std::multimap<
+ Key, T, Compare, TrackingAllocator<std::pair<Key, T>, kTag>>;
template<class Key, AllocatorTag kTag, class Compare = std::less<Key>>
-class AllocationTrackingSet : public std::set<Key, Compare, TrackingAllocator<Key, kTag>> {
-};
+using AllocationTrackingSet = std::set<Key, Compare, TrackingAllocator<Key, kTag>>;
+
+template<class Key,
+ class T,
+ AllocatorTag kTag,
+ class Hash = std::hash<Key>,
+ class Pred = std::equal_to<Key>>
+using AllocationTrackingUnorderedMap = std::unordered_map<
+ Key, T, Hash, Pred, TrackingAllocator<std::pair<const Key, T>, kTag>>;
} // namespace art
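
The allocator change above replaces container subclasses with alias templates and adds an unordered-map variant. A rough usage sketch of the same shape, with std::allocator standing in for the ART-internal TrackingAllocator:

    #include <string>
    #include <unordered_map>

    // Stand-in for ART's TrackingAllocator<T, kTag>; the real one switches between a
    // tracking allocator and std::allocator based on a compile-time flag.
    template <typename T>
    using FakeTrackingAllocator = std::allocator<T>;

    // Mirrors the shape of AllocationTrackingUnorderedMap: the allocator's value_type
    // must be std::pair<const Key, T> to satisfy unordered_map's requirements.
    template <typename Key, typename T>
    using TrackingUnorderedMap =
        std::unordered_map<Key, T, std::hash<Key>, std::equal_to<Key>,
                           FakeTrackingAllocator<std::pair<const Key, T>>>;

    int main() {
      TrackingUnorderedMap<std::string, int> counts;
      counts["oat"] = 1;  // Behaves exactly like std::unordered_map.
      return counts.size() == 1 ? 0 : 1;
    }
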
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 1704688..71afa0f 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -94,6 +94,8 @@
"CodeGen ",
"ParallelMove ",
"GraphChecker ",
+ "LSE ",
+ "Verifier ",
};
template <bool kCount>
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 4e9282f..ace6c38 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -106,6 +106,8 @@
kArenaAllocCodeGenerator,
kArenaAllocParallelMoveResolver,
kArenaAllocGraphChecker,
+ kArenaAllocLSE,
+ kArenaAllocVerifier,
kNumArenaAllocKinds
};
diff --git a/compiler/utils/arena_bit_vector.cc b/runtime/base/arena_bit_vector.cc
similarity index 100%
rename from compiler/utils/arena_bit_vector.cc
rename to runtime/base/arena_bit_vector.cc
diff --git a/compiler/utils/arena_bit_vector.h b/runtime/base/arena_bit_vector.h
similarity index 92%
rename from compiler/utils/arena_bit_vector.h
rename to runtime/base/arena_bit_vector.h
index f2a7452..d606166 100644
--- a/compiler/utils/arena_bit_vector.h
+++ b/runtime/base/arena_bit_vector.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_UTILS_ARENA_BIT_VECTOR_H_
-#define ART_COMPILER_UTILS_ARENA_BIT_VECTOR_H_
+#ifndef ART_RUNTIME_BASE_ARENA_BIT_VECTOR_H_
+#define ART_RUNTIME_BASE_ARENA_BIT_VECTOR_H_
#include "base/arena_object.h"
#include "base/bit_vector.h"
@@ -65,4 +65,4 @@
} // namespace art
-#endif // ART_COMPILER_UTILS_ARENA_BIT_VECTOR_H_
+#endif // ART_RUNTIME_BASE_ARENA_BIT_VECTOR_H_
diff --git a/runtime/base/dchecked_vector.h b/runtime/base/dchecked_vector.h
index 6ec573a..2bd12df 100644
--- a/runtime/base/dchecked_vector.h
+++ b/runtime/base/dchecked_vector.h
@@ -59,8 +59,10 @@
: Base() { }
explicit dchecked_vector(const allocator_type& alloc)
: Base(alloc) { }
+ // Note that we cannot forward to std::vector(size_type, const allocator_type&) because it is
+ // not available in C++11, which is the latest standard GCC supports here. http://b/25022512
explicit dchecked_vector(size_type n, const allocator_type& alloc = allocator_type())
- : Base(n, alloc) { }
+ : Base(alloc) { resize(n); }
dchecked_vector(size_type n,
const value_type& value,
const allocator_type& alloc = allocator_type())
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index 4819f06..95baa82 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -420,6 +420,19 @@
Resize(Size() / max_load_factor_);
}
+ // Reserve enough room so that elements can be inserted until Size() == num_elements without
+ // the hash set having to grow. No-op if the hash set is already large enough for this.
+ void Reserve(size_t num_elements) {
+ size_t num_buckets = num_elements / max_load_factor_;
+ // Deal with rounding errors. Add one for rounding.
+ while (static_cast<size_t>(num_buckets * max_load_factor_) <= num_elements + 1u) {
+ ++num_buckets;
+ }
+ if (num_buckets > NumBuckets()) {
+ Resize(num_buckets);
+ }
+ }
+
// Total distance that inserted elements were probed. Used for measuring how good hash
// functions are.
size_t TotalProbeDistance() const {
@@ -488,6 +501,15 @@
}
}
+ // The hash set expands when Size() reaches ElementsUntilExpand().
+ size_t ElementsUntilExpand() const {
+ return elements_until_expand_;
+ }
+
+ size_t NumBuckets() const {
+ return num_buckets_;
+ }
+
private:
T& ElementForIndex(size_t index) {
DCHECK_LT(index, NumBuckets());
@@ -543,10 +565,6 @@
return emptyfn_.IsEmpty(ElementForIndex(index));
}
- size_t NumBuckets() const {
- return num_buckets_;
- }
-
// Allocate a number of buckets.
void AllocateStorage(size_t num_buckets) {
num_buckets_ = num_buckets;
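
Reserve() above derives the bucket count from the load factor and then bumps it until num_buckets * max_load_factor clears num_elements + 1, so inserts up to the reserved count cannot trigger a resize. A quick standalone check of that arithmetic (the 0.7 load factor is an assumed value, not necessarily ART's default):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const double max_load_factor = 0.7;  // Assumed value; ART's default may differ.
      const size_t num_elements = 55;
      size_t num_buckets = static_cast<size_t>(num_elements / max_load_factor);
      // Mirror Reserve(): grow until the bucket count comfortably covers the request.
      while (static_cast<size_t>(num_buckets * max_load_factor) <= num_elements + 1) {
        ++num_buckets;
      }
      std::printf("%zu elements -> %zu buckets\n", num_elements, num_buckets);
      return 0;
    }
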
diff --git a/runtime/base/hash_set_test.cc b/runtime/base/hash_set_test.cc
index 743e98e..8254063 100644
--- a/runtime/base/hash_set_test.cc
+++ b/runtime/base/hash_set_test.cc
@@ -333,4 +333,25 @@
ASSERT_NE(hash_set.end(), hash_set.Find(std::forward_list<int>({1, 2, 3, 4})));
}
+TEST_F(HashSetTest, TestReserve) {
+ HashSet<std::string, IsEmptyFnString> hash_set;
+ std::vector<size_t> sizes = {1, 10, 25, 55, 128, 1024, 4096};
+ for (size_t size : sizes) {
+ hash_set.Reserve(size);
+ const size_t buckets_before = hash_set.NumBuckets();
+ // Check that we expanded enough.
+ CHECK_GE(hash_set.ElementsUntilExpand(), size);
+ // Try inserting elements until we are at our reserve size and ensure the hash set did not
+ // expand.
+ while (hash_set.Size() < size) {
+ hash_set.Insert(std::to_string(hash_set.Size()));
+ }
+ CHECK_EQ(hash_set.NumBuckets(), buckets_before);
+ }
+ // Check the behaviour for shrinking; the hash set does not necessarily resize down.
+ constexpr size_t size = 100;
+ hash_set.Reserve(size);
+ CHECK_GE(hash_set.ElementsUntilExpand(), size);
+}
+
} // namespace art
diff --git a/runtime/base/scoped_arena_allocator.h b/runtime/base/scoped_arena_allocator.h
index 2554fb0..a30c73d 100644
--- a/runtime/base/scoped_arena_allocator.h
+++ b/runtime/base/scoped_arena_allocator.h
@@ -31,6 +31,16 @@
template <typename T>
class ScopedArenaAllocatorAdapter;
+// Tag associated with each allocation to help prevent double free.
+enum class ArenaFreeTag : uint8_t {
+ // Allocation is used and has not yet been destroyed.
+ kUsed,
+ // Allocation has been destroyed.
+ kFree,
+};
+
+static constexpr size_t kArenaAlignment = 8;
+
// Holds a list of Arenas for use by ScopedArenaAllocator stack.
class ArenaStack : private DebugStackRefCounter, private ArenaAllocatorMemoryTool {
public:
@@ -50,6 +60,12 @@
MemStats GetPeakStats() const;
+ // Return the arena tag associated with a pointer.
+ static ArenaFreeTag& ArenaTagForAllocation(void* ptr) {
+ DCHECK(kIsDebugBuild) << "Only debug builds have tags";
+ return *(reinterpret_cast<ArenaFreeTag*>(ptr) - 1);
+ }
+
private:
struct Peak;
struct Current;
@@ -72,13 +88,18 @@
if (UNLIKELY(IsRunningOnMemoryTool())) {
return AllocWithMemoryTool(bytes, kind);
}
- size_t rounded_bytes = RoundUp(bytes, 8);
+ // Add kArenaAlignment for the free or used tag. Required to preserve alignment.
+ size_t rounded_bytes = RoundUp(bytes + (kIsDebugBuild ? kArenaAlignment : 0u), kArenaAlignment);
uint8_t* ptr = top_ptr_;
if (UNLIKELY(static_cast<size_t>(top_end_ - ptr) < rounded_bytes)) {
ptr = AllocateFromNextArena(rounded_bytes);
}
CurrentStats()->RecordAlloc(bytes, kind);
top_ptr_ = ptr + rounded_bytes;
+ if (kIsDebugBuild) {
+ ptr += kArenaAlignment;
+ ArenaTagForAllocation(ptr) = ArenaFreeTag::kUsed;
+ }
return ptr;
}
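
In debug builds, Alloc() above reserves one extra kArenaAlignment slot and stores the free/used tag in the byte just before the returned pointer, which is what ArenaTagForAllocation() reads back. A standalone sketch of that tagging scheme (the names and toy arena are illustrative only):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    enum class Tag : uint8_t { kUsed, kFree };
    constexpr size_t kAlignment = 8;  // Mirrors kArenaAlignment in the patch.

    // Returns a pointer whose preceding byte holds the allocation tag, the way the
    // debug-build ScopedArenaAllocator lays allocations out.
    uint8_t* TaggedAlloc(uint8_t* arena_top, size_t bytes, size_t* consumed) {
      // Round (bytes + tag slot) up to the arena alignment, as Alloc() does.
      size_t rounded = (bytes + kAlignment + kAlignment - 1) / kAlignment * kAlignment;
      uint8_t* ptr = arena_top + kAlignment;           // Skip the slot reserved for the tag.
      *(ptr - 1) = static_cast<uint8_t>(Tag::kUsed);   // Tag lives just before the object.
      *consumed = rounded;
      return ptr;
    }

    int main() {
      uint8_t arena[64];
      std::memset(arena, 0xAB, sizeof(arena));  // Poison so the tag write is observable.
      size_t consumed = 0;
      uint8_t* obj = TaggedAlloc(arena, 12, &consumed);
      assert(static_cast<Tag>(*(obj - 1)) == Tag::kUsed);
      assert(consumed == 24);  // 12 bytes + 8-byte tag slot, rounded up to 8.
      return 0;
    }
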
diff --git a/runtime/base/scoped_arena_containers.h b/runtime/base/scoped_arena_containers.h
index 562c2bf..1236585 100644
--- a/runtime/base/scoped_arena_containers.h
+++ b/runtime/base/scoped_arena_containers.h
@@ -20,6 +20,7 @@
#include <deque>
#include <queue>
#include <set>
+#include <type_traits>
#include <unordered_map>
#include <utility>
@@ -196,6 +197,47 @@
return ScopedArenaAllocatorAdapter<void>(this, kind);
}
+// Special deleter that only calls the destructor. Also checks for double free errors.
+template <typename T>
+class ArenaDelete {
+ static constexpr uint8_t kMagicFill = 0xCE;
+ public:
+ void operator()(T* ptr) const {
+ ptr->~T();
+ if (RUNNING_ON_MEMORY_TOOL > 0) {
+ // Writing to the memory will fail if we already destroyed the pointer with
+ // DestroyOnlyDelete, since we make it no-access.
+ memset(ptr, kMagicFill, sizeof(T));
+ MEMORY_TOOL_MAKE_NOACCESS(ptr, sizeof(T));
+ } else if (kIsDebugBuild) {
+ CHECK(ArenaStack::ArenaTagForAllocation(reinterpret_cast<void*>(ptr)) == ArenaFreeTag::kUsed)
+ << "Freeing invalid object " << ptr;
+ ArenaStack::ArenaTagForAllocation(reinterpret_cast<void*>(ptr)) = ArenaFreeTag::kFree;
+ // Write a magic value to try to catch use-after-free errors.
+ memset(ptr, kMagicFill, sizeof(T));
+ }
+ }
+};
+
+// In general we lack support for arrays. We would need to call the destructor on each element,
+// which requires access to the array size. Support for that is future work.
+//
+// However, we can support trivially destructible component types, as then a destructor doesn't
+// need to be called.
+template <typename T>
+class ArenaDelete<T[]> {
+ public:
+ void operator()(T* ptr ATTRIBUTE_UNUSED) const {
+ static_assert(std::is_trivially_destructible<T>::value,
+ "ArenaUniquePtr does not support non-trivially-destructible arrays.");
+ // TODO: Implement debug checks, and MEMORY_TOOL support.
+ }
+};
+
+// Arena unique ptr that only calls the destructor of the element.
+template <typename T>
+using ArenaUniquePtr = std::unique_ptr<T, ArenaDelete<T>>;
+
} // namespace art
#endif // ART_RUNTIME_BASE_SCOPED_ARENA_CONTAINERS_H_
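
ArenaUniquePtr pairs std::unique_ptr with a deleter that runs the destructor but never releases memory, since the storage is owned by the arena; the tag added above is what lets ArenaDelete flag double frees. A minimal standalone illustration of the destructor-only-deleter idea (plain C++, not the ART types):

    #include <cstdio>
    #include <memory>
    #include <new>

    // Deleter that only destroys the object; the backing storage is owned elsewhere
    // (in ART's case, by the scoped arena).
    template <typename T>
    struct DestroyOnly {
      void operator()(T* ptr) const { ptr->~T(); }
    };

    template <typename T>
    using DestroyOnlyUniquePtr = std::unique_ptr<T, DestroyOnly<T>>;

    struct Widget {
      ~Widget() { std::puts("Widget destroyed; storage left to its owner"); }
    };

    int main() {
      alignas(Widget) unsigned char storage[sizeof(Widget)];  // Stand-in for arena memory.
      DestroyOnlyUniquePtr<Widget> w(new (storage) Widget());
      // When 'w' goes out of scope only ~Widget() runs; 'storage' is never freed.
      return 0;
    }
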
diff --git a/runtime/base/stringpiece.h b/runtime/base/stringpiece.h
index d793bb6..46743e9 100644
--- a/runtime/base/stringpiece.h
+++ b/runtime/base/stringpiece.h
@@ -148,11 +148,24 @@
StringPiece substr(size_type pos, size_type n = npos) const;
+ int Compare(const StringPiece& rhs) const {
+ const int r = memcmp(data(), rhs.data(), std::min(size(), rhs.size()));
+ if (r != 0) {
+ return r;
+ }
+ if (size() < rhs.size()) {
+ return -1;
+ } else if (size() > rhs.size()) {
+ return 1;
+ }
+ return 0;
+ }
+
private:
// Pointer to char data, not necessarily zero terminated.
const char* ptr_;
// Length of data.
- size_type length_;
+ size_type length_;
};
// This large function is defined inline so that in a fairly common case where
@@ -201,9 +214,7 @@
}
inline bool operator<(const StringPiece& x, const StringPiece& y) {
- const int r = memcmp(x.data(), y.data(),
- std::min(x.size(), y.size()));
- return ((r < 0) || ((r == 0) && (x.size() < y.size())));
+ return x.Compare(y) < 0;
}
inline bool operator>(const StringPiece& x, const StringPiece& y) {
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index e897351..b9ea475 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -17,9 +17,9 @@
#ifndef ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
#define ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
-#include "art_code.h"
#include "art_method-inl.h"
#include "gc_map.h"
+#include "oat_quick_method_header.h"
#include "scoped_thread_state_change.h"
#include "stack_map.h"
@@ -54,7 +54,7 @@
void CheckReferences(int* registers, int number_of_references, uint32_t native_pc_offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
- if (GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (GetCurrentOatQuickMethodHeader()->IsOptimized()) {
CheckOptimizedMethod(registers, number_of_references, native_pc_offset);
} else {
CheckQuickMethod(registers, number_of_references, native_pc_offset);
@@ -65,7 +65,7 @@
void CheckOptimizedMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
- CodeInfo code_info = GetCurrentCode().GetOptimizedCodeInfo();
+ CodeInfo code_info = GetCurrentOatQuickMethodHeader()->GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
uint16_t number_of_dex_registers = m->GetCodeItem()->registers_size_;
@@ -109,7 +109,7 @@
void CheckQuickMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
- NativePcOffsetToReferenceMap map(GetCurrentCode().GetNativeGcMap(sizeof(void*)));
+ NativePcOffsetToReferenceMap map(GetCurrentOatQuickMethodHeader()->GetNativeGcMap());
const uint8_t* ref_bitmap = map.FindBitMap(native_pc_offset);
CHECK(ref_bitmap);
for (int i = 0; i < number_of_references; ++i) {
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index 48a12e5..2871f76 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -248,7 +248,7 @@
// VRegA
bool HasVRegA() const;
- int32_t VRegA() const;
+ ALWAYS_INLINE int32_t VRegA() const;
int8_t VRegA_10t() const {
return VRegA_10t(Fetch16(0));
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 17e6aac..e57569e 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -16,7 +16,6 @@
#include "entrypoints/entrypoint_utils.h"
-#include "art_code.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/mutex.h"
@@ -31,6 +30,7 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "nth_caller_visitor.h"
+#include "oat_quick_method_header.h"
#include "reflection.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
@@ -359,33 +359,31 @@
const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
auto** caller_sp = reinterpret_cast<ArtMethod**>(
reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
- ArtCode current_code = GetCallingCodeFrom(caller_sp);
+ const size_t callee_return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, type);
+ uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
+ (reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
ArtMethod* outer_method = *caller_sp;
ArtMethod* caller = outer_method;
- if ((outer_method != nullptr) && current_code.IsOptimized(sizeof(void*))) {
- const size_t callee_return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, type);
- uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
- (reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
- if (LIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()))) {
- uintptr_t native_pc_offset = current_code.NativeQuickPcOffset(caller_pc);
- CodeInfo code_info = current_code.GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
- DCHECK(stack_map.IsValid());
- if (stack_map.HasInlineInfo(encoding)) {
- InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
- caller = GetResolvedMethod(outer_method, inline_info, inline_info.GetDepth() - 1);
- }
- } else {
- // We're instrumenting, just use the StackVisitor which knows how to
- // handle instrumented frames.
- NthCallerVisitor visitor(Thread::Current(), 1, true);
- visitor.WalkStack();
- caller = visitor.caller;
- if (kIsDebugBuild) {
- // Avoid doing the check below.
- do_caller_check = false;
+ if (outer_method != nullptr) {
+ const OatQuickMethodHeader* current_code = outer_method->GetOatQuickMethodHeader(caller_pc);
+ if (current_code->IsOptimized()) {
+ if (LIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()))) {
+ uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
+ CodeInfo code_info = current_code->GetOptimizedCodeInfo();
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+ DCHECK(stack_map.IsValid());
+ if (stack_map.HasInlineInfo(encoding)) {
+ InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
+ caller = GetResolvedMethod(outer_method, inline_info, inline_info.GetDepth() - 1);
+ }
+ } else {
+ // We're instrumenting, just use the StackVisitor which knows how to
+ // handle instrumented frames.
+ NthCallerVisitor visitor(Thread::Current(), 1, true);
+ visitor.WalkStack();
+ caller = visitor.caller;
}
}
}
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 171ace2..0469ee6 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -20,7 +20,6 @@
#include <jni.h>
#include <stdint.h>
-#include "art_code.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "dex_instruction.h"
@@ -40,6 +39,7 @@
class ArtField;
class ArtMethod;
+class OatQuickMethodHeader;
class ScopedObjectAccessAlreadyRunnable;
class Thread;
@@ -185,10 +185,6 @@
Runtime::CalleeSaveType type,
bool do_caller_check = false);
-inline ArtCode GetCallingCodeFrom(ArtMethod** sp) {
- return ArtCode(sp);
-}
-
} // namespace art
#endif // ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 6035dfe..5eda6d6 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -14,7 +14,6 @@
* limitations under the License.
*/
-#include "art_code.h"
#include "art_method-inl.h"
#include "callee_save_frame.h"
#include "common_throws.h"
@@ -30,6 +29,7 @@
#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
+#include "oat_quick_method_header.h"
#include "quick_exception_handler.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
@@ -295,8 +295,6 @@
static mirror::Object* GetProxyThisObject(ArtMethod** sp)
SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK((*sp)->IsProxyMethod());
- CHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize,
- GetCallingCodeFrom(sp).GetFrameSizeInBytes());
CHECK_GT(kNumQuickGprArgs, 0u);
constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR.
size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
@@ -323,10 +321,11 @@
ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
- uintptr_t outer_pc_offset = GetCallingCodeFrom(caller_sp).NativeQuickPcOffset(outer_pc);
+ const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc);
+ uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
- if (GetCallingCodeFrom(caller_sp).IsOptimized(sizeof(void*))) {
- CodeInfo code_info = GetCallingCodeFrom(caller_sp).GetOptimizedCodeInfo();
+ if (current_code->IsOptimized()) {
+ CodeInfo code_info = current_code->GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding);
DCHECK(stack_map.IsValid());
@@ -337,7 +336,7 @@
return stack_map.GetDexPc(encoding);
}
} else {
- return GetCallingCodeFrom(caller_sp).ToDexPc(outer_pc);
+ return current_code->ToDexPc(*caller_sp, outer_pc);
}
}
@@ -842,10 +841,6 @@
self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
// Register the top of the managed stack, making stack crawlable.
DCHECK_EQ((*sp), proxy_method) << PrettyMethod(proxy_method);
- DCHECK_EQ(GetCallingCodeFrom(sp).GetFrameSizeInBytes(),
- ArtCode(Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs))
- .GetFrameSizeInBytes())
- << PrettyMethod(proxy_method);
self->VerifyStack();
// Start new JNI local reference state.
JNIEnvExt* env = self->GetJniEnv();
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 5299394..4e85913 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -49,7 +49,7 @@
static void CheckFrameSize(InstructionSet isa, Runtime::CalleeSaveType type, uint32_t save_size)
NO_THREAD_SAFETY_ANALYSIS {
ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
- QuickMethodFrameInfo frame_info = ArtCode(save_method).GetQuickFrameInfo();
+ QuickMethodFrameInfo frame_info = Runtime::Current()->GetRuntimeMethodFrameInfo(save_method);
EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
<< type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills="
<< frame_info.FpSpillMask() << std::dec << " ISA " << isa;
@@ -58,8 +58,8 @@
static void CheckPCOffset(InstructionSet isa, Runtime::CalleeSaveType type, size_t pc_offset)
NO_THREAD_SAFETY_ANALYSIS {
ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
- QuickMethodFrameInfo frame_info = ArtCode(save_method).GetQuickFrameInfo();
- EXPECT_EQ(ArtCode(save_method).GetReturnPcOffset().SizeValue(), pc_offset)
+ QuickMethodFrameInfo frame_info = Runtime::Current()->GetRuntimeMethodFrameInfo(save_method);
+ EXPECT_EQ(frame_info.GetReturnPcOffset(), pc_offset)
<< "Expected and real pc offset differs for " << type
<< " core spills=" << std::hex << frame_info.CoreSpillMask()
<< " fp spills=" << frame_info.FpSpillMask() << std::dec << " ISA " << isa;
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index da1d80e..4de8a8e 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -26,6 +26,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "mirror/stack_trace_element.h"
+#include "oat_quick_method_header.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "handle_scope-inl.h"
@@ -169,7 +170,7 @@
r->SetInstructionSet(kRuntimeISA);
ArtMethod* save_method = r->CreateCalleeSaveMethod();
r->SetCalleeSaveMethod(save_method, Runtime::kSaveAll);
- QuickMethodFrameInfo frame_info = ArtCode(save_method).GetQuickFrameInfo();
+ QuickMethodFrameInfo frame_info = r->GetRuntimeMethodFrameInfo(save_method);
ASSERT_EQ(kStackAlignment, 16U);
// ASSERT_EQ(sizeof(uintptr_t), sizeof(uint32_t));
@@ -186,15 +187,15 @@
fake_stack.push_back(0);
}
- fake_stack.push_back(
- ArtCode(method_g_).ToNativeQuickPc(dex_pc, /* is_catch_handler */ false)); // return pc
+ fake_stack.push_back(method_g_->GetOatQuickMethodHeader(0)->ToNativeQuickPc(
+ method_g_, dex_pc, /* is_catch_handler */ false)); // return pc
// Create/push fake 16byte stack frame for method g
fake_stack.push_back(reinterpret_cast<uintptr_t>(method_g_));
fake_stack.push_back(0);
fake_stack.push_back(0);
- fake_stack.push_back(
- ArtCode(method_g_).ToNativeQuickPc(dex_pc, /* is_catch_handler */ false)); // return pc
+ fake_stack.push_back(method_g_->GetOatQuickMethodHeader(0)->ToNativeQuickPc(
+ method_g_, dex_pc, /* is_catch_handler */ false)); // return pc
// Create/push fake 16byte stack frame for method f
fake_stack.push_back(reinterpret_cast<uintptr_t>(method_f_));
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 5b31b3a..52ccbee 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -20,10 +20,10 @@
#include <sys/mman.h>
#include <sys/ucontext.h>
-#include "art_code.h"
#include "art_method-inl.h"
#include "base/stl_util.h"
#include "mirror/class.h"
+#include "oat_quick_method_header.h"
#include "sigchain.h"
#include "thread-inl.h"
#include "verify_object-inl.h"
@@ -360,17 +360,17 @@
return false;
}
- ArtCode art_code(method_obj);
+ const OatQuickMethodHeader* method_header = method_obj->GetOatQuickMethodHeader(return_pc);
// We can be certain that this is a method now. Check if we have a GC map
// at the return PC address.
if (true || kIsDebugBuild) {
VLOG(signals) << "looking for dex pc for return pc " << std::hex << return_pc;
uint32_t sought_offset = return_pc -
- reinterpret_cast<uintptr_t>(art_code.GetQuickOatEntryPoint(sizeof(void*)));
+ reinterpret_cast<uintptr_t>(method_header->GetEntryPoint());
VLOG(signals) << "pc offset: " << std::hex << sought_offset;
}
- uint32_t dexpc = art_code.ToDexPc(return_pc, false);
+ uint32_t dexpc = method_header->ToDexPc(method_obj, return_pc, false);
VLOG(signals) << "dexpc: " << dexpc;
return !check_dex_pc || dexpc != DexFile::kDexNoIndex;
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 657fcb5..1d38525 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -26,6 +26,7 @@
#include "art_field-inl.h"
#include "base/allocator.h"
+#include "base/arena_allocator.h"
#include "base/dumpable.h"
#include "base/histogram-inl.h"
#include "base/stl_util.h"
@@ -1258,11 +1259,11 @@
}
void Heap::Trim(Thread* self) {
+ Runtime* const runtime = Runtime::Current();
if (!CareAboutPauseTimes()) {
ATRACE_BEGIN("Deflating monitors");
// Deflate the monitors, this can cause a pause but shouldn't matter since we don't care
// about pauses.
- Runtime* runtime = Runtime::Current();
{
ScopedSuspendAll ssa(__FUNCTION__);
uint64_t start_time = NanoTime();
@@ -1274,6 +1275,10 @@
}
TrimIndirectReferenceTables(self);
TrimSpaces(self);
+ // Trim arenas that may have been used by JIT or verifier.
+ ATRACE_BEGIN("Trimming arena maps");
+ runtime->GetArenaPool()->TrimMaps();
+ ATRACE_END();
}
class TrimIndirectReferenceTableClosure : public Closure {
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 3a0d814..b1572cc 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -56,7 +56,7 @@
mark_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
StringPrintf("allocspace %s mark-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), NonGrowthLimitCapacity()));
- CHECK(live_bitmap_.get() != nullptr) << "could not create allocspace mark bitmap #"
+ CHECK(mark_bitmap_.get() != nullptr) << "could not create allocspace mark bitmap #"
<< bitmap_index;
}
for (auto& freed : recent_freed_objects_) {
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 2dd2b7d..ed64d7e 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -19,7 +19,6 @@
#include <sstream>
#include "arch/context.h"
-#include "art_code.h"
#include "art_method-inl.h"
#include "atomic.h"
#include "class_linker.h"
@@ -37,6 +36,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "nth_caller_visitor.h"
+#include "oat_quick_method_header.h"
#include "thread.h"
#include "thread_list.h"
@@ -252,7 +252,9 @@
instrumentation_stack_->insert(it, instrumentation_frame);
SetReturnPc(instrumentation_exit_pc_);
}
- dex_pcs_.push_back(GetCurrentCode().ToDexPc(last_return_pc_));
+ dex_pcs_.push_back((GetCurrentOatQuickMethodHeader() == nullptr)
+ ? DexFile::kDexNoIndex
+ : GetCurrentOatQuickMethodHeader()->ToDexPc(m, last_return_pc_));
last_return_pc_ = return_pc;
++instrumentation_stack_depth_;
return true; // Continue.
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 5427a58..df6936b 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -1171,6 +1171,13 @@
return VM_AllClassesImpl(pReply, false, false);
}
+// Deleter functor so that std::unique_ptr can be used with JdwpEvent.
+struct JdwpEventDeleter {
+ void operator()(JdwpEvent* event) {
+ EventFree(event);
+ }
+};
+
/*
* Set an event trigger.
*
@@ -1184,7 +1191,7 @@
CHECK_LT(modifier_count, 256); /* reasonableness check */
- JdwpEvent* pEvent = EventAlloc(modifier_count);
+ std::unique_ptr<JDWP::JdwpEvent, JdwpEventDeleter> pEvent(EventAlloc(modifier_count));
pEvent->eventKind = event_kind;
pEvent->suspend_policy = suspend_policy;
pEvent->modCount = modifier_count;
@@ -1293,8 +1300,6 @@
break;
default:
LOG(WARNING) << "Unsupported modifier " << mod.modKind << " for event " << pEvent->eventKind;
- // Free allocated event to avoid leak before leaving.
- EventFree(pEvent);
return JDWP::ERR_NOT_IMPLEMENTED;
}
}
@@ -1310,13 +1315,14 @@
VLOG(jdwp) << StringPrintf(" --> event requestId=%#x", requestId);
/* add it to the list */
- JdwpError err = state->RegisterEvent(pEvent);
+ JdwpError err = state->RegisterEvent(pEvent.get());
if (err != ERR_NONE) {
/* registration failed, probably because event is bogus */
- EventFree(pEvent);
LOG(WARNING) << "WARNING: event request rejected";
+ return err;
}
- return err;
+ pEvent.release();
+ return ERR_NONE;
}
static JdwpError ER_Clear(JdwpState* state, Request* request, ExpandBuf*)
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 4c53162..4187358 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -25,37 +25,77 @@
namespace art {
namespace jit {
+static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;
+static constexpr int kProtData = PROT_READ | PROT_WRITE;
+static constexpr int kProtCode = PROT_READ | PROT_EXEC;
+
+#define CHECKED_MPROTECT(memory, size, prot) \
+ do { \
+ int rc = mprotect(memory, size, prot); \
+ if (UNLIKELY(rc != 0)) { \
+ errno = rc; \
+ PLOG(FATAL) << "Failed to mprotect jit code cache"; \
+ } \
+ } while (false) \
+
JitCodeCache* JitCodeCache::Create(size_t capacity, std::string* error_msg) {
CHECK_GT(capacity, 0U);
CHECK_LT(capacity, kMaxCapacity);
std::string error_str;
// Map name specific for android_os_Debug.cpp accounting.
- MemMap* map = MemMap::MapAnonymous("jit-code-cache", nullptr, capacity,
- PROT_READ | PROT_WRITE | PROT_EXEC, false, false, &error_str);
- if (map == nullptr) {
+ MemMap* data_map = MemMap::MapAnonymous(
+ "data-code-cache", nullptr, capacity, kProtAll, false, false, &error_str);
+ if (data_map == nullptr) {
std::ostringstream oss;
oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
*error_msg = oss.str();
return nullptr;
}
- return new JitCodeCache(map);
+
+ // Data cache is 1 / 4 of the map.
+ // TODO: Make this variable?
+ size_t data_size = RoundUp(data_map->Size() / 4, kPageSize);
+ size_t code_size = data_map->Size() - data_size;
+ uint8_t* divider = data_map->Begin() + data_size;
+
+ // We need to have 32 bit offsets from method headers in code cache which point to things
+ // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
+ MemMap* code_map = data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str);
+ if (code_map == nullptr) {
+ std::ostringstream oss;
+ oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
+ *error_msg = oss.str();
+ return nullptr;
+ }
+ DCHECK_EQ(code_map->Size(), code_size);
+ DCHECK_EQ(code_map->Begin(), divider);
+ return new JitCodeCache(code_map, data_map);
}
-JitCodeCache::JitCodeCache(MemMap* mem_map)
- : lock_("Jit code cache", kJitCodeCacheLock), num_methods_(0) {
- VLOG(jit) << "Created jit code cache size=" << PrettySize(mem_map->Size());
- mem_map_.reset(mem_map);
- uint8_t* divider = mem_map->Begin() + RoundUp(mem_map->Size() / 4, kPageSize);
- // Data cache is 1 / 4 of the map. TODO: Make this variable?
- // Put data at the start.
- data_cache_ptr_ = mem_map->Begin();
- data_cache_end_ = divider;
- data_cache_begin_ = data_cache_ptr_;
- mprotect(data_cache_ptr_, data_cache_end_ - data_cache_begin_, PROT_READ | PROT_WRITE);
- // Code cache after.
- code_cache_begin_ = divider;
- code_cache_ptr_ = divider;
- code_cache_end_ = mem_map->End();
+JitCodeCache::JitCodeCache(MemMap* code_map, MemMap* data_map)
+ : lock_("Jit code cache", kJitCodeCacheLock),
+ code_map_(code_map),
+ data_map_(data_map),
+ num_methods_(0) {
+
+ VLOG(jit) << "Created jit code cache: data size="
+ << PrettySize(data_map_->Size())
+ << ", code size="
+ << PrettySize(code_map_->Size());
+
+ code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_map_->Size(), false /*locked*/);
+ data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_map_->Size(), false /*locked*/);
+
+ if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
+ PLOG(FATAL) << "create_mspace_with_base failed";
+ }
+
+ // Prevent morecore requests from the mspace.
+ mspace_set_footprint_limit(code_mspace_, code_map_->Size());
+ mspace_set_footprint_limit(data_mspace_, data_map_->Size());
+
+ CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
+ CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);
}
bool JitCodeCache::ContainsMethod(ArtMethod* method) const {
@@ -63,44 +103,97 @@
}
bool JitCodeCache::ContainsCodePtr(const void* ptr) const {
- return ptr >= code_cache_begin_ && ptr < code_cache_end_;
+ return code_map_->Begin() <= ptr && ptr < code_map_->End();
}
-void JitCodeCache::FlushInstructionCache() {
- UNIMPLEMENTED(FATAL);
- // TODO: Investigate if we need to do this.
- // __clear_cache(reinterpret_cast<char*>(code_cache_begin_), static_cast<int>(CodeCacheSize()));
-}
-
-uint8_t* JitCodeCache::ReserveCode(Thread* self, size_t size) {
- MutexLock mu(self, lock_);
- if (size > CodeCacheRemain()) {
- return nullptr;
+class ScopedCodeCacheWrite {
+ public:
+ explicit ScopedCodeCacheWrite(MemMap* code_map) : code_map_(code_map) {
+ CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtAll);
}
+ ~ScopedCodeCacheWrite() {
+ CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
+ }
+ private:
+ MemMap* const code_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
+};
+
+uint8_t* JitCodeCache::CommitCode(Thread* self,
+ const uint8_t* mapping_table,
+ const uint8_t* vmap_table,
+ const uint8_t* gc_map,
+ size_t frame_size_in_bytes,
+ size_t core_spill_mask,
+ size_t fp_spill_mask,
+ const uint8_t* code,
+ size_t code_size) {
+ size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+ // Ensure the header ends up at expected instruction alignment.
+ size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
+ size_t total_size = header_size + code_size;
+
+ OatQuickMethodHeader* method_header = nullptr;
+ uint8_t* code_ptr = nullptr;
+
+ MutexLock mu(self, lock_);
+ {
+ ScopedCodeCacheWrite scc(code_map_.get());
+ uint8_t* result = reinterpret_cast<uint8_t*>(
+ mspace_memalign(code_mspace_, alignment, total_size));
+ if (result == nullptr) {
+ return nullptr;
+ }
+ code_ptr = result + header_size;
+ DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(code_ptr), alignment);
+
+ std::copy(code, code + code_size, code_ptr);
+ method_header = reinterpret_cast<OatQuickMethodHeader*>(code_ptr) - 1;
+ new (method_header) OatQuickMethodHeader(
+ (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
+ (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
+ (gc_map == nullptr) ? 0 : code_ptr - gc_map,
+ frame_size_in_bytes,
+ core_spill_mask,
+ fp_spill_mask,
+ code_size);
+ }
+
+ __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
+ reinterpret_cast<char*>(code_ptr + code_size));
+
++num_methods_; // TODO: This is hacky but works since each method has exactly one code region.
- code_cache_ptr_ += size;
- return code_cache_ptr_ - size;
+ return reinterpret_cast<uint8_t*>(method_header);
+}
+
+size_t JitCodeCache::CodeCacheSize() {
+ MutexLock mu(Thread::Current(), lock_);
+ size_t bytes_allocated = 0;
+ mspace_inspect_all(code_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
+ return bytes_allocated;
+}
+
+size_t JitCodeCache::DataCacheSize() {
+ MutexLock mu(Thread::Current(), lock_);
+ size_t bytes_allocated = 0;
+ mspace_inspect_all(data_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
+ return bytes_allocated;
}
uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
- MutexLock mu(self, lock_);
size = RoundUp(size, sizeof(void*));
- if (size > DataCacheRemain()) {
- return nullptr;
- }
- data_cache_ptr_ += size;
- return data_cache_ptr_ - size;
+ MutexLock mu(self, lock_);
+ return reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
}
uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
- MutexLock mu(self, lock_);
- const size_t size = RoundUp(end - begin, sizeof(void*));
- if (size > DataCacheRemain()) {
+ uint8_t* result = ReserveData(self, end - begin);
+ if (result == nullptr) {
return nullptr; // Out of space in the data cache.
}
- std::copy(begin, end, data_cache_ptr_);
- data_cache_ptr_ += size;
- return data_cache_ptr_ - size;
+ std::copy(begin, end, result);
+ return result;
}
const void* JitCodeCache::GetCodeFor(ArtMethod* method) {
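
The rewritten cache splits one reservation roughly 1:4 between a data map (stack maps, profiling info) and a code map, serves both from dlmalloc mspaces so space can be reused, and keeps the code pages read/execute except inside ScopedCodeCacheWrite. CommitCode() then packs an OatQuickMethodHeader directly in front of each method's instructions. A small standalone sketch of the size split and per-method packing (page size, header size and alignment are assumed values):

    #include <cstddef>
    #include <cstdio>

    constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

    int main() {
      const size_t kPageSize = 4096;            // Assumed page size.
      const size_t capacity = 1 * 1024 * 1024;  // 1 MB cache, as in the removed tests.
      const size_t data_size = RoundUp(capacity / 4, kPageSize);
      const size_t code_size = capacity - data_size;  // Remainder holds headers + code.
      std::printf("data map: %zu bytes, code map: %zu bytes\n", data_size, code_size);

      // CommitCode() packs [OatQuickMethodHeader][instructions] into one aligned chunk.
      const size_t kHeaderSize = 28;    // Assumed header size, for illustration only.
      const size_t kIsaAlignment = 16;  // Assumed instruction alignment.
      const size_t method_code_size = 100;
      const size_t total = RoundUp(kHeaderSize, kIsaAlignment) + method_code_size;
      std::printf("one method commit: %zu bytes\n", total);
      return 0;
    }
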
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index f485e4a..fa90c18 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -22,6 +22,7 @@
#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "gc/allocator/dlmalloc.h"
#include "gc_root.h"
#include "jni.h"
#include "oat_file.h"
@@ -48,34 +49,26 @@
// in the out arg error_msg.
static JitCodeCache* Create(size_t capacity, std::string* error_msg);
- const uint8_t* CodeCachePtr() const {
- return code_cache_ptr_;
- }
-
- size_t CodeCacheSize() const {
- return code_cache_ptr_ - code_cache_begin_;
- }
-
- size_t CodeCacheRemain() const {
- return code_cache_end_ - code_cache_ptr_;
- }
-
- const uint8_t* DataCachePtr() const {
- return data_cache_ptr_;
- }
-
- size_t DataCacheSize() const {
- return data_cache_ptr_ - data_cache_begin_;
- }
-
- size_t DataCacheRemain() const {
- return data_cache_end_ - data_cache_ptr_;
- }
-
size_t NumMethods() const {
return num_methods_;
}
+ size_t CodeCacheSize() REQUIRES(!lock_);
+
+ size_t DataCacheSize() REQUIRES(!lock_);
+
+ // Allocate and write code and its metadata to the code cache.
+ uint8_t* CommitCode(Thread* self,
+ const uint8_t* mapping_table,
+ const uint8_t* vmap_table,
+ const uint8_t* gc_map,
+ size_t frame_size_in_bytes,
+ size_t core_spill_mask,
+ size_t fp_spill_mask,
+ const uint8_t* code,
+ size_t code_size)
+ REQUIRES(!lock_);
+
// Return true if the code cache contains the code pointer which is the entrypoint of the method.
bool ContainsMethod(ArtMethod* method) const
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -83,9 +76,6 @@
// Return true if the code cache contains a code ptr.
bool ContainsCodePtr(const void* ptr) const;
- // Reserve a region of code of size at least "size". Returns null if there is no more room.
- uint8_t* ReserveCode(Thread* self, size_t size) REQUIRES(!lock_);
-
// Reserve a region of data of size at least "size". Returns null if there is no more room.
uint8_t* ReserveData(Thread* self, size_t size) REQUIRES(!lock_);
@@ -105,25 +95,19 @@
private:
// Takes ownership of code_mem_map.
- explicit JitCodeCache(MemMap* code_mem_map);
-
- // Unimplemented, TODO: Determine if it is necessary.
- void FlushInstructionCache();
+ JitCodeCache(MemMap* code_map, MemMap* data_map);
// Lock which guards.
Mutex lock_;
- // Mem map which holds code and data. We do this since we need to have 32 bit offsets from method
- // headers in code cache which point to things in the data cache. If the maps are more than 4GB
- // apart, having multiple maps wouldn't work.
- std::unique_ptr<MemMap> mem_map_;
- // Code cache section.
- uint8_t* code_cache_ptr_;
- const uint8_t* code_cache_begin_;
- const uint8_t* code_cache_end_;
- // Data cache section.
- uint8_t* data_cache_ptr_;
- const uint8_t* data_cache_begin_;
- const uint8_t* data_cache_end_;
+ // Mem map which holds code.
+ std::unique_ptr<MemMap> code_map_;
+ // Mem map which holds data (stack maps and profiling info).
+ std::unique_ptr<MemMap> data_map_;
+ // The opaque mspace for allocating code.
+ void* code_mspace_;
+ // The opaque mspace for allocating data.
+ void* data_mspace_;
+ // Number of compiled methods.
size_t num_methods_;
// This map holds code for methods if they were deoptimized by the instrumentation stubs. This is
// required since we have to implement ClassLinker::GetQuickOatCodeFor for walking stacks.
diff --git a/runtime/jit/jit_code_cache_test.cc b/runtime/jit/jit_code_cache_test.cc
deleted file mode 100644
index c76dc11..0000000
--- a/runtime/jit/jit_code_cache_test.cc
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "common_runtime_test.h"
-
-#include "art_method-inl.h"
-#include "class_linker.h"
-#include "jit_code_cache.h"
-#include "scoped_thread_state_change.h"
-#include "thread-inl.h"
-
-namespace art {
-namespace jit {
-
-class JitCodeCacheTest : public CommonRuntimeTest {
- public:
-};
-
-TEST_F(JitCodeCacheTest, TestCoverage) {
- std::string error_msg;
- constexpr size_t kSize = 1 * MB;
- std::unique_ptr<JitCodeCache> code_cache(
- JitCodeCache::Create(kSize, &error_msg));
- ASSERT_TRUE(code_cache.get() != nullptr) << error_msg;
- ASSERT_TRUE(code_cache->CodeCachePtr() != nullptr);
- ASSERT_EQ(code_cache->CodeCacheSize(), 0u);
- ASSERT_GT(code_cache->CodeCacheRemain(), 0u);
- ASSERT_TRUE(code_cache->DataCachePtr() != nullptr);
- ASSERT_EQ(code_cache->DataCacheSize(), 0u);
- ASSERT_GT(code_cache->DataCacheRemain(), 0u);
- ASSERT_EQ(code_cache->CodeCacheRemain() + code_cache->DataCacheRemain(), kSize);
- ASSERT_EQ(code_cache->NumMethods(), 0u);
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- uint8_t* const reserved_code = code_cache->ReserveCode(soa.Self(), 4 * KB);
- ASSERT_TRUE(reserved_code != nullptr);
- ASSERT_TRUE(code_cache->ContainsCodePtr(reserved_code));
- ASSERT_EQ(code_cache->NumMethods(), 1u);
- Runtime* const runtime = Runtime::Current();
- ClassLinker* const class_linker = runtime->GetClassLinker();
- ArtMethod* method = &class_linker->AllocArtMethodArray(soa.Self(),
- runtime->GetLinearAlloc(),
- 1)->At(0);
- ASSERT_FALSE(code_cache->ContainsMethod(method));
- method->SetEntryPointFromQuickCompiledCode(reserved_code);
- ASSERT_TRUE(code_cache->ContainsMethod(method));
- ASSERT_EQ(code_cache->GetCodeFor(method), reserved_code);
- // Save the code and then change it.
- code_cache->SaveCompiledCode(method, reserved_code);
- method->SetEntryPointFromQuickCompiledCode(nullptr);
- ASSERT_EQ(code_cache->GetCodeFor(method), reserved_code);
- const uint8_t data_arr[] = {1, 2, 3, 4, 5};
- uint8_t* data_ptr = code_cache->AddDataArray(soa.Self(), data_arr, data_arr + sizeof(data_arr));
- ASSERT_TRUE(data_ptr != nullptr);
- ASSERT_EQ(memcmp(data_ptr, data_arr, sizeof(data_arr)), 0);
-}
-
-TEST_F(JitCodeCacheTest, TestOverflow) {
- std::string error_msg;
- constexpr size_t kSize = 1 * MB;
- std::unique_ptr<JitCodeCache> code_cache(
- JitCodeCache::Create(kSize, &error_msg));
- ASSERT_TRUE(code_cache.get() != nullptr) << error_msg;
- ASSERT_TRUE(code_cache->CodeCachePtr() != nullptr);
- size_t code_bytes = 0;
- size_t data_bytes = 0;
- constexpr size_t kCodeArrSize = 4 * KB;
- constexpr size_t kDataArrSize = 4 * KB;
- uint8_t data_arr[kDataArrSize];
- std::fill_n(data_arr, arraysize(data_arr), 53);
- // Add code and data until we are full.
- uint8_t* code_ptr = nullptr;
- uint8_t* data_ptr = nullptr;
- do {
- code_ptr = code_cache->ReserveCode(Thread::Current(), kCodeArrSize);
- data_ptr = code_cache->AddDataArray(Thread::Current(), data_arr, data_arr + kDataArrSize);
- if (code_ptr != nullptr) {
- code_bytes += kCodeArrSize;
- }
- if (data_ptr != nullptr) {
- data_bytes += kDataArrSize;
- }
- } while (code_ptr != nullptr || data_ptr != nullptr);
- // Make sure we added a reasonable amount
- CHECK_GT(code_bytes, 0u);
- CHECK_LE(code_bytes, kSize);
- CHECK_GT(data_bytes, 0u);
- CHECK_LE(data_bytes, kSize);
- CHECK_GE(code_bytes + data_bytes, kSize * 4 / 5);
-}
-
-} // namespace jit
-} // namespace art
diff --git a/runtime/leb128_test.cc b/runtime/leb128_test.cc
index 09f7ecc..122f55e 100644
--- a/runtime/leb128_test.cc
+++ b/runtime/leb128_test.cc
@@ -88,7 +88,7 @@
{-0x08000000, {0x80, 0x80, 0x80, 0x40, 0}},
{-0x08000001, {0xFF, 0xFF, 0xFF, 0xBF, 0x7F}},
{-0x20000000, {0x80, 0x80, 0x80, 0x80, 0x7E}},
- {(-1) << 31, {0x80, 0x80, 0x80, 0x80, 0x78}},
+ {static_cast<int32_t>(0x80000000), {0x80, 0x80, 0x80, 0x80, 0x78}},
};
TEST(Leb128Test, UnsignedSinglesVector) {
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 2ac44fc..53fedab 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -565,24 +565,58 @@
return nullptr;
}
-ArtField* Class::FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type) {
- // Is the field in this class?
- // Interfaces are not relevant because they can't contain instance fields.
- for (size_t i = 0; i < NumInstanceFields(); ++i) {
- ArtField* f = GetInstanceField(i);
- if (name == f->GetName() && type == f->GetTypeDescriptor()) {
- return f;
+// Custom binary search to avoid double comparisons from std::binary_search.
+static ArtField* FindFieldByNameAndType(LengthPrefixedArray<ArtField>* fields,
+ const StringPiece& name,
+ const StringPiece& type)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (fields == nullptr) {
+ return nullptr;
+ }
+ size_t low = 0;
+ size_t high = fields->Length();
+ ArtField* ret = nullptr;
+ while (low < high) {
+ size_t mid = (low + high) / 2;
+ ArtField& field = fields->At(mid);
+ // Fields are sorted by class, then name, then type descriptor. This is verified by the dex
+ // file verifier. There can be multiple fields with the same name in the same class due to
+ // proguard.
+ int result = StringPiece(field.GetName()).Compare(name);
+ if (result == 0) {
+ result = StringPiece(field.GetTypeDescriptor()).Compare(type);
+ }
+ if (result < 0) {
+ low = mid + 1;
+ } else if (result > 0) {
+ high = mid;
+ } else {
+ ret = &field;
+ break;
}
}
- return nullptr;
+ if (kIsDebugBuild) {
+ ArtField* found = nullptr;
+ for (ArtField& field : MakeIterationRangeFromLengthPrefixedArray(fields)) {
+ if (name == field.GetName() && type == field.GetTypeDescriptor()) {
+ found = &field;
+ break;
+ }
+ }
+ CHECK_EQ(found, ret) << "Found " << PrettyField(found) << " vs " << PrettyField(ret);
+ }
+ return ret;
+}
+
+ArtField* Class::FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type) {
+ // Binary search by name. Interfaces are not relevant because they can't contain instance fields.
+ return FindFieldByNameAndType(GetIFieldsPtr(), name, type);
}
ArtField* Class::FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) {
if (GetDexCache() == dex_cache) {
- for (size_t i = 0; i < NumInstanceFields(); ++i) {
- ArtField* f = GetInstanceField(i);
- if (f->GetDexFieldIndex() == dex_field_idx) {
- return f;
+ for (ArtField& field : GetIFields()) {
+ if (field.GetDexFieldIndex() == dex_field_idx) {
+ return &field;
}
}
}
@@ -615,21 +649,14 @@
ArtField* Class::FindDeclaredStaticField(const StringPiece& name, const StringPiece& type) {
DCHECK(type != nullptr);
- for (size_t i = 0; i < NumStaticFields(); ++i) {
- ArtField* f = GetStaticField(i);
- if (name == f->GetName() && type == f->GetTypeDescriptor()) {
- return f;
- }
- }
- return nullptr;
+ return FindFieldByNameAndType(GetSFieldsPtr(), name, type);
}
ArtField* Class::FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) {
if (dex_cache == GetDexCache()) {
- for (size_t i = 0; i < NumStaticFields(); ++i) {
- ArtField* f = GetStaticField(i);
- if (f->GetDexFieldIndex() == dex_field_idx) {
- return f;
+ for (ArtField& field : GetSFields()) {
+ if (field.GetDexFieldIndex() == dex_field_idx) {
+ return &field;
}
}
}
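
FindFieldByNameAndType() above exploits the dex-enforced ordering of a class's field array, so one binary search with a name-then-type comparison replaces the old linear scans. A standalone sketch of the same search over plain structs (illustrative data only):

    #include <cassert>
    #include <cstddef>
    #include <cstring>
    #include <vector>

    struct Field {
      const char* name;
      const char* type;
    };

    // Two-level compare: name first, then type descriptor, matching the order
    // guaranteed for a class's declared fields.
    int Compare(const Field& f, const char* name, const char* type) {
      int r = std::strcmp(f.name, name);
      return r != 0 ? r : std::strcmp(f.type, type);
    }

    const Field* Find(const std::vector<Field>& fields, const char* name, const char* type) {
      size_t low = 0;
      size_t high = fields.size();
      while (low < high) {
        size_t mid = (low + high) / 2;
        int r = Compare(fields[mid], name, type);
        if (r < 0) {
          low = mid + 1;
        } else if (r > 0) {
          high = mid;
        } else {
          return &fields[mid];
        }
      }
      return nullptr;
    }

    int main() {
      // Sorted by (name, type); two 'value' fields differ only in type.
      std::vector<Field> fields = {{"count", "I"}, {"value", "I"}, {"value", "J"}};
      assert(Find(fields, "value", "J") == &fields[2]);
      assert(Find(fields, "missing", "I") == nullptr);
      return 0;
    }
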
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 5625499..40aca0d 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -483,15 +483,4 @@
OatMethodOffsets::~OatMethodOffsets() {}
-OatQuickMethodHeader::OatQuickMethodHeader(
- uint32_t mapping_table_offset, uint32_t vmap_table_offset, uint32_t gc_map_offset,
- uint32_t frame_size_in_bytes, uint32_t core_spill_mask, uint32_t fp_spill_mask,
- uint32_t code_size)
- : mapping_table_offset_(mapping_table_offset), vmap_table_offset_(vmap_table_offset),
- gc_map_offset_(gc_map_offset),
- frame_info_(frame_size_in_bytes, core_spill_mask, fp_spill_mask), code_size_(code_size) {
-}
-
-OatQuickMethodHeader::~OatQuickMethodHeader() {}
-
} // namespace art
diff --git a/runtime/oat.h b/runtime/oat.h
index 2aa5783..276e7f3 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -22,7 +22,6 @@
#include "arch/instruction_set.h"
#include "base/macros.h"
#include "dex_file.h"
-#include "quick/quick_method_frame_info.h"
#include "safe_map.h"
namespace art {
@@ -170,30 +169,6 @@
uint32_t code_offset_;
};
-// OatQuickMethodHeader precedes the raw code chunk generated by the Quick compiler.
-class PACKED(4) OatQuickMethodHeader {
- public:
- OatQuickMethodHeader(uint32_t mapping_table_offset = 0U, uint32_t vmap_table_offset = 0U,
- uint32_t gc_map_offset = 0U, uint32_t frame_size_in_bytes = 0U,
- uint32_t core_spill_mask = 0U, uint32_t fp_spill_mask = 0U,
- uint32_t code_size = 0U);
-
- ~OatQuickMethodHeader();
-
- OatQuickMethodHeader& operator=(const OatQuickMethodHeader&) = default;
-
- // The offset in bytes from the start of the mapping table to the end of the header.
- uint32_t mapping_table_offset_;
- // The offset in bytes from the start of the vmap table to the end of the header.
- uint32_t vmap_table_offset_;
- // The offset in bytes from the start of the gc map to the end of the header.
- uint32_t gc_map_offset_;
- // The stack frame information.
- QuickMethodFrameInfo frame_info_;
- // The code size in bytes.
- uint32_t code_size_;
-};
-
} // namespace art
#endif // ART_RUNTIME_OAT_H_
diff --git a/runtime/oat_file-inl.h b/runtime/oat_file-inl.h
index f7913e1..7b92120 100644
--- a/runtime/oat_file-inl.h
+++ b/runtime/oat_file-inl.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_OAT_FILE_INL_H_
#include "oat_file.h"
+#include "oat_quick_method_header.h"
namespace art {
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 29b879e..8d5418d 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -65,8 +65,10 @@
const InstructionSet isa,
bool load_executable,
const char* package_name)
- : dex_location_(dex_location), isa_(isa),
- package_name_(package_name), load_executable_(load_executable) {
+ : isa_(isa), package_name_(package_name), load_executable_(load_executable) {
+ CHECK(dex_location != nullptr) << "OatFileAssistant: null dex location";
+ dex_location_.assign(dex_location);
+
if (load_executable_ && isa != kRuntimeISA) {
LOG(WARNING) << "OatFileAssistant: Load executable specified, "
<< "but isa is not kRuntimeISA. Will not attempt to load executable.";
@@ -110,7 +112,7 @@
ClassLinker* class_linker = runtime->GetClassLinker();
const auto& boot_class_path = class_linker->GetBootClassPath();
for (size_t i = 0; i < boot_class_path.size(); i++) {
- if (boot_class_path[i]->GetLocation() == std::string(dex_location_)) {
+ if (boot_class_path[i]->GetLocation() == dex_location_) {
VLOG(oat) << "Dex location " << dex_location_ << " is in boot class path";
return true;
}
@@ -266,7 +268,6 @@
const std::string* OatFileAssistant::OdexFileName() {
if (!cached_odex_file_name_attempted_) {
- CHECK(dex_location_ != nullptr) << "OatFileAssistant: null dex location";
cached_odex_file_name_attempted_ = true;
std::string error_msg;
@@ -330,15 +331,13 @@
cached_oat_file_name_attempted_ = true;
// Compute the oat file name from the dex location.
- CHECK(dex_location_ != nullptr) << "OatFileAssistant: null dex location";
-
// TODO: The oat file assistant should be the definitive place for
// determining the oat file name from the dex location, not
// GetDalvikCacheFilename.
std::string cache_dir = StringPrintf("%s%s",
DalvikCacheDirectory().c_str(), GetInstructionSetString(isa_));
std::string error_msg;
- cached_oat_file_name_found_ = GetDalvikCacheFilename(dex_location_,
+ cached_oat_file_name_found_ = GetDalvikCacheFilename(dex_location_.c_str(),
cache_dir.c_str(), &cached_oat_file_name_, &error_msg);
if (!cached_oat_file_name_found_) {
// If we can't determine the oat file name, we treat the oat file as
@@ -413,7 +412,7 @@
// what we provide, which verifies the primary dex checksum for us.
const uint32_t* dex_checksum_pointer = GetRequiredDexChecksum();
const OatFile::OatDexFile* oat_dex_file = file.GetOatDexFile(
- dex_location_, dex_checksum_pointer, false);
+ dex_location_.c_str(), dex_checksum_pointer, false);
if (oat_dex_file == nullptr) {
return true;
}
@@ -421,7 +420,7 @@
// Verify the dex checksums for any secondary multidex files
for (size_t i = 1; ; i++) {
std::string secondary_dex_location
- = DexFile::GetMultiDexLocation(i, dex_location_);
+ = DexFile::GetMultiDexLocation(i, dex_location_.c_str());
const OatFile::OatDexFile* secondary_oat_dex_file
= file.GetOatDexFile(secondary_dex_location.c_str(), nullptr, false);
if (secondary_oat_dex_file == nullptr) {
@@ -613,16 +612,14 @@
CHECK(error_msg != nullptr);
if (input_file == nullptr) {
- *error_msg = "Patching of oat file for dex location "
- + std::string(dex_location_)
+ *error_msg = "Patching of oat file for dex location " + dex_location_
+ " not attempted because the input file name could not be determined.";
return false;
}
const std::string& input_file_name = *input_file;
if (OatFileName() == nullptr) {
- *error_msg = "Patching of oat file for dex location "
- + std::string(dex_location_)
+ *error_msg = "Patching of oat file for dex location " + dex_location_
+ " not attempted because the oat file name could not be determined.";
return false;
}
@@ -666,8 +663,7 @@
CHECK(error_msg != nullptr);
if (OatFileName() == nullptr) {
- *error_msg = "Generation of oat file for dex location "
- + std::string(dex_location_)
+ *error_msg = "Generation of oat file for dex location " + dex_location_
+ " not attempted because the oat file name could not be determined.";
return false;
}
@@ -681,14 +677,14 @@
}
std::vector<std::string> args;
- args.push_back("--dex-file=" + std::string(dex_location_));
+ args.push_back("--dex-file=" + dex_location_);
args.push_back("--oat-file=" + oat_file_name);
// dex2oat ignores missing dex files and doesn't report an error.
// Check explicitly here so we can detect the error properly.
// TODO: Why does dex2oat behave that way?
- if (!OS::FileExists(dex_location_)) {
- *error_msg = "Dex location " + std::string(dex_location_) + " does not exists.";
+ if (!OS::FileExists(dex_location_.c_str())) {
+ *error_msg = "Dex location " + dex_location_ + " does not exists.";
return false;
}
@@ -839,8 +835,7 @@
required_dex_checksum_attempted_ = true;
required_dex_checksum_found_ = false;
std::string error_msg;
- CHECK(dex_location_ != nullptr) << "OatFileAssistant provided no dex location";
- if (DexFile::GetChecksum(dex_location_, &cached_required_dex_checksum_, &error_msg)) {
+ if (DexFile::GetChecksum(dex_location_.c_str(), &cached_required_dex_checksum_, &error_msg)) {
required_dex_checksum_found_ = true;
has_original_dex_files_ = true;
} else {
@@ -853,7 +848,7 @@
const OatFile* odex_file = GetOdexFile();
if (odex_file != nullptr) {
const OatFile::OatDexFile* odex_dex_file = odex_file->GetOatDexFile(
- dex_location_, nullptr, false);
+ dex_location_.c_str(), nullptr, false);
if (odex_dex_file != nullptr) {
cached_required_dex_checksum_ = odex_dex_file->GetDexFileLocationChecksum();
required_dex_checksum_found_ = true;
@@ -873,7 +868,7 @@
std::string error_msg;
cached_odex_file_.reset(OatFile::Open(odex_file_name.c_str(),
odex_file_name.c_str(), nullptr, nullptr, load_executable_,
- dex_location_, &error_msg));
+ dex_location_.c_str(), &error_msg));
if (cached_odex_file_.get() == nullptr) {
VLOG(oat) << "OatFileAssistant test for existing pre-compiled oat file "
<< odex_file_name << ": " << error_msg;
@@ -904,7 +899,7 @@
std::string error_msg;
cached_oat_file_.reset(OatFile::Open(oat_file_name.c_str(),
oat_file_name.c_str(), nullptr, nullptr, load_executable_,
- dex_location_, &error_msg));
+ dex_location_.c_str(), &error_msg));
if (cached_oat_file_.get() == nullptr) {
VLOG(oat) << "OatFileAssistant test for existing oat file "
<< oat_file_name << ": " << error_msg;
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 664db98..f781532 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -369,9 +369,7 @@
// remaining lifetime of the OatFileAssistant object.
ScopedFlock flock_;
- // In a properly constructed OatFileAssistant object, dex_location_ should
- // never be null.
- const char* dex_location_ = nullptr;
+ std::string dex_location_;
// In a properly constructed OatFileAssistant object, isa_ should be either
// the 32 or 64 bit variant for the current device.
diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc
new file mode 100644
index 0000000..9786c05
--- /dev/null
+++ b/runtime/oat_quick_method_header.cc
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "oat_quick_method_header.h"
+
+#include "art_method.h"
+#include "mapping_table.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+
+namespace art {
+
+OatQuickMethodHeader::OatQuickMethodHeader(
+ uint32_t mapping_table_offset,
+ uint32_t vmap_table_offset,
+ uint32_t gc_map_offset,
+ uint32_t frame_size_in_bytes,
+ uint32_t core_spill_mask,
+ uint32_t fp_spill_mask,
+ uint32_t code_size)
+ : mapping_table_offset_(mapping_table_offset),
+ vmap_table_offset_(vmap_table_offset),
+ gc_map_offset_(gc_map_offset),
+ frame_info_(frame_size_in_bytes, core_spill_mask, fp_spill_mask),
+ code_size_(code_size) {}
+
+OatQuickMethodHeader::~OatQuickMethodHeader() {}
+
+uint32_t OatQuickMethodHeader::ToDexPc(ArtMethod* method,
+ const uintptr_t pc,
+ bool abort_on_failure) const {
+ const void* entry_point = GetEntryPoint();
+ uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
+ if (IsOptimized()) {
+ CodeInfo code_info = GetOptimizedCodeInfo();
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset, encoding);
+ if (stack_map.IsValid()) {
+ return stack_map.GetDexPc(encoding);
+ }
+ } else {
+ MappingTable table(GetMappingTable());
+ // NOTE: Special methods (see Mir2Lir::GenSpecialCase()) have an empty mapping
+ // but they have no suspend checks and, consequently, we never call ToDexPc() for them.
+ if (table.TotalSize() == 0) {
+ DCHECK(method->IsNative());
+ return DexFile::kDexNoIndex;
+ }
+
+ // Assume the caller wants a pc-to-dex mapping so check here first.
+ typedef MappingTable::PcToDexIterator It;
+ for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
+ if (cur.NativePcOffset() == sought_offset) {
+ return cur.DexPc();
+ }
+ }
+ // Now check dex-to-pc mappings.
+ typedef MappingTable::DexToPcIterator It2;
+ for (It2 cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
+ if (cur.NativePcOffset() == sought_offset) {
+ return cur.DexPc();
+ }
+ }
+ }
+ if (abort_on_failure) {
+ ScopedObjectAccess soa(Thread::Current());
+ LOG(FATAL) << "Failed to find Dex offset for PC offset "
+ << reinterpret_cast<void*>(sought_offset)
+ << "(PC " << reinterpret_cast<void*>(pc) << ", entry_point=" << entry_point
+ << " current entry_point=" << method->GetEntryPointFromQuickCompiledCode()
+ << ") in " << PrettyMethod(method);
+ }
+ return DexFile::kDexNoIndex;
+}
+
+uintptr_t OatQuickMethodHeader::ToNativeQuickPc(ArtMethod* method,
+ const uint32_t dex_pc,
+ bool is_for_catch_handler,
+ bool abort_on_failure) const {
+ const void* entry_point = GetEntryPoint();
+ if (IsOptimized()) {
+ // Optimized code does not have a mapping table. Search for the dex-to-pc
+ // mapping in stack maps.
+ CodeInfo code_info = GetOptimizedCodeInfo();
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+
+ // All stack maps are stored in the same CodeItem section, safepoint stack
+ // maps first, then catch stack maps. We use `is_for_catch_handler` to select
+ // the order of iteration.
+ StackMap stack_map =
+ LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc, encoding)
+ : code_info.GetStackMapForDexPc(dex_pc, encoding);
+ if (stack_map.IsValid()) {
+ return reinterpret_cast<uintptr_t>(entry_point) + stack_map.GetNativePcOffset(encoding);
+ }
+ } else {
+ MappingTable table(GetMappingTable());
+ if (table.TotalSize() == 0) {
+ DCHECK_EQ(dex_pc, 0U);
+ return 0; // Special no mapping/pc == 0 case
+ }
+ // Assume the caller wants a dex-to-pc mapping so check here first.
+ typedef MappingTable::DexToPcIterator It;
+ for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
+ if (cur.DexPc() == dex_pc) {
+ return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
+ }
+ }
+ // Now check pc-to-dex mappings.
+ typedef MappingTable::PcToDexIterator It2;
+ for (It2 cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
+ if (cur.DexPc() == dex_pc) {
+ return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
+ }
+ }
+ }
+
+ if (abort_on_failure) {
+ ScopedObjectAccess soa(Thread::Current());
+ LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
+ << " in " << PrettyMethod(method);
+ }
+ return UINTPTR_MAX;
+}
+
+} // namespace art
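
ToDexPc() and ToNativeQuickPc() above are the two translation primitives the stack walker and exception handler rely on later in this change. A minimal sketch of how a caller might drive them, assuming a header already resolved for `method` and a PC inside that method's code (the wrapper names are illustrative, not part of this change):

    #include "oat_quick_method_header.h"

    namespace art {

    // Sketch only: map an absolute quick-frame PC back to a dex pc, tolerating
    // a missing mapping (DexFile::kDexNoIndex is returned in that case).
    inline uint32_t DexPcForFramePc(ArtMethod* method,
                                    const OatQuickMethodHeader* header,
                                    uintptr_t quick_frame_pc) {
      return header->ToDexPc(method, quick_frame_pc, /* abort_on_failure */ false);
    }

    // Sketch only: native PC of the catch handler at `catch_dex_pc`; the catch
    // stack maps are searched because is_for_catch_handler is true.
    inline uintptr_t CatchHandlerNativePc(ArtMethod* method,
                                          const OatQuickMethodHeader* header,
                                          uint32_t catch_dex_pc) {
      return header->ToNativeQuickPc(method, catch_dex_pc, /* is_for_catch_handler */ true);
    }

    }  // namespace art
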
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
new file mode 100644
index 0000000..6eadd87
--- /dev/null
+++ b/runtime/oat_quick_method_header.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_OAT_QUICK_METHOD_HEADER_H_
+#define ART_RUNTIME_OAT_QUICK_METHOD_HEADER_H_
+
+#include "arch/instruction_set.h"
+#include "base/macros.h"
+#include "quick/quick_method_frame_info.h"
+#include "stack_map.h"
+
+namespace art {
+
+class ArtMethod;
+
+// OatQuickMethodHeader precedes the raw code chunk generated by the compiler.
+class PACKED(4) OatQuickMethodHeader {
+ public:
+ OatQuickMethodHeader(uint32_t mapping_table_offset = 0U,
+ uint32_t vmap_table_offset = 0U,
+ uint32_t gc_map_offset = 0U,
+ uint32_t frame_size_in_bytes = 0U,
+ uint32_t core_spill_mask = 0U,
+ uint32_t fp_spill_mask = 0U,
+ uint32_t code_size = 0U);
+
+ ~OatQuickMethodHeader();
+
+ OatQuickMethodHeader& operator=(const OatQuickMethodHeader&) = default;
+
+ uintptr_t NativeQuickPcOffset(const uintptr_t pc) const {
+ return pc - reinterpret_cast<uintptr_t>(GetEntryPoint());
+ }
+
+ bool IsOptimized() const {
+ return gc_map_offset_ == 0 && vmap_table_offset_ != 0;
+ }
+
+ CodeInfo GetOptimizedCodeInfo() const {
+ DCHECK(IsOptimized());
+ const void* data = reinterpret_cast<const void*>(code_ - vmap_table_offset_);
+ return CodeInfo(data);
+ }
+
+ const uint8_t* GetCode() const {
+ return code_;
+ }
+
+ const uint8_t* GetNativeGcMap() const {
+ return (gc_map_offset_ == 0) ? nullptr : code_ - gc_map_offset_;
+ }
+
+ const uint8_t* GetMappingTable() const {
+ return (mapping_table_offset_ == 0) ? nullptr : code_ - mapping_table_offset_;
+ }
+
+ const uint8_t* GetVmapTable() const {
+ CHECK(!IsOptimized()) << "Unimplemented vmap table for optimizing compiler";
+ return (vmap_table_offset_ == 0) ? nullptr : code_ - vmap_table_offset_;
+ }
+
+ bool Contains(uintptr_t pc) const {
+ uintptr_t code_start = reinterpret_cast<uintptr_t>(code_);
+ return code_start <= pc && pc <= (code_start + code_size_);
+ }
+
+ const uint8_t* GetEntryPoint() const {
+ // When the runtime architecture is ARM, `kRuntimeISA` is set to `kArm`
+ // (not `kThumb2`), *but* we always generate code for the Thumb-2
+ // instruction set anyway. Thumb-2 requires the entrypoint to be of
+ // offset 1.
+ static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
+ return (kRuntimeISA == kArm)
+ ? reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(code_) | 1)
+ : code_;
+ }
+
+ template <bool kCheckFrameSize = true>
+ uint32_t GetFrameSizeInBytes() {
+ uint32_t result = frame_info_.FrameSizeInBytes();
+ if (kCheckFrameSize) {
+ DCHECK_LE(static_cast<size_t>(kStackAlignment), result);
+ }
+ return result;
+ }
+
+ QuickMethodFrameInfo GetFrameInfo() const {
+ return frame_info_;
+ }
+
+ uintptr_t ToNativeQuickPc(ArtMethod* method,
+ const uint32_t dex_pc,
+ bool is_for_catch_handler,
+ bool abort_on_failure = true) const;
+
+ uint32_t ToDexPc(ArtMethod* method, const uintptr_t pc, bool abort_on_failure = true) const;
+
+ // The offset in bytes from the start of the mapping table to the end of the header.
+ uint32_t mapping_table_offset_;
+ // The offset in bytes from the start of the vmap table to the end of the header.
+ uint32_t vmap_table_offset_;
+ // The offset in bytes from the start of the gc map to the end of the header.
+ uint32_t gc_map_offset_;
+ // The stack frame information.
+ QuickMethodFrameInfo frame_info_;
+ // The code size in bytes.
+ uint32_t code_size_;
+ // The actual code.
+ uint8_t code_[0];
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_OAT_QUICK_METHOD_HEADER_H_
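
Because `code_[0]` is a flexible array member, the header always sits immediately before the compiled code it describes; later hunks (AssertPcIsWithinQuickCode() in stack.cc and PcIsWithinQuickCode() in utils.cc) exploit this by indexing one header back from a code pointer. A minimal sketch of that pattern, assuming `code_ptr` really is the start of quick-compiled code and not a stub:

    #include "oat_quick_method_header.h"

    namespace art {

    // Sketch only: recover the method header that precedes a quick code pointer
    // and test whether `pc` lies within that method's code region.
    inline bool PcBelongsToCode(const void* code_ptr, uintptr_t pc) {
      const OatQuickMethodHeader* header =
          reinterpret_cast<const OatQuickMethodHeader*>(code_ptr) - 1;
      return header->Contains(pc);
    }

    }  // namespace art
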
diff --git a/runtime/quick/quick_method_frame_info.h b/runtime/quick/quick_method_frame_info.h
index 684d4da..71f8265 100644
--- a/runtime/quick/quick_method_frame_info.h
+++ b/runtime/quick/quick_method_frame_info.h
@@ -50,6 +50,10 @@
return fp_spill_mask_;
}
+ size_t GetReturnPcOffset() const {
+ return FrameSizeInBytes() - sizeof(void*);
+ }
+
private:
uint32_t frame_size_in_bytes_;
uint32_t core_spill_mask_;
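
GetReturnPcOffset() captures the convention that the return PC is spilled in the last pointer-sized slot of a quick frame; the reworked GetReturnPc()/SetReturnPc() in stack.cc below rely on exactly this. A small sketch of the address computation (the helper name is illustrative):

    #include "quick/quick_method_frame_info.h"

    namespace art {

    // Sketch only: address of the return PC slot, given a quick frame's stack
    // pointer and its frame info (offset == frame size - sizeof(void*)).
    inline uintptr_t* ReturnPcAddress(uint8_t* sp, QuickMethodFrameInfo frame_info) {
      return reinterpret_cast<uintptr_t*>(sp + frame_info.GetReturnPcOffset());
    }

    }  // namespace art
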
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 7ba19ab..53b4f3a 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -17,7 +17,6 @@
#include "quick_exception_handler.h"
#include "arch/context.h"
-#include "art_code.h"
#include "art_method-inl.h"
#include "dex_instruction.h"
#include "entrypoints/entrypoint_utils.h"
@@ -27,6 +26,7 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/throwable.h"
+#include "oat_quick_method_header.h"
#include "stack_map.h"
#include "verifier/method_verifier.h"
@@ -36,13 +36,19 @@
static constexpr size_t kInvalidFrameDepth = 0xffffffff;
QuickExceptionHandler::QuickExceptionHandler(Thread* self, bool is_deoptimization)
- : self_(self), context_(self->GetLongJumpContext()), is_deoptimization_(is_deoptimization),
- method_tracing_active_(is_deoptimization ||
- Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()),
- handler_quick_frame_(nullptr), handler_quick_frame_pc_(0), handler_quick_arg0_(0),
- handler_method_(nullptr), handler_dex_pc_(0), clear_exception_(false),
- handler_frame_depth_(kInvalidFrameDepth) {
-}
+ : self_(self),
+ context_(self->GetLongJumpContext()),
+ is_deoptimization_(is_deoptimization),
+ method_tracing_active_(is_deoptimization ||
+ Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()),
+ handler_quick_frame_(nullptr),
+ handler_quick_frame_pc_(0),
+ handler_method_header_(nullptr),
+ handler_quick_arg0_(0),
+ handler_method_(nullptr),
+ handler_dex_pc_(0),
+ clear_exception_(false),
+ handler_frame_depth_(kInvalidFrameDepth) {}
// Finds catch handler.
class CatchBlockStackVisitor FINAL : public StackVisitor {
@@ -62,6 +68,7 @@
// This is the upcall, we remember the frame and last pc so that we may long jump to them.
exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
+ exception_handler_->SetHandlerMethodHeader(GetCurrentOatQuickMethodHeader());
uint32_t next_dex_pc;
ArtMethod* next_art_method;
bool has_next = GetNextMethodAndDexPc(&next_art_method, &next_dex_pc);
@@ -101,8 +108,10 @@
exception_handler_->SetHandlerMethod(method);
exception_handler_->SetHandlerDexPc(found_dex_pc);
exception_handler_->SetHandlerQuickFramePc(
- GetCurrentCode().ToNativeQuickPc(found_dex_pc, /* is_catch_handler */ true));
+ GetCurrentOatQuickMethodHeader()->ToNativeQuickPc(
+ method, found_dex_pc, /* is_catch_handler */ true));
exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
+ exception_handler_->SetHandlerMethodHeader(GetCurrentOatQuickMethodHeader());
return false; // End stack walk.
} else if (UNLIKELY(GetThread()->HasDebuggerShadowFrames())) {
// We are going to unwind this frame. Did we prepare a shadow frame for debugging?
@@ -160,8 +169,8 @@
}
// If the handler is in optimized code, we need to set the catch environment.
if (*handler_quick_frame_ != nullptr &&
- handler_method_ != nullptr &&
- ArtCode(handler_quick_frame_).IsOptimized(sizeof(void*))) {
+ handler_method_header_ != nullptr &&
+ handler_method_header_->IsOptimized()) {
SetCatchEnvironmentForOptimizedHandler(&visitor);
}
}
@@ -202,14 +211,14 @@
void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor* stack_visitor) {
DCHECK(!is_deoptimization_);
DCHECK(*handler_quick_frame_ != nullptr) << "Method should not be called on upcall exceptions";
- DCHECK(handler_method_ != nullptr && ArtCode(handler_quick_frame_).IsOptimized(sizeof(void*)));
+ DCHECK(handler_method_ != nullptr && handler_method_header_->IsOptimized());
if (kDebugExceptionDelivery) {
self_->DumpStack(LOG(INFO) << "Setting catch phis: ");
}
const size_t number_of_vregs = handler_method_->GetCodeItem()->registers_size_;
- CodeInfo code_info = ArtCode(handler_quick_frame_).GetOptimizedCodeInfo();
+ CodeInfo code_info = handler_method_header_->GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
// Find stack map of the throwing instruction.
@@ -285,6 +294,7 @@
// and last pc so that we may long jump to them.
exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
+ exception_handler_->SetHandlerMethodHeader(GetCurrentOatQuickMethodHeader());
if (!stacked_shadow_frame_pushed_) {
// In case there is no deoptimized shadow frame for this upcall, we still
// need to push a nullptr to the stack since there is always a matching pop after
@@ -305,7 +315,43 @@
CHECK_EQ(GetFrameDepth(), 1U);
return true;
} else {
- HandleDeoptimization(method);
+ // Check if a shadow frame already exists for debugger's set-local-value purpose.
+ const size_t frame_id = GetFrameId();
+ ShadowFrame* new_frame = GetThread()->FindDebuggerShadowFrame(frame_id);
+ const bool* updated_vregs;
+ const size_t num_regs = method->GetCodeItem()->registers_size_;
+ if (new_frame == nullptr) {
+ new_frame = ShadowFrame::CreateDeoptimizedFrame(num_regs, nullptr, method, GetDexPc());
+ updated_vregs = nullptr;
+ } else {
+ updated_vregs = GetThread()->GetUpdatedVRegFlags(frame_id);
+ DCHECK(updated_vregs != nullptr);
+ }
+ if (GetCurrentOatQuickMethodHeader()->IsOptimized()) {
+ HandleOptimizingDeoptimization(method, new_frame, updated_vregs);
+ } else {
+ HandleQuickDeoptimization(method, new_frame, updated_vregs);
+ }
+ if (updated_vregs != nullptr) {
+ // Calling Thread::RemoveDebuggerShadowFrameMapping will also delete the updated_vregs
+ // array so this must come after we processed the frame.
+ GetThread()->RemoveDebuggerShadowFrameMapping(frame_id);
+ DCHECK(GetThread()->FindDebuggerShadowFrame(frame_id) == nullptr);
+ }
+ if (prev_shadow_frame_ != nullptr) {
+ prev_shadow_frame_->SetLink(new_frame);
+ } else {
+ // Will be popped after the long jump after DeoptimizeStack(),
+ // right before interpreter::EnterInterpreterFromDeoptimize().
+ stacked_shadow_frame_pushed_ = true;
+ GetThread()->PushStackedShadowFrame(
+ new_frame,
+ single_frame_deopt_
+ ? StackedShadowFrameType::kSingleFrameDeoptimizationShadowFrame
+ : StackedShadowFrameType::kDeoptimizationShadowFrame);
+ }
+ prev_shadow_frame_ = new_frame;
+
if (single_frame_deopt_ && !IsInInlinedFrame()) {
// Single-frame deopt ends at the first non-inlined frame and needs to store that method.
exception_handler_->SetHandlerQuickArg0(reinterpret_cast<uintptr_t>(method));
@@ -316,16 +362,103 @@
}
private:
+ void HandleOptimizingDeoptimization(ArtMethod* m,
+ ShadowFrame* new_frame,
+ const bool* updated_vregs)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ CodeInfo code_info = method_header->GetOptimizedCodeInfo();
+ uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+ const size_t number_of_vregs = m->GetCodeItem()->registers_size_;
+ DexRegisterMap vreg_map = code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);
+ MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
+ uint32_t register_mask = stack_map.GetRegisterMask(encoding);
+
+ for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
+ if (updated_vregs != nullptr && updated_vregs[vreg]) {
+ // Keep the value set by debugger.
+ continue;
+ }
+
+ DexRegisterLocation::Kind location =
+ vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
+ static constexpr uint32_t kDeadValue = 0xEBADDE09;
+ uint32_t value = kDeadValue;
+ bool is_reference = false;
+
+ switch (location) {
+ case DexRegisterLocation::Kind::kInStack: {
+ const int32_t offset = vreg_map.GetStackOffsetInBytes(vreg,
+ number_of_vregs,
+ code_info,
+ encoding);
+ const uint8_t* addr = reinterpret_cast<const uint8_t*>(GetCurrentQuickFrame()) + offset;
+ value = *reinterpret_cast<const uint32_t*>(addr);
+ uint32_t bit = (offset >> 2);
+ if (stack_mask.size_in_bits() > bit && stack_mask.LoadBit(bit)) {
+ is_reference = true;
+ }
+ break;
+ }
+ case DexRegisterLocation::Kind::kInRegister:
+ case DexRegisterLocation::Kind::kInRegisterHigh:
+ case DexRegisterLocation::Kind::kInFpuRegister:
+ case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
+ uint32_t reg = vreg_map.GetMachineRegister(vreg, number_of_vregs, code_info, encoding);
+ bool result = GetRegisterIfAccessible(reg, ToVRegKind(location), &value);
+ CHECK(result);
+ if (location == DexRegisterLocation::Kind::kInRegister) {
+ if (((1u << reg) & register_mask) != 0) {
+ is_reference = true;
+ }
+ }
+ break;
+ }
+ case DexRegisterLocation::Kind::kConstant: {
+ value = vreg_map.GetConstant(vreg, number_of_vregs, code_info, encoding);
+ if (value == 0) {
+ // Make it a reference for extra safety.
+ is_reference = true;
+ }
+ break;
+ }
+ case DexRegisterLocation::Kind::kNone: {
+ break;
+ }
+ default: {
+ LOG(FATAL)
+ << "Unexpected location kind"
+ << DexRegisterLocation::PrettyDescriptor(
+ vreg_map.GetLocationInternalKind(vreg,
+ number_of_vregs,
+ code_info,
+ encoding));
+ UNREACHABLE();
+ }
+ }
+ if (is_reference) {
+ new_frame->SetVRegReference(vreg, reinterpret_cast<mirror::Object*>(value));
+ } else {
+ new_frame->SetVReg(vreg, value);
+ }
+ }
+ }
+
static VRegKind GetVRegKind(uint16_t reg, const std::vector<int32_t>& kinds) {
return static_cast<VRegKind>(kinds.at(reg * 2));
}
- void HandleDeoptimization(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void HandleQuickDeoptimization(ArtMethod* m,
+ ShadowFrame* new_frame,
+ const bool* updated_vregs)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = m->GetCodeItem();
CHECK(code_item != nullptr) << "No code item for " << PrettyMethod(m);
uint16_t num_regs = code_item->registers_size_;
uint32_t dex_pc = GetDexPc();
- StackHandleScope<2> hs(GetThread()); // Dex cache, class loader and method.
+ StackHandleScope<2> hs(GetThread()); // Dex cache and class loader.
mirror::Class* declaring_class = m->GetDeclaringClass();
Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
@@ -335,17 +468,6 @@
true, true);
bool verifier_success = verifier.Verify();
CHECK(verifier_success) << PrettyMethod(m);
- // Check if a shadow frame already exists for debugger's set-local-value purpose.
- const size_t frame_id = GetFrameId();
- ShadowFrame* new_frame = GetThread()->FindDebuggerShadowFrame(frame_id);
- const bool* updated_vregs;
- if (new_frame == nullptr) {
- new_frame = ShadowFrame::CreateDeoptimizedFrame(num_regs, nullptr, m, dex_pc);
- updated_vregs = nullptr;
- } else {
- updated_vregs = GetThread()->GetUpdatedVRegFlags(frame_id);
- DCHECK(updated_vregs != nullptr);
- }
{
ScopedStackedShadowFramePusher pusher(GetThread(), new_frame,
StackedShadowFrameType::kShadowFrameUnderConstruction);
@@ -452,25 +574,6 @@
}
}
}
- if (updated_vregs != nullptr) {
- // Calling Thread::RemoveDebuggerShadowFrameMapping will also delete the updated_vregs
- // array so this must come after we processed the frame.
- GetThread()->RemoveDebuggerShadowFrameMapping(frame_id);
- DCHECK(GetThread()->FindDebuggerShadowFrame(frame_id) == nullptr);
- }
- if (prev_shadow_frame_ != nullptr) {
- prev_shadow_frame_->SetLink(new_frame);
- } else {
- // Will be popped after the long jump after DeoptimizeStack(),
- // right before interpreter::EnterInterpreterFromDeoptimize().
- stacked_shadow_frame_pushed_ = true;
- GetThread()->PushStackedShadowFrame(
- new_frame,
- single_frame_deopt_
- ? StackedShadowFrameType::kSingleFrameDeoptimizationShadowFrame
- : StackedShadowFrameType::kDeoptimizationShadowFrame);
- }
- prev_shadow_frame_ = new_frame;
}
QuickExceptionHandler* const exception_handler_;
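
In HandleOptimizingDeoptimization() above, a stack-located vreg is treated as a reference when the bit for its 4-byte slot is set in the stack map's stack mask. That test, pulled out into a standalone sketch (the helper name is illustrative):

    #include "memory_region.h"

    namespace art {

    // Sketch only: a stack slot at byte offset `offset` from the quick frame is
    // a reference iff its word index (offset / 4) is set in the stack mask.
    inline bool StackSlotIsReference(MemoryRegion stack_mask, int32_t offset) {
      const uint32_t bit = static_cast<uint32_t>(offset) >> 2;
      return stack_mask.size_in_bits() > bit && stack_mask.LoadBit(bit);
    }

    }  // namespace art
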
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index 89d6a25..eedf83f 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -71,6 +71,10 @@
handler_quick_frame_pc_ = handler_quick_frame_pc;
}
+ void SetHandlerMethodHeader(const OatQuickMethodHeader* handler_method_header) {
+ handler_method_header_ = handler_method_header;
+ }
+
void SetHandlerQuickArg0(uintptr_t handler_quick_arg0) {
handler_quick_arg0_ = handler_quick_arg0;
}
@@ -115,6 +119,8 @@
ArtMethod** handler_quick_frame_;
// PC to branch to for the handler.
uintptr_t handler_quick_frame_pc_;
+ // Quick code of the handler.
+ const OatQuickMethodHeader* handler_method_header_;
// The value for argument 0.
uintptr_t handler_quick_arg0_;
// The handler method to report to the debugger.
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index bd89be5..c7c2709 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -157,7 +157,8 @@
result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(SCHAR_MAX, result.GetB());
- args[0].b = (SCHAR_MIN << 24) >> 24;
+ static_assert(SCHAR_MIN == -128, "SCHAR_MIN unexpected");
+ args[0].b = SCHAR_MIN;
result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(SCHAR_MIN, result.GetB());
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 6c459a3..556ba56 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -326,7 +326,7 @@
if (self == nullptr) {
os << "(Aborting thread was not attached to runtime!)\n";
DumpKernelStack(os, GetTid(), " kernel: ", false);
- DumpNativeStack(os, GetTid(), " native: ", nullptr);
+ DumpNativeStack(os, GetTid(), nullptr, " native: ", nullptr);
} else {
os << "Aborting thread:\n";
if (Locks::mutator_lock_->IsExclusiveHeld(self) || Locks::mutator_lock_->IsSharedHeld(self)) {
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index 44a13c9..122dcb1 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -41,7 +41,7 @@
public:
explicit Backtrace(void* raw_context) : raw_context_(raw_context) {}
void Dump(std::ostream& os) const {
- DumpNativeStack(os, GetTid(), "\t", nullptr, nullptr, raw_context_);
+ DumpNativeStack(os, GetTid(), nullptr, "\t", nullptr, raw_context_);
}
private:
// Stores the context of the signal that was unexpected and will terminate the runtime. The
diff --git a/runtime/stack.cc b/runtime/stack.cc
index d8d916c..9359d27 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -17,7 +17,6 @@
#include "stack.h"
#include "arch/context.h"
-#include "art_code.h"
#include "art_method-inl.h"
#include "base/hex_dump.h"
#include "entrypoints/entrypoint_utils-inl.h"
@@ -25,10 +24,13 @@
#include "gc_map.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "linear_alloc.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
+#include "oat_quick_method_header.h"
#include "quick/quick_method_frame_info.h"
#include "runtime.h"
#include "thread.h"
@@ -103,6 +105,7 @@
cur_shadow_frame_(nullptr),
cur_quick_frame_(nullptr),
cur_quick_frame_pc_(0),
+ cur_oat_quick_method_header_(nullptr),
num_frames_(num_frames),
cur_depth_(0),
current_inlining_depth_(0),
@@ -111,9 +114,9 @@
}
InlineInfo StackVisitor::GetCurrentInlineInfo() const {
- ArtCode outer_code = GetCurrentCode();
- uint32_t native_pc_offset = outer_code.NativeQuickPcOffset(cur_quick_frame_pc_);
- CodeInfo code_info = outer_code.GetOptimizedCodeInfo();
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
+ CodeInfo code_info = method_header->GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(stack_map.IsValid());
@@ -142,8 +145,11 @@
if (IsInInlinedFrame()) {
size_t depth_in_stack_map = current_inlining_depth_ - 1;
return GetCurrentInlineInfo().GetDexPcAtDepth(depth_in_stack_map);
+ } else if (cur_oat_quick_method_header_ == nullptr) {
+ return DexFile::kDexNoIndex;
} else {
- return GetCurrentCode().ToDexPc(cur_quick_frame_pc_, abort_on_failure);
+ return cur_oat_quick_method_header_->ToDexPc(
+ GetMethod(), cur_quick_frame_pc_, abort_on_failure);
}
} else {
return 0;
@@ -161,8 +167,7 @@
} else if (m->IsNative()) {
if (cur_quick_frame_ != nullptr) {
HandleScope* hs = reinterpret_cast<HandleScope*>(
- reinterpret_cast<char*>(cur_quick_frame_) +
- GetCurrentCode().GetHandleScopeOffset().SizeValue());
+ reinterpret_cast<char*>(cur_quick_frame_) + sizeof(ArtMethod*));
return hs->GetReference(0);
} else {
return cur_shadow_frame_->GetVRegReference(0);
@@ -192,7 +197,7 @@
size_t StackVisitor::GetNativePcOffset() const {
DCHECK(!IsShadowFrame());
- return GetCurrentCode().NativeQuickPcOffset(cur_quick_frame_pc_);
+ return GetCurrentOatQuickMethodHeader()->NativeQuickPcOffset(cur_quick_frame_pc_);
}
bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) {
@@ -201,10 +206,11 @@
if (m->IsNative() || m->IsRuntimeMethod() || m->IsProxyMethod()) {
return false;
}
- if (GetCurrentCode().IsOptimized(sizeof(void*))) {
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ if (method_header->IsOptimized()) {
return true; // TODO: Implement.
}
- const uint8_t* native_gc_map = GetCurrentCode().GetNativeGcMap(sizeof(void*));
+ const uint8_t* native_gc_map = method_header->GetNativeGcMap();
CHECK(native_gc_map != nullptr) << PrettyMethod(m);
const DexFile::CodeItem* code_item = m->GetCodeItem();
// Can't be null or how would we compile its instructions?
@@ -213,7 +219,7 @@
size_t num_regs = std::min(map.RegWidth() * 8, static_cast<size_t>(code_item->registers_size_));
const uint8_t* reg_bitmap = nullptr;
if (num_regs > 0) {
- uintptr_t native_pc_offset = GetCurrentCode().NativeQuickPcOffset(GetCurrentQuickFramePc());
+ uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
reg_bitmap = map.FindBitMap(native_pc_offset);
DCHECK(reg_bitmap != nullptr);
}
@@ -252,7 +258,7 @@
if (GetVRegFromDebuggerShadowFrame(vreg, kind, val)) {
return true;
}
- if (GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (cur_oat_quick_method_header_->IsOptimized()) {
return GetVRegFromOptimizedCode(m, vreg, kind, val);
} else {
return GetVRegFromQuickCode(m, vreg, kind, val);
@@ -267,8 +273,9 @@
bool StackVisitor::GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const {
DCHECK_EQ(m, GetMethod());
- const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
- QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ QuickMethodFrameInfo frame_info = method_header->GetFrameInfo();
+ const VmapTable vmap_table(method_header->GetVmapTable());
uint32_t vmap_offset;
// TODO: IsInContext stops before spotting floating point registers.
if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
@@ -294,10 +301,11 @@
// its instructions?
uint16_t number_of_dex_registers = code_item->registers_size_;
DCHECK_LT(vreg, code_item->registers_size_);
- CodeInfo code_info = GetCurrentCode().GetOptimizedCodeInfo();
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ CodeInfo code_info = method_header->GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
- uint32_t native_pc_offset = GetCurrentCode().NativeQuickPcOffset(cur_quick_frame_pc_);
+ uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(stack_map.IsValid());
size_t depth_in_stack_map = current_inlining_depth_ - 1;
@@ -402,7 +410,7 @@
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably read registers without a context.
DCHECK(m == GetMethod());
- if (GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (cur_oat_quick_method_header_->IsOptimized()) {
return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
} else {
return GetVRegPairFromQuickCode(m, vreg, kind_lo, kind_hi, val);
@@ -417,8 +425,9 @@
bool StackVisitor::GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
VRegKind kind_hi, uint64_t* val) const {
DCHECK_EQ(m, GetMethod());
- const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
- QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ QuickMethodFrameInfo frame_info = method_header->GetFrameInfo();
+ const VmapTable vmap_table(method_header->GetVmapTable());
uint32_t vmap_offset_lo, vmap_offset_hi;
// TODO: IsInContext stops before spotting floating point registers.
if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
@@ -477,7 +486,7 @@
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
- if (GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (cur_oat_quick_method_header_->IsOptimized()) {
return false;
} else {
return SetVRegFromQuickCode(m, vreg, new_value, kind);
@@ -492,8 +501,9 @@
VRegKind kind) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
- const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
- QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ QuickMethodFrameInfo frame_info = method_header->GetFrameInfo();
+ const VmapTable vmap_table(method_header->GetVmapTable());
uint32_t vmap_offset;
// TODO: IsInContext stops before spotting floating point registers.
if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
@@ -584,7 +594,7 @@
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
- if (GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (cur_oat_quick_method_header_->IsOptimized()) {
return false;
} else {
return SetVRegPairFromQuickCode(m, vreg, new_value, kind_lo, kind_hi);
@@ -599,8 +609,9 @@
bool StackVisitor::SetVRegPairFromQuickCode(
ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) {
DCHECK_EQ(m, GetMethod());
- const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
- QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ QuickMethodFrameInfo frame_info = method_header->GetFrameInfo();
+ const VmapTable vmap_table(method_header->GetVmapTable());
uint32_t vmap_offset_lo, vmap_offset_hi;
// TODO: IsInContext stops before spotting floating point registers.
if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
@@ -717,14 +728,14 @@
uintptr_t StackVisitor::GetReturnPc() const {
uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
DCHECK(sp != nullptr);
- uint8_t* pc_addr = sp + GetCurrentCode().GetReturnPcOffset().SizeValue();
+ uint8_t* pc_addr = sp + GetCurrentQuickFrameInfo().GetReturnPcOffset();
return *reinterpret_cast<uintptr_t*>(pc_addr);
}
void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
CHECK(sp != nullptr);
- uint8_t* pc_addr = sp + GetCurrentCode().GetReturnPcOffset().SizeValue();
+ uint8_t* pc_addr = sp + GetCurrentQuickFrameInfo().GetReturnPcOffset();
*reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
}
@@ -821,6 +832,45 @@
return thread->GetInstrumentationStack()->at(depth);
}
+static void AssertPcIsWithinQuickCode(ArtMethod* method, uintptr_t pc)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (method->IsNative() || method->IsRuntimeMethod() || method->IsProxyMethod()) {
+ return;
+ }
+
+ if (pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
+ return;
+ }
+
+ const void* code = method->GetEntryPointFromQuickCompiledCode();
+ if (code == GetQuickInstrumentationEntryPoint()) {
+ return;
+ }
+
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ if (class_linker->IsQuickToInterpreterBridge(code) ||
+ class_linker->IsQuickResolutionStub(code)) {
+ return;
+ }
+
+ // If we are the JIT then we may have just compiled the method after the
+ // IsQuickToInterpreterBridge check.
+ jit::Jit* const jit = Runtime::Current()->GetJit();
+ if (jit != nullptr &&
+ jit->GetCodeCache()->ContainsCodePtr(reinterpret_cast<const void*>(code))) {
+ return;
+ }
+
+ uint32_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(
+ EntryPointToCodePointer(code))[-1].code_size_;
+ uintptr_t code_start = reinterpret_cast<uintptr_t>(code);
+ CHECK(code_start <= pc && pc <= (code_start + code_size))
+ << PrettyMethod(method)
+ << " pc=" << std::hex << pc
+ << " code=" << code
+ << " size=" << code_size;
+}
+
void StackVisitor::SanityCheckFrame() const {
if (kIsDebugBuild) {
ArtMethod* method = GetMethod();
@@ -859,9 +909,9 @@
}
}
if (cur_quick_frame_ != nullptr) {
- GetCurrentCode().AssertPcIsWithinQuickCode(cur_quick_frame_pc_);
+ AssertPcIsWithinQuickCode(method, cur_quick_frame_pc_);
// Frame sanity.
- size_t frame_size = GetCurrentCode().GetFrameSizeInBytes();
+ size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
CHECK_NE(frame_size, 0u);
// A rough guess at an upper size we expect to see for a frame.
// 256 registers
@@ -871,13 +921,80 @@
// TODO: 083-compiler-regressions ManyFloatArgs shows this estimate is wrong.
// const size_t kMaxExpectedFrameSize = (256 + 2 + 3 + 3) * sizeof(word);
const size_t kMaxExpectedFrameSize = 2 * KB;
- CHECK_LE(frame_size, kMaxExpectedFrameSize);
- size_t return_pc_offset = GetCurrentCode().GetReturnPcOffset().SizeValue();
+ CHECK_LE(frame_size, kMaxExpectedFrameSize) << PrettyMethod(method);
+ size_t return_pc_offset = GetCurrentQuickFrameInfo().GetReturnPcOffset();
CHECK_LT(return_pc_offset, frame_size);
}
}
}
+// Counts the number of references in the parameter list of the corresponding method.
+// Note: This does _not_ include "this" for non-static methods.
+static uint32_t GetNumberOfReferenceArgsWithoutReceiver(ArtMethod* method)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t shorty_len;
+ const char* shorty = method->GetShorty(&shorty_len);
+ uint32_t refs = 0;
+ for (uint32_t i = 1; i < shorty_len ; ++i) {
+ if (shorty[i] == 'L') {
+ refs++;
+ }
+ }
+ return refs;
+}
+
+QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const {
+ if (cur_oat_quick_method_header_ != nullptr) {
+ return cur_oat_quick_method_header_->GetFrameInfo();
+ }
+
+ ArtMethod* method = GetMethod();
+ Runtime* runtime = Runtime::Current();
+
+ if (method->IsAbstract()) {
+ return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+ }
+
+ // This goes before IsProxyMethod since runtime methods have a null declaring class.
+ if (method->IsRuntimeMethod()) {
+ return runtime->GetRuntimeMethodFrameInfo(method);
+ }
+
+ // For proxy methods we add special handling for the direct method case (there is only one
+ // direct method, the constructor). The direct method is cloned from the original
+ // java.lang.reflect.Proxy class together with its code, so it executes as a regular
+ // quick-compiled method without any stubs, and the frame info should be returned as for a
+ // quick method, not a stub. However, if instrumentation stubs are installed,
+ // instrumentation->GetQuickCodeFor() returns artQuickProxyInvokeHandler instead of an
+ // oat code pointer, so we have to add a special case here.
+ if (method->IsProxyMethod()) {
+ if (method->IsDirect()) {
+ CHECK(method->IsConstructor());
+ const void* code_pointer =
+ EntryPointToCodePointer(method->GetEntryPointFromQuickCompiledCode());
+ return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
+ } else {
+ return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+ }
+ }
+
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ DCHECK(method->IsNative());
+ const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method, sizeof(void*));
+ DCHECK(class_linker->IsQuickGenericJniStub(entry_point)) << PrettyMethod(method);
+ // Generic JNI frame.
+ uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(method) + 1;
+ size_t scope_size = HandleScope::SizeOf(handle_refs);
+ QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+
+ // Callee saves + handle scope + method ref + alignment
+ // Note: -sizeof(void*) since callee-save frame stores a whole method pointer.
+ size_t frame_size = RoundUp(
+ callee_info.FrameSizeInBytes() - sizeof(void*) + sizeof(ArtMethod*) + scope_size,
+ kStackAlignment);
+ return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
+}
+
void StackVisitor::WalkStack(bool include_transitions) {
DCHECK(thread_ == Thread::Current() || thread_->IsSuspended());
CHECK_EQ(cur_depth_, 0U);
@@ -890,19 +1007,23 @@
cur_shadow_frame_ = current_fragment->GetTopShadowFrame();
cur_quick_frame_ = current_fragment->GetTopQuickFrame();
cur_quick_frame_pc_ = 0;
+ cur_oat_quick_method_header_ = nullptr;
if (cur_quick_frame_ != nullptr) { // Handle quick stack frames.
// Can't be both a shadow and a quick fragment.
DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
ArtMethod* method = *cur_quick_frame_;
while (method != nullptr) {
+ cur_oat_quick_method_header_ = method->GetOatQuickMethodHeader(cur_quick_frame_pc_);
SanityCheckFrame();
if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
- && GetCurrentCode().IsOptimized(sizeof(void*))) {
- CodeInfo code_info = GetCurrentCode().GetOptimizedCodeInfo();
+ && (cur_oat_quick_method_header_ != nullptr)
+ && cur_oat_quick_method_header_->IsOptimized()) {
+ CodeInfo code_info = cur_oat_quick_method_header_->GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
- uint32_t native_pc_offset = GetCurrentCode().NativeQuickPcOffset(cur_quick_frame_pc_);
+ uint32_t native_pc_offset =
+ cur_oat_quick_method_header_->NativeQuickPcOffset(cur_quick_frame_pc_);
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
if (stack_map.IsValid() && stack_map.HasInlineInfo(encoding)) {
InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
@@ -925,14 +1046,16 @@
return;
}
+ QuickMethodFrameInfo frame_info = GetCurrentQuickFrameInfo();
if (context_ != nullptr) {
- context_->FillCalleeSaves(*this);
+ context_->FillCalleeSaves(reinterpret_cast<uint8_t*>(cur_quick_frame_), frame_info);
}
- size_t frame_size = GetCurrentCode().GetFrameSizeInBytes();
// Compute PC for next stack frame from return PC.
- size_t return_pc_offset = GetCurrentCode().GetReturnPcOffset().SizeValue();
+ size_t frame_size = frame_info.FrameSizeInBytes();
+ size_t return_pc_offset = frame_size - sizeof(void*);
uint8_t* return_pc_addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + return_pc_offset;
uintptr_t return_pc = *reinterpret_cast<uintptr_t*>(return_pc_addr);
+
if (UNLIKELY(exit_stubs_installed)) {
// While profiling, the return pc is restored from the side stack, except when walking
// the stack for an exception where the side stack will be unwound in VisitFrame.
@@ -963,7 +1086,6 @@
return_pc = instrumentation_frame.return_pc_;
}
}
- ArtCode code = GetCurrentCode();
cur_quick_frame_pc_ = return_pc;
uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size;
@@ -971,8 +1093,11 @@
if (kDebugStackWalk) {
LOG(INFO) << PrettyMethod(method) << "@" << method << " size=" << frame_size
- << " optimized=" << code.IsOptimized(sizeof(void*))
+ << std::boolalpha
+ << " optimized=" << (cur_oat_quick_method_header_ != nullptr &&
+ cur_oat_quick_method_header_->IsOptimized())
<< " native=" << method->IsNative()
+ << std::noboolalpha
<< " entrypoints=" << method->GetEntryPointFromQuickCompiledCode()
<< "," << method->GetEntryPointFromJni()
<< " next=" << *cur_quick_frame_;
diff --git a/runtime/stack.h b/runtime/stack.h
index 3e0566d..1276b24 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -20,13 +20,13 @@
#include <stdint.h>
#include <string>
-#include "art_code.h"
#include "arch/instruction_set.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "dex_file.h"
#include "gc_root.h"
#include "mirror/object_reference.h"
+#include "quick/quick_method_frame_info.h"
#include "read_barrier.h"
#include "verify_object.h"
@@ -40,6 +40,7 @@
class Context;
class HandleScope;
class InlineInfo;
+class OatQuickMethodHeader;
class ScopedObjectAccess;
class ShadowFrame;
class StackVisitor;
@@ -532,6 +533,9 @@
StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind)
SHARED_REQUIRES(Locks::mutator_lock_);
+ bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
public:
virtual ~StackVisitor() {}
@@ -561,18 +565,6 @@
size_t GetNativePcOffset() const SHARED_REQUIRES(Locks::mutator_lock_);
- uintptr_t* CalleeSaveAddress(int num, size_t frame_size) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
- // Callee saves are held at the top of the frame
- DCHECK(GetMethod() != nullptr);
- uint8_t* save_addr =
- reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size - ((num + 1) * sizeof(void*));
-#if defined(__i386__) || defined(__x86_64__)
- save_addr -= sizeof(void*); // account for return address
-#endif
- return reinterpret_cast<uintptr_t*>(save_addr);
- }
-
// Returns the height of the stack in the managed stack frames, including transitions.
size_t GetFrameHeight() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetNumFrames() - cur_depth_ - 1;
@@ -735,7 +727,11 @@
static void DescribeStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_);
- ArtCode GetCurrentCode() const { return ArtCode(cur_quick_frame_); }
+ const OatQuickMethodHeader* GetCurrentOatQuickMethodHeader() const {
+ return cur_oat_quick_method_header_;
+ }
+
+ QuickMethodFrameInfo GetCurrentQuickFrameInfo() const SHARED_REQUIRES(Locks::mutator_lock_);
private:
// Private constructor known in the case that num_frames_ has already been computed.
@@ -774,8 +770,6 @@
bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const
SHARED_REQUIRES(Locks::mutator_lock_);
- bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
- SHARED_REQUIRES(Locks::mutator_lock_);
bool GetVRegPairFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const
@@ -813,6 +807,7 @@
ShadowFrame* cur_shadow_frame_;
ArtMethod** cur_quick_frame_;
uintptr_t cur_quick_frame_pc_;
+ const OatQuickMethodHeader* cur_oat_quick_method_header_;
// Lazily computed, number of frames in the stack.
size_t num_frames_;
// Depth of the frame we're currently at.
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 8e0c288..b0cf418 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -32,7 +32,6 @@
#include <sstream>
#include "arch/context.h"
-#include "art_code.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/bit_utils.h"
@@ -58,6 +57,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/stack_trace_element.h"
#include "monitor.h"
+#include "oat_quick_method_header.h"
#include "object_lock.h"
#include "quick_exception_handler.h"
#include "quick/quick_method_frame_info.h"
@@ -919,9 +919,9 @@
<< "]";
}
-void Thread::Dump(std::ostream& os) const {
+void Thread::Dump(std::ostream& os, BacktraceMap* backtrace_map) const {
DumpState(os);
- DumpStack(os);
+ DumpStack(os, backtrace_map);
}
mirror::String* Thread::GetThreadName(const ScopedObjectAccessAlreadyRunnable& soa) const {
@@ -1480,7 +1480,7 @@
}
}
-void Thread::DumpStack(std::ostream& os) const {
+void Thread::DumpStack(std::ostream& os, BacktraceMap* backtrace_map) const {
// TODO: we call this code when dying but may not have suspended the thread ourself. The
// IsSuspended check is therefore racy with the use for dumping (normally we inhibit
// the race with the thread_suspend_count_lock_).
@@ -1496,8 +1496,7 @@
if (dump_for_abort || ShouldShowNativeStack(this)) {
DumpKernelStack(os, GetTid(), " kernel: ", false);
ArtMethod* method = GetCurrentMethod(nullptr, !dump_for_abort);
- ArtCode art_code(method);
- DumpNativeStack(os, GetTid(), " native: ", method, &art_code);
+ DumpNativeStack(os, GetTid(), backtrace_map, " native: ", method);
}
DumpJavaStack(os);
} else {
@@ -2640,38 +2639,15 @@
VisitDeclaringClass(m);
DCHECK(m != nullptr);
size_t num_regs = shadow_frame->NumberOfVRegs();
- if (m->IsNative() || shadow_frame->HasReferenceArray()) {
- // handle scope for JNI or References for interpreter.
- for (size_t reg = 0; reg < num_regs; ++reg) {
- mirror::Object* ref = shadow_frame->GetVRegReference(reg);
- if (ref != nullptr) {
- mirror::Object* new_ref = ref;
- visitor_(&new_ref, reg, this);
- if (new_ref != ref) {
- shadow_frame->SetVRegReference(reg, new_ref);
- }
- }
- }
- } else {
- // Java method.
- // Portable path use DexGcMap and store in Method.native_gc_map_.
- const uint8_t* gc_map = GetCurrentCode().GetNativeGcMap(sizeof(void*));
- CHECK(gc_map != nullptr) << PrettyMethod(m);
- verifier::DexPcToReferenceMap dex_gc_map(gc_map);
- uint32_t dex_pc = shadow_frame->GetDexPC();
- const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
- DCHECK(reg_bitmap != nullptr);
- num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
- for (size_t reg = 0; reg < num_regs; ++reg) {
- if (TestBitmap(reg, reg_bitmap)) {
- mirror::Object* ref = shadow_frame->GetVRegReference(reg);
- if (ref != nullptr) {
- mirror::Object* new_ref = ref;
- visitor_(&new_ref, reg, this);
- if (new_ref != ref) {
- shadow_frame->SetVRegReference(reg, new_ref);
- }
- }
+ DCHECK(m->IsNative() || shadow_frame->HasReferenceArray());
+ // handle scope for JNI or References for interpreter.
+ for (size_t reg = 0; reg < num_regs; ++reg) {
+ mirror::Object* ref = shadow_frame->GetVRegReference(reg);
+ if (ref != nullptr) {
+ mirror::Object* new_ref = ref;
+ visitor_(&new_ref, reg, this);
+ if (new_ref != ref) {
+ shadow_frame->SetVRegReference(reg, new_ref);
}
}
}
@@ -2702,11 +2678,12 @@
// Process register map (which native and runtime methods don't have)
if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
- if (GetCurrentCode().IsOptimized(sizeof(void*))) {
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ if (method_header->IsOptimized()) {
auto* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(
reinterpret_cast<uintptr_t>(cur_quick_frame));
- uintptr_t native_pc_offset = GetCurrentCode().NativeQuickPcOffset(GetCurrentQuickFramePc());
- CodeInfo code_info = GetCurrentCode().GetOptimizedCodeInfo();
+ uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
+ CodeInfo code_info = method_header->GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(map.IsValid());
@@ -2736,7 +2713,7 @@
}
}
} else {
- const uint8_t* native_gc_map = GetCurrentCode().GetNativeGcMap(sizeof(void*));
+ const uint8_t* native_gc_map = method_header->GetNativeGcMap();
CHECK(native_gc_map != nullptr) << PrettyMethod(m);
const DexFile::CodeItem* code_item = m->GetCodeItem();
// Can't be null or how would we compile its instructions?
@@ -2744,12 +2721,11 @@
NativePcOffsetToReferenceMap map(native_gc_map);
size_t num_regs = map.RegWidth() * 8;
if (num_regs > 0) {
- uintptr_t native_pc_offset =
- GetCurrentCode().NativeQuickPcOffset(GetCurrentQuickFramePc());
+ uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
const uint8_t* reg_bitmap = map.FindBitMap(native_pc_offset);
DCHECK(reg_bitmap != nullptr);
- const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
- QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
+ const VmapTable vmap_table(method_header->GetVmapTable());
+ QuickMethodFrameInfo frame_info = method_header->GetFrameInfo();
// For all dex registers in the bitmap
DCHECK(cur_quick_frame != nullptr);
for (size_t reg = 0; reg < num_regs; ++reg) {
diff --git a/runtime/thread.h b/runtime/thread.h
index 8f3461a..138c143 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -42,6 +42,8 @@
#include "stack.h"
#include "thread_state.h"
+class BacktraceMap;
+
namespace art {
namespace gc {
@@ -184,7 +186,7 @@
void ShortDump(std::ostream& os) const;
// Dumps the detailed thread state and the thread stack (used for SIGQUIT).
- void Dump(std::ostream& os) const
+ void Dump(std::ostream& os, BacktraceMap* backtrace_map = nullptr) const
REQUIRES(!Locks::thread_suspend_count_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -1042,7 +1044,7 @@
void VerifyStackImpl() SHARED_REQUIRES(Locks::mutator_lock_);
void DumpState(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_);
- void DumpStack(std::ostream& os) const
+ void DumpStack(std::ostream& os, BacktraceMap* backtrace_map = nullptr) const
REQUIRES(!Locks::thread_suspend_count_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 6176acd..bdd5d10 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -18,6 +18,7 @@
#define ATRACE_TAG ATRACE_TAG_DALVIK
+#include <backtrace/BacktraceMap.h>
#include <cutils/trace.h>
#include <dirent.h>
#include <ScopedLocalRef.h>
@@ -109,9 +110,10 @@
void ThreadList::DumpNativeStacks(std::ostream& os) {
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+ std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid()));
for (const auto& thread : list_) {
os << "DUMPING THREAD " << thread->GetTid() << "\n";
- DumpNativeStack(os, thread->GetTid(), "\t");
+ DumpNativeStack(os, thread->GetTid(), map.get(), "\t");
os << "\n";
}
}
@@ -138,7 +140,7 @@
// TODO: Reenable this when the native code in system_server can handle it.
// Currently "adb shell kill -3 `pid system_server`" will cause it to exit.
if (false) {
- DumpNativeStack(os, tid, " native: ");
+ DumpNativeStack(os, tid, nullptr, " native: ");
}
os << "\n";
}
@@ -175,7 +177,8 @@
// A closure used by Thread::Dump.
class DumpCheckpoint FINAL : public Closure {
public:
- explicit DumpCheckpoint(std::ostream* os) : os_(os), barrier_(0) {}
+ explicit DumpCheckpoint(std::ostream* os)
+ : os_(os), barrier_(0), backtrace_map_(BacktraceMap::Create(GetTid())) {}
void Run(Thread* thread) OVERRIDE {
// Note thread and self may not be equal if thread was already suspended at the point of the
@@ -184,7 +187,7 @@
std::ostringstream local_os;
{
ScopedObjectAccess soa(self);
- thread->Dump(local_os);
+ thread->Dump(local_os, backtrace_map_.get());
}
local_os << "\n";
{
@@ -213,6 +216,8 @@
std::ostream* const os_;
// The barrier to be passed through and for the requestor to wait upon.
Barrier barrier_;
+ // A backtrace map shared by all threads, so the process maps are not reacquired/parsed per thread.
+ std::unique_ptr<BacktraceMap> backtrace_map_;
};
void ThreadList::Dump(std::ostream& os) {
@@ -1217,7 +1222,7 @@
std::string thread_name;
self->GetThreadName(thread_name);
std::ostringstream os;
- DumpNativeStack(os, GetTid(), " native: ", nullptr);
+ DumpNativeStack(os, GetTid(), nullptr, " native: ", nullptr);
LOG(ERROR) << "Request to unregister unattached thread " << thread_name << "\n" << os.str();
break;
} else {
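
The motivation for threading a BacktraceMap through Thread::Dump() and DumpNativeStack() is that the process maps get parsed once and shared across every per-thread dump, rather than being rebuilt for each call. A minimal usage sketch mirroring DumpNativeStacks() above (the thread ids and two-thread shape are illustrative):

    #include <memory>
    #include <ostream>
    #include <unistd.h>

    #include <backtrace/BacktraceMap.h>

    #include "utils.h"  // art::DumpNativeStack()

    namespace art {

    // Sketch only: reuse one BacktraceMap for several native stack dumps.
    void DumpTwoNativeStacks(std::ostream& os, pid_t tid_a, pid_t tid_b) {
      std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid()));
      DumpNativeStack(os, tid_a, map.get(), "\t");
      DumpNativeStack(os, tid_b, map.get(), "\t");
      // Passing nullptr for the map would make each call build its own temporary one.
    }

    }  // namespace art
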
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 40cd6d3..dee4f9c 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -25,7 +25,6 @@
#include <unistd.h>
#include <memory>
-#include "art_code.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/stl_util.h"
@@ -37,6 +36,7 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string.h"
+#include "oat_quick_method_header.h"
#include "os.h"
#include "scoped_thread_state_change.h"
#include "utf-inl.h"
@@ -46,7 +46,9 @@
#include <sys/syscall.h>
#endif
-#include <backtrace/Backtrace.h> // For DumpNativeStack.
+// For DumpNativeStack.
+#include <backtrace/Backtrace.h>
+#include <backtrace/BacktraceMap.h>
#if defined(__linux__)
#include <linux/unistd.h>
@@ -1090,17 +1092,33 @@
map_src.c_str(), offset));
RunCommand(cmdline.c_str(), &os, prefix);
}
+
+static bool PcIsWithinQuickCode(ArtMethod* method, uintptr_t pc) NO_THREAD_SAFETY_ANALYSIS {
+ uintptr_t code = reinterpret_cast<uintptr_t>(EntryPointToCodePointer(
+ method->GetEntryPointFromQuickCompiledCode()));
+ if (code == 0) {
+ return pc == 0;
+ }
+ uintptr_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_;
+ return code <= pc && pc <= (code + code_size);
+}
#endif
-void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
- ArtMethod* current_method, ArtCode* current_code, void* ucontext_ptr) {
+void DumpNativeStack(std::ostream& os, pid_t tid, BacktraceMap* existing_map, const char* prefix,
+ ArtMethod* current_method, void* ucontext_ptr) {
#if __linux__
// b/18119146
if (RUNNING_ON_MEMORY_TOOL != 0) {
return;
}
- std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, tid));
+ BacktraceMap* map = existing_map;
+ std::unique_ptr<BacktraceMap> tmp_map;
+ if (map == nullptr) {
+ tmp_map.reset(BacktraceMap::Create(tid));
+ map = tmp_map.get();
+ }
+ std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, tid, map));
if (!backtrace->Unwind(0, reinterpret_cast<ucontext*>(ucontext_ptr))) {
os << prefix << "(backtrace::Unwind failed for thread " << tid << ")\n";
return;
@@ -1147,10 +1165,10 @@
os << "+" << it->func_offset;
}
try_addr2line = true;
- } else if (
- current_method != nullptr && Locks::mutator_lock_->IsSharedHeld(Thread::Current()) &&
- current_code->PcIsWithinQuickCode(it->pc)) {
- const void* start_of_code = current_code->GetQuickOatEntryPoint(sizeof(void*));
+ } else if (current_method != nullptr &&
+ Locks::mutator_lock_->IsSharedHeld(Thread::Current()) &&
+ PcIsWithinQuickCode(current_method, it->pc)) {
+ const void* start_of_code = current_method->GetEntryPointFromQuickCompiledCode();
os << JniLongName(current_method) << "+"
<< (it->pc - reinterpret_cast<uintptr_t>(start_of_code));
} else {
@@ -1164,7 +1182,7 @@
}
}
#else
- UNUSED(os, tid, prefix, current_method, current_code, ucontext_ptr);
+ UNUSED(os, tid, existing_map, prefix, current_method, ucontext_ptr);
#endif
}
diff --git a/runtime/utils.h b/runtime/utils.h
index 457d43f..bd52b68 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -31,6 +31,8 @@
#include "globals.h"
#include "primitive.h"
+class BacktraceMap;
+
namespace art {
class ArtCode;
@@ -221,12 +223,19 @@
void SetThreadName(const char* thread_name);
// Dumps the native stack for thread 'tid' to 'os'.
-void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix = "",
- ArtMethod* current_method = nullptr, ArtCode* current_code = nullptr, void* ucontext = nullptr)
+void DumpNativeStack(std::ostream& os,
+ pid_t tid,
+ BacktraceMap* map = nullptr,
+ const char* prefix = "",
+ ArtMethod* current_method = nullptr,
+ void* ucontext = nullptr)
NO_THREAD_SAFETY_ANALYSIS;
// Dumps the kernel stack for thread 'tid' to 'os'. Note that this is only available on linux-x86.
-void DumpKernelStack(std::ostream& os, pid_t tid, const char* prefix = "", bool include_count = true);
+void DumpKernelStack(std::ostream& os,
+ pid_t tid,
+ const char* prefix = "",
+ bool include_count = true);
// Find $ANDROID_ROOT, /system, or abort.
const char* GetAndroidRoot();
diff --git a/runtime/verifier/method_verifier-inl.h b/runtime/verifier/method_verifier-inl.h
index 2d9fd53..f52d011 100644
--- a/runtime/verifier/method_verifier-inl.h
+++ b/runtime/verifier/method_verifier-inl.h
@@ -38,6 +38,10 @@
return insn_flags_[index];
}
+inline InstructionFlags& MethodVerifier::GetInstructionFlags(size_t index) {
+ return insn_flags_[index];
+}
+
inline mirror::ClassLoader* MethodVerifier::GetClassLoader() {
return class_loader_.Get();
}
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 4051a1c..e1d4160 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -58,12 +58,14 @@
// On VLOG(verifier), should we dump the whole state when we run into a hard failure?
static constexpr bool kDumpRegLinesOnHardFailureIfVLOG = true;
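+// Register lines now live in the verifier's scoped arena, so the table no longer deletes them one by one.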
+PcToRegisterLineTable::PcToRegisterLineTable(ScopedArenaAllocator& arena)
+ : register_lines_(arena.Adapter(kArenaAllocVerifier)) {}
+
void PcToRegisterLineTable::Init(RegisterTrackingMode mode, InstructionFlags* flags,
uint32_t insns_size, uint16_t registers_size,
MethodVerifier* verifier) {
DCHECK_GT(insns_size, 0U);
- register_lines_.reset(new RegisterLine*[insns_size]());
- size_ = insns_size;
+ register_lines_.resize(insns_size);
for (uint32_t i = 0; i < insns_size; i++) {
bool interesting = false;
switch (mode) {
@@ -80,19 +82,12 @@
break;
}
if (interesting) {
- register_lines_[i] = RegisterLine::Create(registers_size, verifier);
+ register_lines_[i].reset(RegisterLine::Create(registers_size, verifier));
}
}
}
-PcToRegisterLineTable::~PcToRegisterLineTable() {
- for (size_t i = 0; i < size_; i++) {
- delete register_lines_[i];
- if (kIsDebugBuild) {
- register_lines_[i] = nullptr;
- }
- }
-}
+PcToRegisterLineTable::~PcToRegisterLineTable() {}
// Note: returns true on failure.
ALWAYS_INLINE static inline bool FailOrAbort(MethodVerifier* verifier, bool condition,
@@ -398,7 +393,10 @@
bool need_precise_constants, bool verify_to_dump,
bool allow_thread_suspension)
: self_(self),
- reg_types_(can_load_classes),
+ arena_stack_(Runtime::Current()->GetArenaPool()),
+ arena_(&arena_stack_),
+ reg_types_(can_load_classes, arena_),
+ reg_table_(arena_),
work_insn_idx_(DexFile::kDexNoIndex),
dex_method_idx_(dex_method_idx),
mirror_method_(method),
@@ -702,7 +700,11 @@
}
// Allocate and initialize an array to hold instruction data.
- insn_flags_.reset(new InstructionFlags[code_item_->insns_size_in_code_units_]());
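+ // The arena does not run constructors, so each InstructionFlags entry is explicitly initialized below.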
+ insn_flags_.reset(arena_.AllocArray<InstructionFlags>(code_item_->insns_size_in_code_units_));
+ DCHECK(insn_flags_ != nullptr);
+ std::uninitialized_fill_n(insn_flags_.get(),
+ code_item_->insns_size_in_code_units_,
+ InstructionFlags());
// Run through the instructions and see if the width checks out.
bool result = ComputeWidthsAndCountOps();
// Flag instructions guarded by a "try" block and check exception handlers.
@@ -848,7 +850,7 @@
break;
}
size_t inst_size = inst->SizeInCodeUnits();
- insn_flags_[dex_pc].SetIsOpcode();
+ GetInstructionFlags(dex_pc).SetIsOpcode();
dex_pc += inst_size;
inst = inst->RelativeAt(inst_size);
}
@@ -881,7 +883,7 @@
<< " endAddr=" << end << " (size=" << insns_size << ")";
return false;
}
- if (!insn_flags_[start].IsOpcode()) {
+ if (!GetInstructionFlags(start).IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "'try' block starts inside an instruction (" << start << ")";
return false;
@@ -889,7 +891,7 @@
uint32_t dex_pc = start;
const Instruction* inst = Instruction::At(code_item_->insns_ + dex_pc);
while (dex_pc < end) {
- insn_flags_[dex_pc].SetInTry();
+ GetInstructionFlags(dex_pc).SetInTry();
size_t insn_size = inst->SizeInCodeUnits();
dex_pc += insn_size;
inst = inst->RelativeAt(insn_size);
@@ -903,7 +905,7 @@
CatchHandlerIterator iterator(handlers_ptr);
for (; iterator.HasNext(); iterator.Next()) {
uint32_t dex_pc = iterator.GetHandlerAddress();
- if (!insn_flags_[dex_pc].IsOpcode()) {
+ if (!GetInstructionFlags(dex_pc).IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "exception handler starts at bad address (" << dex_pc << ")";
return false;
@@ -913,7 +915,7 @@
<< "exception handler begins with move-result* (" << dex_pc << ")";
return false;
}
- insn_flags_[dex_pc].SetBranchTarget();
+ GetInstructionFlags(dex_pc).SetBranchTarget();
// Ensure exception types are resolved so that they don't need resolution to be delivered,
// unresolved exception types will be ignored by exception delivery
if (iterator.GetHandlerTypeIndex() != DexFile::kDexNoIndex16) {
@@ -935,8 +937,8 @@
const Instruction* inst = Instruction::At(code_item_->insns_);
/* Flag the start of the method as a branch target, and a GC point due to stack overflow errors */
- insn_flags_[0].SetBranchTarget();
- insn_flags_[0].SetCompileTimeInfoPoint();
+ GetInstructionFlags(0).SetBranchTarget();
+ GetInstructionFlags(0).SetCompileTimeInfoPoint();
uint32_t insns_size = code_item_->insns_size_in_code_units_;
for (uint32_t dex_pc = 0; dex_pc < insns_size;) {
@@ -948,18 +950,18 @@
// All invoke points are marked as "Throw" points already.
// We are relying on this to also count all the invokes as interesting.
if (inst->IsBranch()) {
- insn_flags_[dex_pc].SetCompileTimeInfoPoint();
+ GetInstructionFlags(dex_pc).SetCompileTimeInfoPoint();
// The compiler also needs safepoints for fall-through to loop heads.
// Such a loop head must be a target of a branch.
int32_t offset = 0;
bool cond, self_ok;
bool target_ok = GetBranchOffset(dex_pc, &offset, &cond, &self_ok);
DCHECK(target_ok);
- insn_flags_[dex_pc + offset].SetCompileTimeInfoPoint();
+ GetInstructionFlags(dex_pc + offset).SetCompileTimeInfoPoint();
} else if (inst->IsSwitch() || inst->IsThrow()) {
- insn_flags_[dex_pc].SetCompileTimeInfoPoint();
+ GetInstructionFlags(dex_pc).SetCompileTimeInfoPoint();
} else if (inst->IsReturn()) {
- insn_flags_[dex_pc].SetCompileTimeInfoPointAndReturn();
+ GetInstructionFlags(dex_pc).SetCompileTimeInfoPointAndReturn();
}
dex_pc += inst->SizeInCodeUnits();
inst = inst->Next();
@@ -1202,7 +1204,7 @@
}
// Make sure the array-data is marked as an opcode. This ensures that it was reached when
// traversing the code item linearly. It is an approximation for a by-spec padding value.
- if (!insn_flags_[cur_offset + array_data_offset].IsOpcode()) {
+ if (!GetInstructionFlags(cur_offset + array_data_offset).IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array data table at " << cur_offset
<< ", data offset " << array_data_offset
<< " not correctly visited, probably bad padding.";
@@ -1245,13 +1247,13 @@
int32_t abs_offset = cur_offset + offset;
if (abs_offset < 0 ||
(uint32_t) abs_offset >= insn_count ||
- !insn_flags_[abs_offset].IsOpcode()) {
+ !GetInstructionFlags(abs_offset).IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid branch target " << offset << " (-> "
<< reinterpret_cast<void*>(abs_offset) << ") at "
<< reinterpret_cast<void*>(cur_offset);
return false;
}
- insn_flags_[abs_offset].SetBranchTarget();
+ GetInstructionFlags(abs_offset).SetBranchTarget();
return true;
}
@@ -1315,7 +1317,7 @@
}
// Make sure the switch data is marked as an opcode. This ensures that it was reached when
// traversing the code item linearly. It is an approximation for a by-spec padding value.
- if (!insn_flags_[cur_offset + switch_offset].IsOpcode()) {
+ if (!GetInstructionFlags(cur_offset + switch_offset).IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "switch table at " << cur_offset
<< ", switch offset " << switch_offset
<< " not correctly visited, probably bad padding.";
@@ -1387,14 +1389,14 @@
int32_t abs_offset = cur_offset + offset;
if (abs_offset < 0 ||
abs_offset >= static_cast<int32_t>(insn_count) ||
- !insn_flags_[abs_offset].IsOpcode()) {
+ !GetInstructionFlags(abs_offset).IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch target " << offset
<< " (-> " << reinterpret_cast<void*>(abs_offset) << ") at "
<< reinterpret_cast<void*>(cur_offset)
<< "[" << targ << "]";
return false;
}
- insn_flags_[abs_offset].SetBranchTarget();
+ GetInstructionFlags(abs_offset).SetBranchTarget();
}
return true;
}
@@ -1435,7 +1437,6 @@
registers_size,
this);
-
work_line_.reset(RegisterLine::Create(registers_size, this));
saved_line_.reset(RegisterLine::Create(registers_size, this));
@@ -1491,7 +1492,7 @@
vios->Stream() << reg_line->Dump(this) << "\n";
}
vios->Stream()
- << StringPrintf("0x%04zx", dex_pc) << ": " << insn_flags_[dex_pc].ToString() << " ";
+ << StringPrintf("0x%04zx", dex_pc) << ": " << GetInstructionFlags(dex_pc).ToString() << " ";
const bool kDumpHexOfInstruction = false;
if (kDumpHexOfInstruction) {
vios->Stream() << inst->DumpHex(5) << " ";
@@ -1677,7 +1678,7 @@
const uint32_t insns_size = code_item_->insns_size_in_code_units_;
/* Begin by marking the first instruction as "changed". */
- insn_flags_[0].SetChanged();
+ GetInstructionFlags(0).SetChanged();
uint32_t start_guess = 0;
/* Continue until no instructions are marked "changed". */
@@ -1688,7 +1689,7 @@
// Find the first marked one. Use "start_guess" as a way to find one quickly.
uint32_t insn_idx = start_guess;
for (; insn_idx < insns_size; insn_idx++) {
- if (insn_flags_[insn_idx].IsChanged())
+ if (GetInstructionFlags(insn_idx).IsChanged())
break;
}
if (insn_idx == insns_size) {
@@ -1708,7 +1709,7 @@
// situation where we have a stray "changed" flag set on an instruction that isn't a branch
// target.
work_insn_idx_ = insn_idx;
- if (insn_flags_[insn_idx].IsBranchTarget()) {
+ if (GetInstructionFlags(insn_idx).IsBranchTarget()) {
work_line_->CopyFromLine(reg_table_.GetLine(insn_idx));
} else if (kIsDebugBuild) {
/*
@@ -1734,8 +1735,8 @@
return false;
}
/* Clear "changed" and mark as visited. */
- insn_flags_[insn_idx].SetVisited();
- insn_flags_[insn_idx].ClearChanged();
+ GetInstructionFlags(insn_idx).SetVisited();
+ GetInstructionFlags(insn_idx).ClearChanged();
}
if (gDebugVerify) {
@@ -1762,10 +1763,10 @@
(insns[insn_idx + 1] == Instruction::kPackedSwitchSignature ||
insns[insn_idx + 1] == Instruction::kSparseSwitchSignature ||
insns[insn_idx + 1] == Instruction::kArrayDataSignature))) {
- insn_flags_[insn_idx].SetVisited();
+ GetInstructionFlags(insn_idx).SetVisited();
}
- if (!insn_flags_[insn_idx].IsVisited()) {
+ if (!GetInstructionFlags(insn_idx).IsVisited()) {
if (dead_start < 0)
dead_start = insn_idx;
} else if (dead_start >= 0) {
@@ -1895,8 +1896,8 @@
// We need to ensure the work line is consistent while performing validation. When we spot a
// peephole pattern we compute a new line for either the fallthrough instruction or the
// branch target.
- std::unique_ptr<RegisterLine> branch_line;
- std::unique_ptr<RegisterLine> fallthrough_line;
+ ArenaUniquePtr<RegisterLine> branch_line;
+ ArenaUniquePtr<RegisterLine> fallthrough_line;
switch (inst->Opcode()) {
case Instruction::NOP:
@@ -2144,9 +2145,9 @@
work_line_->PushMonitor(this, inst->VRegA_11x(), work_insn_idx_);
// Check whether the previous instruction is a move-object with vAA as a source, creating
// untracked lock aliasing.
- if (0 != work_insn_idx_ && !insn_flags_[work_insn_idx_].IsBranchTarget()) {
+ if (0 != work_insn_idx_ && !GetInstructionFlags(work_insn_idx_).IsBranchTarget()) {
uint32_t prev_idx = work_insn_idx_ - 1;
- while (0 != prev_idx && !insn_flags_[prev_idx].IsOpcode()) {
+ while (0 != prev_idx && !GetInstructionFlags(prev_idx).IsOpcode()) {
prev_idx--;
}
const Instruction* prev_inst = Instruction::At(code_item_->insns_ + prev_idx);
@@ -2427,10 +2428,10 @@
uint32_t instance_of_idx = 0;
if (0 != work_insn_idx_) {
instance_of_idx = work_insn_idx_ - 1;
- while (0 != instance_of_idx && !insn_flags_[instance_of_idx].IsOpcode()) {
+ while (0 != instance_of_idx && !GetInstructionFlags(instance_of_idx).IsOpcode()) {
instance_of_idx--;
}
- if (FailOrAbort(this, insn_flags_[instance_of_idx].IsOpcode(),
+ if (FailOrAbort(this, GetInstructionFlags(instance_of_idx).IsOpcode(),
"Unable to get previous instruction of if-eqz/if-nez for work index ",
work_insn_idx_)) {
break;
@@ -2486,15 +2487,15 @@
update_line->SetRegisterType<LockOp::kKeep>(this,
instance_of_inst->VRegB_22c(),
cast_type);
- if (!insn_flags_[instance_of_idx].IsBranchTarget() && 0 != instance_of_idx) {
+ if (!GetInstructionFlags(instance_of_idx).IsBranchTarget() && 0 != instance_of_idx) {
// See if instance-of was preceded by a move-object operation, common due to the small
// register encoding space of instance-of, and propagate type information to the source
// of the move-object.
uint32_t move_idx = instance_of_idx - 1;
- while (0 != move_idx && !insn_flags_[move_idx].IsOpcode()) {
+ while (0 != move_idx && !GetInstructionFlags(move_idx).IsOpcode()) {
move_idx--;
}
- if (FailOrAbort(this, insn_flags_[move_idx].IsOpcode(),
+ if (FailOrAbort(this, GetInstructionFlags(move_idx).IsOpcode(),
"Unable to get previous instruction of if-eqz/if-nez for work index ",
work_insn_idx_)) {
break;
@@ -2786,8 +2787,7 @@
work_line_->MarkRefsAsInitialized(this, this_type, this_reg, work_insn_idx_);
}
if (return_type == nullptr) {
- return_type = &reg_types_.FromDescriptor(GetClassLoader(), return_type_descriptor,
- false);
+ return_type = &reg_types_.FromDescriptor(GetClassLoader(), return_type_descriptor, false);
}
if (!return_type->IsLowHalf()) {
work_line_->SetResultRegisterType(this, *return_type);
@@ -2860,7 +2860,7 @@
uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
- descriptor = dex_file_->StringByTypeIdx(return_type_idx);
+ descriptor = dex_file_->StringByTypeIdx(return_type_idx);
} else {
descriptor = abs_method->GetReturnTypeDescriptor();
}
@@ -3309,7 +3309,7 @@
return false;
}
/* update branch target, set "changed" if appropriate */
- if (nullptr != branch_line.get()) {
+ if (nullptr != branch_line) {
if (!UpdateRegisters(work_insn_idx_ + branch_target, branch_line.get(), false)) {
return false;
}
@@ -3364,7 +3364,7 @@
* Handle instructions that can throw and that are sitting in a "try" block. (If they're not in a
* "try" block when they throw, control transfers out of the method.)
*/
- if ((opcode_flags & Instruction::kThrow) != 0 && insn_flags_[work_insn_idx_].IsInTry()) {
+ if ((opcode_flags & Instruction::kThrow) != 0 && GetInstructionFlags(work_insn_idx_).IsInTry()) {
bool has_catch_all_handler = false;
CatchHandlerIterator iterator(*code_item_, work_insn_idx_);
@@ -3434,11 +3434,11 @@
if (!CheckNotMoveException(code_item_->insns_, next_insn_idx)) {
return false;
}
- if (nullptr != fallthrough_line.get()) {
+ if (nullptr != fallthrough_line) {
// Make workline consistent with fallthrough computed from peephole optimization.
work_line_->CopyFromLine(fallthrough_line.get());
}
- if (insn_flags_[next_insn_idx].IsReturn()) {
+ if (GetInstructionFlags(next_insn_idx).IsReturn()) {
// For returns we only care about the operand to the return, all other registers are dead.
const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn_idx);
AdjustReturnLine(this, ret_inst, work_line_.get());
@@ -3456,7 +3456,7 @@
* We're not recording register data for the next instruction, so we don't know what the
* prior state was. We have to assume that something has changed and re-evaluate it.
*/
- insn_flags_[next_insn_idx].SetChanged();
+ GetInstructionFlags(next_insn_idx).SetChanged();
}
}
@@ -3480,7 +3480,7 @@
}
DCHECK_LT(*start_guess, code_item_->insns_size_in_code_units_);
- DCHECK(insn_flags_[*start_guess].IsOpcode());
+ DCHECK(GetInstructionFlags(*start_guess).IsOpcode());
if (have_pending_runtime_throw_failure_) {
have_any_pending_runtime_throw_failure_ = true;
@@ -3491,30 +3491,55 @@
return true;
} // NOLINT(readability/fn_size)
+void MethodVerifier::UninstantiableError(const char* descriptor) {
+ Fail(VerifyError::VERIFY_ERROR_NO_CLASS) << "Could not create precise reference for "
+ << "non-instantiable klass " << descriptor;
+}
+
+inline bool MethodVerifier::IsInstantiableOrPrimitive(mirror::Class* klass) {
+ return klass->IsInstantiable() || klass->IsPrimitive();
+}
+
const RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
- const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
- const RegType& referrer = GetDeclaringClass();
mirror::Class* klass = dex_cache_->GetResolvedType(class_idx);
- const RegType& result = klass != nullptr ?
- FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes()) :
- reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
- if (result.IsConflict()) {
- Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "accessing broken descriptor '" << descriptor
- << "' in " << referrer;
- return result;
+ const RegType* result = nullptr;
+ if (klass != nullptr) {
+ bool precise = klass->CannotBeAssignedFromOtherTypes();
+ if (precise && !IsInstantiableOrPrimitive(klass)) {
+ const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+ UninstantiableError(descriptor);
+ precise = false;
+ }
+ result = reg_types_.FindClass(klass, precise);
+ if (result == nullptr) {
+ const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+ result = reg_types_.InsertClass(descriptor, klass, precise);
+ }
+ } else {
+ const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+ result = &reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
}
- if (klass == nullptr && !result.IsUnresolvedTypes()) {
- dex_cache_->SetResolvedType(class_idx, result.GetClass());
+ DCHECK(result != nullptr);
+ if (result->IsConflict()) {
+ const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+ Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "accessing broken descriptor '" << descriptor
+ << "' in " << GetDeclaringClass();
+ return *result;
+ }
+ if (klass == nullptr && !result->IsUnresolvedTypes()) {
+ dex_cache_->SetResolvedType(class_idx, result->GetClass());
}
// Check if access is allowed. Unresolved types use xxxWithAccessCheck to
// check at runtime if access is allowed and so pass here. If result is
// primitive, skip the access check.
- if (result.IsNonZeroReferenceTypes() && !result.IsUnresolvedTypes() &&
- !referrer.IsUnresolvedTypes() && !referrer.CanAccess(result)) {
- Fail(VERIFY_ERROR_ACCESS_CLASS) << "illegal class access: '"
- << referrer << "' -> '" << result << "'";
+ if (result->IsNonZeroReferenceTypes() && !result->IsUnresolvedTypes()) {
+ const RegType& referrer = GetDeclaringClass();
+ if (!referrer.IsUnresolvedTypes() && !referrer.CanAccess(*result)) {
+ Fail(VERIFY_ERROR_ACCESS_CLASS) << "illegal class access: '"
+ << referrer << "' -> '" << *result << "'";
+ }
}
- return result;
+ return *result;
}
const RegType& MethodVerifier::GetCaughtExceptionType() {
@@ -3720,9 +3745,10 @@
} else {
const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const uint16_t class_idx = dex_file_->GetMethodId(method_idx).class_idx_;
- res_method_class = &reg_types_.FromDescriptor(GetClassLoader(),
- dex_file_->StringByTypeIdx(class_idx),
- false);
+ res_method_class = &reg_types_.FromDescriptor(
+ GetClassLoader(),
+ dex_file_->StringByTypeIdx(class_idx),
+ false);
}
if (!res_method_class->IsAssignableFrom(actual_arg_type)) {
Fail(actual_arg_type.IsUnresolvedTypes() ? VERIFY_ERROR_NO_CLASS:
@@ -4476,14 +4502,16 @@
field->GetType<false>();
if (field_type_class != nullptr) {
- field_type = &FromClass(field->GetTypeDescriptor(), field_type_class,
+ field_type = &FromClass(field->GetTypeDescriptor(),
+ field_type_class,
field_type_class->CannotBeAssignedFromOtherTypes());
} else {
Thread* self = Thread::Current();
DCHECK(!can_load_classes_ || self->IsExceptionPending());
self->ClearException();
field_type = &reg_types_.FromDescriptor(field->GetDeclaringClass()->GetClassLoader(),
- field->GetTypeDescriptor(), false);
+ field->GetTypeDescriptor(),
+ false);
}
if (field_type == nullptr) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field type from " << inst->Name();
@@ -4604,14 +4632,14 @@
bool update_merge_line) {
bool changed = true;
RegisterLine* target_line = reg_table_.GetLine(next_insn);
- if (!insn_flags_[next_insn].IsVisitedOrChanged()) {
+ if (!GetInstructionFlags(next_insn).IsVisitedOrChanged()) {
/*
* We haven't processed this instruction before, and we haven't touched the registers here, so
* there's nothing to "merge". Copy the registers over and mark it as changed. (This is the
* only way a register can transition out of "unknown", so this is not just an optimization.)
*/
target_line->CopyFromLine(merge_line);
- if (insn_flags_[next_insn].IsReturn()) {
+ if (GetInstructionFlags(next_insn).IsReturn()) {
// Verify that the monitor stack is empty on return.
merge_line->VerifyMonitorStackEmpty(this);
@@ -4621,10 +4649,9 @@
AdjustReturnLine(this, ret_inst, target_line);
}
} else {
- std::unique_ptr<RegisterLine> copy(gDebugVerify ?
- RegisterLine::Create(target_line->NumRegs(), this) :
- nullptr);
+ ArenaUniquePtr<RegisterLine> copy;
if (gDebugVerify) {
+ copy.reset(RegisterLine::Create(target_line->NumRegs(), this));
copy->CopyFromLine(target_line);
}
changed = target_line->MergeRegisters(this, merge_line);
@@ -4643,13 +4670,13 @@
}
}
if (changed) {
- insn_flags_[next_insn].SetChanged();
+ GetInstructionFlags(next_insn).SetChanged();
}
return true;
}
InstructionFlags* MethodVerifier::CurrentInsnFlags() {
- return &insn_flags_[work_insn_idx_];
+ return &GetInstructionFlags(work_insn_idx_);
}
const RegType& MethodVerifier::GetMethodReturnType() {
@@ -4685,8 +4712,7 @@
= dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
if (mirror_method_ != nullptr) {
mirror::Class* klass = mirror_method_->GetDeclaringClass();
- declaring_class_ = &FromClass(descriptor, klass,
- klass->CannotBeAssignedFromOtherTypes());
+ declaring_class_ = &FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes());
} else {
declaring_class_ = &reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
}
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index ba694b7..7b51d6e 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -21,7 +21,10 @@
#include <sstream>
#include <vector>
+#include "base/arena_allocator.h"
#include "base/macros.h"
+#include "base/scoped_arena_containers.h"
+#include "base/stl_util.h"
#include "dex_file.h"
#include "handle.h"
#include "instruction_flags.h"
@@ -107,7 +110,7 @@
// execution of that instruction.
class PcToRegisterLineTable {
public:
- PcToRegisterLineTable() : size_(0) {}
+ explicit PcToRegisterLineTable(ScopedArenaAllocator& arena);
~PcToRegisterLineTable();
// Initialize the RegisterTable. Every instruction address can have a different set of information
@@ -116,14 +119,12 @@
void Init(RegisterTrackingMode mode, InstructionFlags* flags, uint32_t insns_size,
uint16_t registers_size, MethodVerifier* verifier);
- RegisterLine* GetLine(size_t idx) {
- DCHECK_LT(idx, size_);
- return register_lines_[idx];
+ RegisterLine* GetLine(size_t idx) const {
+ return register_lines_[idx].get();
}
private:
- std::unique_ptr<RegisterLine*[]> register_lines_;
- size_t size_;
+ ScopedArenaVector<ArenaUniquePtr<RegisterLine>> register_lines_;
DISALLOW_COPY_AND_ASSIGN(PcToRegisterLineTable);
};
@@ -240,7 +241,8 @@
// Accessors used by the compiler via CompilerCallback
const DexFile::CodeItem* CodeItem() const;
RegisterLine* GetRegLine(uint32_t dex_pc);
- const InstructionFlags& GetInstructionFlags(size_t index) const;
+ ALWAYS_INLINE const InstructionFlags& GetInstructionFlags(size_t index) const;
+ ALWAYS_INLINE InstructionFlags& GetInstructionFlags(size_t index);
mirror::ClassLoader* GetClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
MethodReference GetMethodReference() const;
@@ -275,7 +277,14 @@
return IsConstructor() && !IsStatic();
}
+ ScopedArenaAllocator& GetArena() {
+ return arena_;
+ }
+
private:
+ void UninstantiableError(const char* descriptor);
+ static bool IsInstantiableOrPrimitive(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+
// Is the method being verified a constructor? See the comment on the field.
bool IsConstructor() const {
return is_constructor_;
@@ -687,19 +696,23 @@
// The thread we're verifying on.
Thread* const self_;
+ // Arena allocator.
+ ArenaStack arena_stack_;
+ ScopedArenaAllocator arena_;
+
RegTypeCache reg_types_;
PcToRegisterLineTable reg_table_;
// Storage for the register status we're currently working on.
- std::unique_ptr<RegisterLine> work_line_;
+ ArenaUniquePtr<RegisterLine> work_line_;
// The address of the instruction we're currently working on, note that this is in 2 byte
// quantities
uint32_t work_insn_idx_;
// Storage for the register status we're saving for later.
- std::unique_ptr<RegisterLine> saved_line_;
+ ArenaUniquePtr<RegisterLine> saved_line_;
const uint32_t dex_method_idx_; // The method we're working on.
// Its object representation if known.
@@ -715,7 +728,8 @@
const DexFile::CodeItem* const code_item_; // The code item containing the code for the method.
const RegType* declaring_class_; // Lazily computed reg type of the method's declaring class.
// Instruction widths and flags, one entry per code unit.
- std::unique_ptr<InstructionFlags[]> insn_flags_;
+ // Owned, but not unique_ptr since insn_flags_ are allocated in arenas.
+ ArenaUniquePtr<InstructionFlags[]> insn_flags_;
// The dex PC of a FindLocksAtDexPc request, -1 otherwise.
uint32_t interesting_dex_pc_;
// The container into which FindLocksAtDexPc should write the registers containing held locks,
diff --git a/runtime/verifier/reg_type-inl.h b/runtime/verifier/reg_type-inl.h
index f445132..11a53e5 100644
--- a/runtime/verifier/reg_type-inl.h
+++ b/runtime/verifier/reg_type-inl.h
@@ -20,6 +20,7 @@
#include "reg_type.h"
#include "base/casts.h"
+#include "base/scoped_arena_allocator.h"
#include "mirror/class.h"
namespace art {
@@ -180,6 +181,10 @@
return instance_;
}
+inline void* RegType::operator new(size_t size, ScopedArenaAllocator* arena) {
+ return arena->Alloc(size, kArenaAllocMisc);
+}
+
} // namespace verifier
} // namespace art
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index b86a4c8..16cab03 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -16,6 +16,7 @@
#include "reg_type-inl.h"
+#include "base/arena_bit_vector.h"
#include "base/bit_vector-inl.h"
#include "base/casts.h"
#include "class_linker-inl.h"
@@ -46,20 +47,17 @@
const DoubleHiType* DoubleHiType::instance_ = nullptr;
const IntegerType* IntegerType::instance_ = nullptr;
-PrimitiveType::PrimitiveType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+PrimitiveType::PrimitiveType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id)
: RegType(klass, descriptor, cache_id) {
CHECK(klass != nullptr);
CHECK(!descriptor.empty());
}
-Cat1Type::Cat1Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+Cat1Type::Cat1Type(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id)
: PrimitiveType(klass, descriptor, cache_id) {
}
-Cat2Type::Cat2Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+Cat2Type::Cat2Type(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id)
: PrimitiveType(klass, descriptor, cache_id) {
}
@@ -121,11 +119,11 @@
}
std::string IntegerType::Dump() const {
- return "Integer";
+ return "Integer";
}
const DoubleHiType* DoubleHiType::CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new DoubleHiType(klass, descriptor, cache_id);
@@ -140,7 +138,7 @@
}
const DoubleLoType* DoubleLoType::CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new DoubleLoType(klass, descriptor, cache_id);
@@ -154,14 +152,14 @@
}
}
-const LongLoType* LongLoType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const LongLoType* LongLoType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new LongLoType(klass, descriptor, cache_id);
return instance_;
}
-const LongHiType* LongHiType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const LongHiType* LongHiType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new LongHiType(klass, descriptor, cache_id);
@@ -182,7 +180,7 @@
}
}
-const FloatType* FloatType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const FloatType* FloatType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new FloatType(klass, descriptor, cache_id);
@@ -196,7 +194,7 @@
}
}
-const CharType* CharType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const CharType* CharType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new CharType(klass, descriptor, cache_id);
@@ -210,7 +208,7 @@
}
}
-const ShortType* ShortType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const ShortType* ShortType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new ShortType(klass, descriptor, cache_id);
@@ -224,7 +222,7 @@
}
}
-const ByteType* ByteType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const ByteType* ByteType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new ByteType(klass, descriptor, cache_id);
@@ -238,7 +236,7 @@
}
}
-const IntegerType* IntegerType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const IntegerType* IntegerType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new IntegerType(klass, descriptor, cache_id);
@@ -253,7 +251,7 @@
}
const ConflictType* ConflictType::CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new ConflictType(klass, descriptor, cache_id);
@@ -267,7 +265,7 @@
}
}
-const BooleanType* BooleanType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const BooleanType* BooleanType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(BooleanType::instance_ == nullptr);
instance_ = new BooleanType(klass, descriptor, cache_id);
@@ -286,7 +284,7 @@
}
const UndefinedType* UndefinedType::CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new UndefinedType(klass, descriptor, cache_id);
@@ -300,7 +298,7 @@
}
}
-PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, const std::string& descriptor,
+PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id)
: RegType(klass, descriptor, cache_id) {
// Note: no check for IsInstantiable() here. We may produce this in case an InstantiationError
@@ -335,14 +333,14 @@
std::string UnresolvedReferenceType::Dump() const {
std::stringstream result;
- result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor().c_str());
+ result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor().as_string().c_str());
return result.str();
}
std::string UnresolvedUninitializedRefType::Dump() const {
std::stringstream result;
result << "Unresolved And Uninitialized Reference" << ": "
- << PrettyDescriptor(GetDescriptor().c_str())
+ << PrettyDescriptor(GetDescriptor().as_string().c_str())
<< " Allocation PC: " << GetAllocationPc();
return result.str();
}
@@ -350,7 +348,7 @@
std::string UnresolvedUninitializedThisRefType::Dump() const {
std::stringstream result;
result << "Unresolved And Uninitialized This Reference"
- << PrettyDescriptor(GetDescriptor().c_str());
+ << PrettyDescriptor(GetDescriptor().as_string().c_str());
return result.str();
}
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 2834a9a..80b751c 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -22,9 +22,11 @@
#include <set>
#include <string>
+#include "base/arena_object.h"
#include "base/bit_vector.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "base/stringpiece.h"
#include "gc_root.h"
#include "handle_scope.h"
#include "object_callbacks.h"
@@ -35,6 +37,9 @@
class Class;
} // namespace mirror
+class ArenaBitVector;
+class ScopedArenaAllocator;
+
namespace verifier {
class RegTypeCache;
@@ -173,7 +178,7 @@
bool IsJavaLangObjectArray() const
SHARED_REQUIRES(Locks::mutator_lock_);
bool IsInstantiableTypes() const SHARED_REQUIRES(Locks::mutator_lock_);
- const std::string& GetDescriptor() const {
+ const StringPiece& GetDescriptor() const {
DCHECK(HasClass() ||
(IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
!IsUnresolvedSuperClass()));
@@ -274,10 +279,20 @@
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) const
SHARED_REQUIRES(Locks::mutator_lock_);
+ static void* operator new(size_t size) noexcept {
+ return ::operator new(size);
+ }
+
+ static void* operator new(size_t size, ArenaAllocator* arena) = delete;
+ static void* operator new(size_t size, ScopedArenaAllocator* arena);
+
protected:
- RegType(mirror::Class* klass, const std::string& descriptor,
+ RegType(mirror::Class* klass,
+ const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
- : descriptor_(descriptor), klass_(klass), cache_id_(cache_id) {
+ : descriptor_(descriptor),
+ klass_(klass),
+ cache_id_(cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
}
@@ -285,9 +300,8 @@
void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
- const std::string descriptor_;
- mutable GcRoot<mirror::Class>
- klass_; // Non-const only due to moving classes.
+ const StringPiece descriptor_;
+ mutable GcRoot<mirror::Class> klass_; // Non-const only due to moving classes.
const uint16_t cache_id_;
friend class RegTypeCache;
@@ -311,7 +325,7 @@
// Create the singleton instance.
static const ConflictType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -319,7 +333,7 @@
static void Destroy();
private:
- ConflictType(mirror::Class* klass, const std::string& descriptor,
+ ConflictType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: RegType(klass, descriptor, cache_id) {}
@@ -340,7 +354,7 @@
// Create the singleton instance.
static const UndefinedType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -348,7 +362,7 @@
static void Destroy();
private:
- UndefinedType(mirror::Class* klass, const std::string& descriptor,
+ UndefinedType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: RegType(klass, descriptor, cache_id) {}
@@ -357,7 +371,7 @@
class PrimitiveType : public RegType {
public:
- PrimitiveType(mirror::Class* klass, const std::string& descriptor,
+ PrimitiveType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
bool HasClassVirtual() const OVERRIDE { return true; }
@@ -365,7 +379,7 @@
class Cat1Type : public PrimitiveType {
public:
- Cat1Type(mirror::Class* klass, const std::string& descriptor,
+ Cat1Type(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
};
@@ -374,14 +388,14 @@
bool IsInteger() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const IntegerType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const IntegerType* GetInstance() PURE;
static void Destroy();
private:
- IntegerType(mirror::Class* klass, const std::string& descriptor,
+ IntegerType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const IntegerType* instance_;
@@ -392,14 +406,14 @@
bool IsBoolean() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const BooleanType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const BooleanType* GetInstance() PURE;
static void Destroy();
private:
- BooleanType(mirror::Class* klass, const std::string& descriptor,
+ BooleanType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
@@ -411,14 +425,14 @@
bool IsByte() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const ByteType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const ByteType* GetInstance() PURE;
static void Destroy();
private:
- ByteType(mirror::Class* klass, const std::string& descriptor,
+ ByteType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const ByteType* instance_;
@@ -429,14 +443,14 @@
bool IsShort() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const ShortType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const ShortType* GetInstance() PURE;
static void Destroy();
private:
- ShortType(mirror::Class* klass, const std::string& descriptor,
+ ShortType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const ShortType* instance_;
@@ -447,14 +461,14 @@
bool IsChar() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const CharType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const CharType* GetInstance() PURE;
static void Destroy();
private:
- CharType(mirror::Class* klass, const std::string& descriptor,
+ CharType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const CharType* instance_;
@@ -465,14 +479,14 @@
bool IsFloat() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const FloatType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const FloatType* GetInstance() PURE;
static void Destroy();
private:
- FloatType(mirror::Class* klass, const std::string& descriptor,
+ FloatType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const FloatType* instance_;
@@ -480,7 +494,7 @@
class Cat2Type : public PrimitiveType {
public:
- Cat2Type(mirror::Class* klass, const std::string& descriptor,
+ Cat2Type(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
};
@@ -490,14 +504,14 @@
bool IsLongLo() const OVERRIDE { return true; }
bool IsLong() const OVERRIDE { return true; }
static const LongLoType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const LongLoType* GetInstance() PURE;
static void Destroy();
private:
- LongLoType(mirror::Class* klass, const std::string& descriptor,
+ LongLoType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const LongLoType* instance_;
@@ -508,14 +522,14 @@
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
bool IsLongHi() const OVERRIDE { return true; }
static const LongHiType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const LongHiType* GetInstance() PURE;
static void Destroy();
private:
- LongHiType(mirror::Class* klass, const std::string& descriptor,
+ LongHiType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const LongHiType* instance_;
@@ -527,14 +541,14 @@
bool IsDoubleLo() const OVERRIDE { return true; }
bool IsDouble() const OVERRIDE { return true; }
static const DoubleLoType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const DoubleLoType* GetInstance() PURE;
static void Destroy();
private:
- DoubleLoType(mirror::Class* klass, const std::string& descriptor,
+ DoubleLoType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const DoubleLoType* instance_;
@@ -545,14 +559,14 @@
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
virtual bool IsDoubleHi() const OVERRIDE { return true; }
static const DoubleHiType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const DoubleHiType* GetInstance() PURE;
static void Destroy();
private:
- DoubleHiType(mirror::Class* klass, const std::string& descriptor,
+ DoubleHiType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const DoubleHiType* instance_;
@@ -677,7 +691,7 @@
// instructions and must be passed to a constructor.
class UninitializedType : public RegType {
public:
- UninitializedType(mirror::Class* klass, const std::string& descriptor,
+ UninitializedType(mirror::Class* klass, const StringPiece& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
: RegType(klass, descriptor, cache_id), allocation_pc_(allocation_pc) {}
@@ -697,7 +711,7 @@
class UninitializedReferenceType FINAL : public UninitializedType {
public:
UninitializedReferenceType(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_)
: UninitializedType(klass, descriptor, allocation_pc, cache_id) {}
@@ -713,7 +727,7 @@
// constructor.
class UnresolvedUninitializedRefType FINAL : public UninitializedType {
public:
- UnresolvedUninitializedRefType(const std::string& descriptor,
+ UnresolvedUninitializedRefType(const StringPiece& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_)
: UninitializedType(nullptr, descriptor, allocation_pc, cache_id) {
@@ -737,7 +751,7 @@
class UninitializedThisReferenceType FINAL : public UninitializedType {
public:
UninitializedThisReferenceType(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_)
: UninitializedType(klass, descriptor, 0, cache_id) {
@@ -758,7 +772,7 @@
class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
public:
- UnresolvedUninitializedThisRefType(const std::string& descriptor,
+ UnresolvedUninitializedThisRefType(const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_)
: UninitializedType(nullptr, descriptor, 0, cache_id) {
@@ -781,7 +795,7 @@
// sub-class.
class ReferenceType FINAL : public RegType {
public:
- ReferenceType(mirror::Class* klass, const std::string& descriptor,
+ ReferenceType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: RegType(klass, descriptor, cache_id) {}
@@ -799,7 +813,7 @@
// type.
class PreciseReferenceType FINAL : public RegType {
public:
- PreciseReferenceType(mirror::Class* klass, const std::string& descriptor,
+ PreciseReferenceType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -815,7 +829,7 @@
// Common parent of unresolved types.
class UnresolvedType : public RegType {
public:
- UnresolvedType(const std::string& descriptor, uint16_t cache_id)
+ UnresolvedType(const StringPiece& descriptor, uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_)
: RegType(nullptr, descriptor, cache_id) {}
@@ -827,7 +841,7 @@
// of this type must be conservative.
class UnresolvedReferenceType FINAL : public UnresolvedType {
public:
- UnresolvedReferenceType(const std::string& descriptor, uint16_t cache_id)
+ UnresolvedReferenceType(const StringPiece& descriptor, uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_)
: UnresolvedType(descriptor, cache_id) {
if (kIsDebugBuild) {
@@ -882,8 +896,10 @@
class UnresolvedMergedType FINAL : public UnresolvedType {
public:
// Note: the constructor will copy the unresolved BitVector, not use it directly.
- UnresolvedMergedType(const RegType& resolved, const BitVector& unresolved,
- const RegTypeCache* reg_type_cache, uint16_t cache_id)
+ UnresolvedMergedType(const RegType& resolved,
+ const BitVector& unresolved,
+ const RegTypeCache* reg_type_cache,
+ uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
// The resolved part. See description below.
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index b6f253b..68af62e 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -118,6 +118,18 @@
}
}
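+// Appends a freshly created type to the cache and, when it carries a resolved class, also records it in
+// klass_entries_ so later class-based lookups can find it.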
+template <class RegTypeType>
+inline RegTypeType& RegTypeCache::AddEntry(RegTypeType* new_entry) {
+ DCHECK(new_entry != nullptr);
+ entries_.push_back(new_entry);
+ if (new_entry->HasClass()) {
+ mirror::Class* klass = new_entry->GetClass();
+ DCHECK(!klass->IsPrimitive());
+ klass_entries_.push_back(std::make_pair(GcRoot<mirror::Class>(klass), new_entry));
+ }
+ return *new_entry;
+}
+
} // namespace verifier
} // namespace art
#endif // ART_RUNTIME_VERIFIER_REG_TYPE_CACHE_INL_H_
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index bb756e9..71ed4a2 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -16,7 +16,9 @@
#include "reg_type_cache-inl.h"
+#include "base/arena_bit_vector.h"
#include "base/casts.h"
+#include "base/scoped_arena_allocator.h"
#include "base/stl_util.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
@@ -29,9 +31,10 @@
bool RegTypeCache::primitive_initialized_ = false;
uint16_t RegTypeCache::primitive_count_ = 0;
-const PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
+const PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant -
+ kMinSmallConstant + 1];
-static bool MatchingPrecisionForClass(const RegType* entry, bool precise)
+ALWAYS_INLINE static inline bool MatchingPrecisionForClass(const RegType* entry, bool precise)
SHARED_REQUIRES(Locks::mutator_lock_) {
if (entry->IsPreciseReference() == precise) {
// We were or weren't looking for a precise reference and we found what we need.
@@ -67,7 +70,8 @@
DCHECK_EQ(entries_.size(), primitive_count_);
}
-const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor,
+const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader,
+ const char* descriptor,
bool precise) {
DCHECK(RegTypeCache::primitive_initialized_);
if (descriptor[1] == '\0') {
@@ -159,13 +163,20 @@
return klass;
}
-const RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descriptor,
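+// Copies the descriptor into the cache's arena so the returned StringPiece stays valid for as long as the arena does.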
+StringPiece RegTypeCache::AddString(const StringPiece& string_piece) {
+ char* ptr = arena_.AllocArray<char>(string_piece.length());
+ memcpy(ptr, string_piece.data(), string_piece.length());
+ return StringPiece(ptr, string_piece.length());
+}
+
+const RegType& RegTypeCache::From(mirror::ClassLoader* loader,
+ const char* descriptor,
bool precise) {
+ StringPiece sp_descriptor(descriptor);
// Try looking up the class in the cache first. We use a StringPiece to avoid continual strlen
// operations on the descriptor.
- StringPiece descriptor_sp(descriptor);
for (size_t i = primitive_count_; i < entries_.size(); i++) {
- if (MatchDescriptor(i, descriptor_sp, precise)) {
+ if (MatchDescriptor(i, sp_descriptor, precise)) {
return *(entries_[i]);
}
}
@@ -186,12 +197,11 @@
if (klass->CannotBeAssignedFromOtherTypes() || precise) {
DCHECK(!(klass->IsAbstract()) || klass->IsArrayClass());
DCHECK(!klass->IsInterface());
- entry = new PreciseReferenceType(klass, descriptor_sp.as_string(), entries_.size());
+ entry = new (&arena_) PreciseReferenceType(klass, AddString(sp_descriptor), entries_.size());
} else {
- entry = new ReferenceType(klass, descriptor_sp.as_string(), entries_.size());
+ entry = new (&arena_) ReferenceType(klass, AddString(sp_descriptor), entries_.size());
}
- AddEntry(entry);
- return *entry;
+ return AddEntry(entry);
} else { // Class not resolved.
// We tried loading the class and failed, this might get an exception raised
// so we want to clear it before we go on.
@@ -202,9 +212,8 @@
DCHECK(!Thread::Current()->IsExceptionPending());
}
if (IsValidDescriptor(descriptor)) {
- RegType* entry = new UnresolvedReferenceType(descriptor_sp.as_string(), entries_.size());
- AddEntry(entry);
- return *entry;
+ return AddEntry(
+ new (&arena_) UnresolvedReferenceType(AddString(sp_descriptor), entries_.size()));
} else {
// The descriptor is broken return the unknown type as there's nothing sensible that
// could be done at runtime
@@ -213,50 +222,65 @@
}
}
-const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
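+// For primitives this returns the canonical primitive type; otherwise it searches klass_entries_ and
+// returns null when no entry with the requested precision is cached.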
+const RegType* RegTypeCache::FindClass(mirror::Class* klass, bool precise) const {
DCHECK(klass != nullptr);
if (klass->IsPrimitive()) {
// Note: precise isn't used for primitive classes. A char is assignable to an int. All
// primitive classes are final.
- return RegTypeFromPrimitiveType(klass->GetPrimitiveType());
- } else {
- // Look for the reference in the list of entries to have.
- for (size_t i = primitive_count_; i < entries_.size(); i++) {
- const RegType* cur_entry = entries_[i];
- if (cur_entry->klass_.Read() == klass && MatchingPrecisionForClass(cur_entry, precise)) {
- return *cur_entry;
+ return &RegTypeFromPrimitiveType(klass->GetPrimitiveType());
+ }
+ for (auto& pair : klass_entries_) {
+ mirror::Class* const reg_klass = pair.first.Read();
+ if (reg_klass == klass) {
+ const RegType* reg_type = pair.second;
+ if (MatchingPrecisionForClass(reg_type, precise)) {
+ return reg_type;
}
}
- // No reference to the class was found, create new reference.
- RegType* entry;
- if (precise) {
- entry = new PreciseReferenceType(klass, descriptor, entries_.size());
- } else {
- entry = new ReferenceType(klass, descriptor, entries_.size());
- }
- AddEntry(entry);
- return *entry;
}
+ return nullptr;
}
-RegTypeCache::RegTypeCache(bool can_load_classes) : can_load_classes_(can_load_classes) {
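+// Creates a new reference type for 'klass' and adds it to the cache; callers check FindClass() first
+// (enforced by the DCHECK below).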
+const RegType* RegTypeCache::InsertClass(const StringPiece& descriptor,
+ mirror::Class* klass,
+ bool precise) {
+ // No reference to the class was found, create new reference.
+ DCHECK(FindClass(klass, precise) == nullptr);
+ RegType* const reg_type = precise
+ ? static_cast<RegType*>(
+ new (&arena_) PreciseReferenceType(klass, descriptor, entries_.size()))
+ : new (&arena_) ReferenceType(klass, descriptor, entries_.size());
+ return &AddEntry(reg_type);
+}
+
+const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
+ DCHECK(klass != nullptr);
+ const RegType* reg_type = FindClass(klass, precise);
+ if (reg_type == nullptr) {
+ reg_type = InsertClass(AddString(StringPiece(descriptor)), klass, precise);
+ }
+ return *reg_type;
+}
+
+RegTypeCache::RegTypeCache(bool can_load_classes, ScopedArenaAllocator& arena)
+ : entries_(arena.Adapter(kArenaAllocVerifier)),
+ klass_entries_(arena.Adapter(kArenaAllocVerifier)),
+ can_load_classes_(can_load_classes),
+ arena_(arena) {
if (kIsDebugBuild) {
Thread::Current()->AssertThreadSuspensionIsAllowable(gAborting == 0);
}
- entries_.reserve(64);
+ // The klass_entries_ array does not have primitives or small constants.
+ static constexpr size_t kNumReserveEntries = 32;
+ klass_entries_.reserve(kNumReserveEntries);
+ // We want to have room for additional entries after inserting primitives and small
+ // constants.
+ entries_.reserve(kNumReserveEntries + kNumPrimitivesAndSmallConstants);
FillPrimitiveAndSmallConstantTypes();
}
RegTypeCache::~RegTypeCache() {
- CHECK_LE(primitive_count_, entries_.size());
- // Delete only the non primitive types.
- if (entries_.size() == kNumPrimitivesAndSmallConstants) {
- // All entries are from the global pool, nothing to delete.
- return;
- }
- std::vector<const RegType*>::iterator non_primitive_begin = entries_.begin();
- std::advance(non_primitive_begin, kNumPrimitivesAndSmallConstants);
- STLDeleteContainerPointers(non_primitive_begin, entries_.end());
+ DCHECK_LE(primitive_count_, entries_.size());
}
void RegTypeCache::ShutDown() {
@@ -318,9 +342,9 @@
}
const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegType& right) {
- BitVector types(1, // Allocate at least a word.
- true, // Is expandable.
- Allocator::GetMallocAllocator()); // TODO: Arenas in the verifier.
+ ArenaBitVector types(&arena_,
+ kDefaultArenaBitVectorBytes * kBitsPerByte, // Allocate at least 8 bytes.
+ true); // Is expandable.
const RegType* left_resolved;
if (left.IsUnresolvedMergedReference()) {
const UnresolvedMergedType* left_merge = down_cast<const UnresolvedMergedType*>(&left);
@@ -361,20 +385,15 @@
const BitVector& unresolved_part = cmp_type->GetUnresolvedTypes();
// Use SameBitsSet. "types" is expandable to allow merging in the components, but the
// BitVector in the final RegType will be made non-expandable.
- if (&resolved_part == &resolved_parts_merged &&
- types.SameBitsSet(&unresolved_part)) {
+ if (&resolved_part == &resolved_parts_merged && types.SameBitsSet(&unresolved_part)) {
return *cur_entry;
}
}
}
-
- // Create entry.
- RegType* entry = new UnresolvedMergedType(resolved_parts_merged,
- types,
- this,
- entries_.size());
- AddEntry(entry);
- return *entry;
+ return AddEntry(new (&arena_) UnresolvedMergedType(resolved_parts_merged,
+ types,
+ this,
+ entries_.size()));
}
const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
@@ -391,14 +410,12 @@
}
}
}
- RegType* entry = new UnresolvedSuperClass(child.GetId(), this, entries_.size());
- AddEntry(entry);
- return *entry;
+ return AddEntry(new (&arena_) UnresolvedSuperClass(child.GetId(), this, entries_.size()));
}
const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
UninitializedType* entry = nullptr;
- const std::string& descriptor(type.GetDescriptor());
+ const StringPiece& descriptor(type.GetDescriptor());
if (type.IsUnresolvedTypes()) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
const RegType* cur_entry = entries_[i];
@@ -409,7 +426,9 @@
return *down_cast<const UnresolvedUninitializedRefType*>(cur_entry);
}
}
- entry = new UnresolvedUninitializedRefType(descriptor, allocation_pc, entries_.size());
+ entry = new (&arena_) UnresolvedUninitializedRefType(descriptor,
+ allocation_pc,
+ entries_.size());
} else {
mirror::Class* klass = type.GetClass();
for (size_t i = primitive_count_; i < entries_.size(); i++) {
@@ -421,17 +440,19 @@
return *down_cast<const UninitializedReferenceType*>(cur_entry);
}
}
- entry = new UninitializedReferenceType(klass, descriptor, allocation_pc, entries_.size());
+ entry = new (&arena_) UninitializedReferenceType(klass,
+ descriptor,
+ allocation_pc,
+ entries_.size());
}
- AddEntry(entry);
- return *entry;
+ return AddEntry(entry);
}
const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) {
RegType* entry;
if (uninit_type.IsUnresolvedTypes()) {
- const std::string& descriptor(uninit_type.GetDescriptor());
+ const StringPiece& descriptor(uninit_type.GetDescriptor());
for (size_t i = primitive_count_; i < entries_.size(); i++) {
const RegType* cur_entry = entries_[i];
if (cur_entry->IsUnresolvedReference() &&
@@ -439,7 +460,7 @@
return *cur_entry;
}
}
- entry = new UnresolvedReferenceType(descriptor, entries_.size());
+ entry = new (&arena_) UnresolvedReferenceType(descriptor, entries_.size());
} else {
mirror::Class* klass = uninit_type.GetClass();
if (uninit_type.IsUninitializedThisReference() && !klass->IsFinal()) {
@@ -450,7 +471,7 @@
return *cur_entry;
}
}
- entry = new ReferenceType(klass, "", entries_.size());
+ entry = new (&arena_) ReferenceType(klass, "", entries_.size());
} else if (!klass->IsPrimitive()) {
// We're uninitialized because of allocation, look or create a precise type as allocations
// may only create objects of that type.
@@ -469,18 +490,19 @@
return *cur_entry;
}
}
- entry = new PreciseReferenceType(klass, uninit_type.GetDescriptor(), entries_.size());
+ entry = new (&arena_) PreciseReferenceType(klass,
+ uninit_type.GetDescriptor(),
+ entries_.size());
} else {
return Conflict();
}
}
- AddEntry(entry);
- return *entry;
+ return AddEntry(entry);
}
const UninitializedType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
UninitializedType* entry;
- const std::string& descriptor(type.GetDescriptor());
+ const StringPiece& descriptor(type.GetDescriptor());
if (type.IsUnresolvedTypes()) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
const RegType* cur_entry = entries_[i];
@@ -489,7 +511,7 @@
return *down_cast<const UninitializedType*>(cur_entry);
}
}
- entry = new UnresolvedUninitializedThisRefType(descriptor, entries_.size());
+ entry = new (&arena_) UnresolvedUninitializedThisRefType(descriptor, entries_.size());
} else {
mirror::Class* klass = type.GetClass();
for (size_t i = primitive_count_; i < entries_.size(); i++) {
@@ -498,10 +520,9 @@
return *down_cast<const UninitializedType*>(cur_entry);
}
}
- entry = new UninitializedThisReferenceType(klass, descriptor, entries_.size());
+ entry = new (&arena_) UninitializedThisReferenceType(klass, descriptor, entries_.size());
}
- AddEntry(entry);
- return *entry;
+ return AddEntry(entry);
}
const ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
@@ -515,12 +536,11 @@
}
ConstantType* entry;
if (precise) {
- entry = new PreciseConstType(value, entries_.size());
+ entry = new (&arena_) PreciseConstType(value, entries_.size());
} else {
- entry = new ImpreciseConstType(value, entries_.size());
+ entry = new (&arena_) ImpreciseConstType(value, entries_.size());
}
- AddEntry(entry);
- return *entry;
+ return AddEntry(entry);
}
const ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
@@ -533,12 +553,11 @@
}
ConstantType* entry;
if (precise) {
- entry = new PreciseConstLoType(value, entries_.size());
+ entry = new (&arena_) PreciseConstLoType(value, entries_.size());
} else {
- entry = new ImpreciseConstLoType(value, entries_.size());
+ entry = new (&arena_) ImpreciseConstLoType(value, entries_.size());
}
- AddEntry(entry);
- return *entry;
+ return AddEntry(entry);
}
const ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
@@ -551,32 +570,30 @@
}
ConstantType* entry;
if (precise) {
- entry = new PreciseConstHiType(value, entries_.size());
+ entry = new (&arena_) PreciseConstHiType(value, entries_.size());
} else {
- entry = new ImpreciseConstHiType(value, entries_.size());
+ entry = new (&arena_) ImpreciseConstHiType(value, entries_.size());
}
- AddEntry(entry);
- return *entry;
+ return AddEntry(entry);
}
const RegType& RegTypeCache::GetComponentType(const RegType& array, mirror::ClassLoader* loader) {
if (!array.IsArrayTypes()) {
return Conflict();
} else if (array.IsUnresolvedTypes()) {
- const std::string& descriptor(array.GetDescriptor());
- const std::string component(descriptor.substr(1, descriptor.size() - 1));
- return FromDescriptor(loader, component.c_str(), false);
+ const std::string descriptor(array.GetDescriptor().as_string());
+ return FromDescriptor(loader, descriptor.c_str() + 1, false);
} else {
mirror::Class* klass = array.GetClass()->GetComponentType();
std::string temp;
+ const char* descriptor = klass->GetDescriptor(&temp);
if (klass->IsErroneous()) {
// Arrays may have erroneous component types, use unresolved in that case.
// We assume that the primitive classes are not erroneous, so we know it is a
// reference type.
- return FromDescriptor(loader, klass->GetDescriptor(&temp), false);
+ return FromDescriptor(loader, descriptor, false);
} else {
- return FromClass(klass->GetDescriptor(&temp), klass,
- klass->CannotBeAssignedFromOtherTypes());
+ return FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes());
}
}
}
@@ -618,10 +635,10 @@
for (size_t i = primitive_count_; i < entries_.size(); ++i) {
entries_[i]->VisitRoots(visitor, root_info);
}
-}
-
-void RegTypeCache::AddEntry(RegType* new_entry) {
- entries_.push_back(new_entry);
+ for (auto& pair : klass_entries_) {
+ GcRoot<mirror::Class>& root = pair.first;
+ root.VisitRoot(visitor, root_info);
+ }
}
} // namespace verifier
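For context on the allocation change in reg_type_cache.cc above (entries are placement-new'd into the verifier's scoped arena, and the destructor no longer deletes them), here is a minimal, self-contained sketch of that ownership model. BumpArena and Entry are illustrative stand-ins, not ART types; the real code uses ScopedArenaAllocator and never runs RegType destructors because the arena releases everything wholesale.

#include <cstddef>
#include <cstdint>
#include <new>
#include <vector>

// Toy bump allocator: hands out chunks from one buffer, frees everything at once.
// No capacity check here; this is only a sketch.
class BumpArena {
 public:
  explicit BumpArena(size_t capacity) : storage_(capacity), pos_(0) {}
  void* Alloc(size_t bytes) {
    bytes = (bytes + 7u) & ~static_cast<size_t>(7);  // 8-byte steps, echoing the header comment
    void* result = storage_.data() + pos_;
    pos_ += bytes;
    return result;
  }
 private:
  std::vector<uint8_t> storage_;
  size_t pos_;
};

// Stand-in for an arena-allocated, trivially destructible cache entry.
struct Entry {
  explicit Entry(uint16_t id) : id(id) {}
  uint16_t id;
};

int main() {
  BumpArena arena(4096);
  std::vector<const Entry*> entries;
  for (uint16_t i = 0; i < 8; ++i) {
    // Placement new into arena memory; nothing is ever deleted individually.
    entries.push_back(new (arena.Alloc(sizeof(Entry))) Entry(i));
  }
  // The arena and every entry in it go away together here.
  return static_cast<int>(entries.size()) - 8;  // 0
}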
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 93948a1..6f9a04e 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -19,6 +19,7 @@
#include "base/casts.h"
#include "base/macros.h"
+#include "base/scoped_arena_containers.h"
#include "object_callbacks.h"
#include "reg_type.h"
#include "runtime.h"
@@ -31,15 +32,19 @@
class Class;
class ClassLoader;
} // namespace mirror
+class ScopedArenaAllocator;
class StringPiece;
namespace verifier {
class RegType;
+// Use 8 bytes since that is the default arena allocator alignment.
+static constexpr size_t kDefaultArenaBitVectorBytes = 8;
+
class RegTypeCache {
public:
- explicit RegTypeCache(bool can_load_classes);
+ explicit RegTypeCache(bool can_load_classes, ScopedArenaAllocator& arena);
~RegTypeCache();
static void Init() SHARED_REQUIRES(Locks::mutator_lock_) {
if (!RegTypeCache::primitive_initialized_) {
@@ -53,6 +58,13 @@
const art::verifier::RegType& GetFromId(uint16_t id) const;
const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
SHARED_REQUIRES(Locks::mutator_lock_);
+  // Find a RegType; returns null if not found.
+ const RegType* FindClass(mirror::Class* klass, bool precise) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+  // Insert a new class with a specified descriptor; it must not already be in the cache.
+ const RegType* InsertClass(const StringPiece& descriptor, mirror::Class* klass, bool precise)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+  // Get or insert a reg type for a descriptor, klass, and precision.
const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
SHARED_REQUIRES(Locks::mutator_lock_);
const ConstantType& FromCat1Const(int32_t value, bool precise)
@@ -150,7 +162,13 @@
const ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
SHARED_REQUIRES(Locks::mutator_lock_);
- void AddEntry(RegType* new_entry);
+  // Returns the passed in RegType.
+ template <class RegTypeType>
+ RegTypeType& AddEntry(RegTypeType* new_entry) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Add a string piece to the arena allocator so that it stays live for the lifetime of the
+ // verifier.
+ StringPiece AddString(const StringPiece& string_piece);
template <class Type>
static const Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
@@ -160,7 +178,8 @@
// A quick look up for popular small constants.
static constexpr int32_t kMinSmallConstant = -1;
static constexpr int32_t kMaxSmallConstant = 4;
- static const PreciseConstType* small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
+ static const PreciseConstType* small_precise_constants_[kMaxSmallConstant -
+ kMinSmallConstant + 1];
static constexpr size_t kNumPrimitivesAndSmallConstants =
12 + (kMaxSmallConstant - kMinSmallConstant + 1);
@@ -172,11 +191,17 @@
static uint16_t primitive_count_;
// The actual storage for the RegTypes.
- std::vector<const RegType*> entries_;
+ ScopedArenaVector<const RegType*> entries_;
+
+ // Fast lookup for quickly finding entries that have a matching class.
+ ScopedArenaVector<std::pair<GcRoot<mirror::Class>, const RegType*>> klass_entries_;
// Whether or not we're allowed to load classes.
const bool can_load_classes_;
+ // Arena allocator.
+ ScopedArenaAllocator& arena_;
+
DISALLOW_COPY_AND_ASSIGN(RegTypeCache);
};
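The new klass_entries_ member above pairs a class root with its cached RegType so class-keyed lookups avoid walking every entry. A rough sketch of that fast path, with made-up Klass/Entry types (the real FindClass also distinguishes precise from imprecise references, approximated here with a flag):

#include <utility>
#include <vector>

struct Klass {};                 // stand-in for mirror::Class
struct Entry { bool precise; };  // stand-in for a cached reference RegType

using KlassTable = std::vector<std::pair<const Klass*, const Entry*>>;

// Linear scan keyed by (class, precision); returns null on a miss so the caller
// can create and append a new entry, mirroring the FindClass/InsertClass split above.
const Entry* FindClassEntry(const KlassTable& table, const Klass* klass, bool precise) {
  for (const auto& pair : table) {
    if (pair.first == klass && pair.second->precise == precise) {
      return pair.second;
    }
  }
  return nullptr;
}

int main() {
  Klass k;
  Entry imprecise{false};
  KlassTable table = {{&k, &imprecise}};
  // Hit for the imprecise entry, miss for the precise one.
  bool ok = FindClassEntry(table, &k, false) != nullptr &&
            FindClassEntry(table, &k, true) == nullptr;
  return ok ? 0 : 1;
}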
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 971b1f5..22ac7e4 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -20,6 +20,7 @@
#include "base/bit_vector.h"
#include "base/casts.h"
+#include "base/scoped_arena_allocator.h"
#include "common_runtime_test.h"
#include "reg_type_cache-inl.h"
#include "reg_type-inl.h"
@@ -29,12 +30,23 @@
namespace art {
namespace verifier {
-class RegTypeTest : public CommonRuntimeTest {};
+class BaseRegTypeTest : public CommonRuntimeTest {
+ public:
+ void PostRuntimeCreate() OVERRIDE {
+ stack.reset(new ArenaStack(Runtime::Current()->GetArenaPool()));
+ allocator.reset(new ScopedArenaAllocator(stack.get()));
+ }
+
+ std::unique_ptr<ArenaStack> stack;
+ std::unique_ptr<ScopedArenaAllocator> allocator;
+};
+
+class RegTypeTest : public BaseRegTypeTest {};
TEST_F(RegTypeTest, ConstLoHi) {
  // Tests creating primitive types.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& ref_type_const_0 = cache.FromCat1Const(10, true);
const RegType& ref_type_const_1 = cache.FromCat1Const(10, true);
const RegType& ref_type_const_2 = cache.FromCat1Const(30, true);
@@ -56,7 +68,7 @@
TEST_F(RegTypeTest, Pairs) {
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
int64_t val = static_cast<int32_t>(1234);
const RegType& precise_lo = cache.FromCat2ConstLo(static_cast<int32_t>(val), true);
const RegType& precise_hi = cache.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
@@ -80,7 +92,7 @@
TEST_F(RegTypeTest, Primitives) {
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& bool_reg_type = cache.Boolean();
EXPECT_FALSE(bool_reg_type.IsUndefined());
@@ -347,13 +359,13 @@
EXPECT_TRUE(double_reg_type.HasClass());
}
-class RegTypeReferenceTest : public CommonRuntimeTest {};
+class RegTypeReferenceTest : public BaseRegTypeTest {};
TEST_F(RegTypeReferenceTest, JavalangObjectImprecise) {
// Tests matching precisions. A reference type that was created precise doesn't
// match the one that is imprecise.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& imprecise_obj = cache.JavaLangObject(false);
const RegType& precise_obj = cache.JavaLangObject(true);
const RegType& precise_obj_2 = cache.FromDescriptor(nullptr, "Ljava/lang/Object;", true);
@@ -368,7 +380,7 @@
  // Tests creating unresolved types. Miss the first time asking the cache and
  // a hit the second time.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& ref_type_0 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
EXPECT_TRUE(ref_type_0.IsNonZeroReferenceTypes());
@@ -384,7 +396,7 @@
TEST_F(RegTypeReferenceTest, UnresolvedUnintializedType) {
  // Tests creating uninitialized types from unresolved types.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& ref_type_0 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
const RegType& ref_type = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
@@ -406,7 +418,7 @@
TEST_F(RegTypeReferenceTest, Dump) {
// Tests types for proper Dump messages.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& unresolved_ref = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
const RegType& unresolved_ref_another = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExistEither;", true);
const RegType& resolved_ref = cache.JavaLangString();
@@ -431,7 +443,7 @@
// Hit the second time. Then check for the same effect when using
// The JavaLangObject method instead of FromDescriptor. String class is final.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& ref_type = cache.JavaLangString();
const RegType& ref_type_2 = cache.JavaLangString();
const RegType& ref_type_3 = cache.FromDescriptor(nullptr, "Ljava/lang/String;", true);
@@ -451,7 +463,7 @@
// Hit the second time. Then I am checking for the same effect when using
  // The JavaLangObject method instead of FromDescriptor. Object Class is not final.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& ref_type = cache.JavaLangObject(true);
const RegType& ref_type_2 = cache.JavaLangObject(true);
const RegType& ref_type_3 = cache.FromDescriptor(nullptr, "Ljava/lang/Object;", true);
@@ -464,7 +476,7 @@
// Tests merging logic
  // String and object, LUB is object.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache_new(true);
+ RegTypeCache cache_new(true, *allocator);
const RegType& string = cache_new.JavaLangString();
const RegType& Object = cache_new.JavaLangObject(true);
EXPECT_TRUE(string.Merge(Object, &cache_new).IsJavaLangObject());
@@ -487,7 +499,7 @@
TEST_F(RegTypeTest, MergingFloat) {
// Testing merging logic with float and float constants.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache_new(true);
+ RegTypeCache cache_new(true, *allocator);
constexpr int32_t kTestConstantValue = 10;
const RegType& float_type = cache_new.Float();
@@ -518,7 +530,7 @@
TEST_F(RegTypeTest, MergingLong) {
// Testing merging logic with long and long constants.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache_new(true);
+ RegTypeCache cache_new(true, *allocator);
constexpr int32_t kTestConstantValue = 10;
const RegType& long_lo_type = cache_new.LongLo();
@@ -572,7 +584,7 @@
TEST_F(RegTypeTest, MergingDouble) {
// Testing merging logic with double and double constants.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache_new(true);
+ RegTypeCache cache_new(true, *allocator);
constexpr int32_t kTestConstantValue = 10;
const RegType& double_lo_type = cache_new.DoubleLo();
@@ -626,7 +638,7 @@
TEST_F(RegTypeTest, ConstPrecision) {
  // Tests creating primitive types.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache_new(true);
+ RegTypeCache cache_new(true, *allocator);
const RegType& imprecise_const = cache_new.FromCat1Const(10, false);
const RegType& precise_const = cache_new.FromCat1Const(10, true);
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 1df2428..57fb701 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -182,6 +182,21 @@
}
}
+inline RegisterLine* RegisterLine::Create(size_t num_regs, MethodVerifier* verifier) {
+ void* memory = verifier->GetArena().Alloc(OFFSETOF_MEMBER(RegisterLine, line_) +
+ (num_regs * sizeof(uint16_t)));
+ return new (memory) RegisterLine(num_regs, verifier);
+}
+
+inline RegisterLine::RegisterLine(size_t num_regs, MethodVerifier* verifier)
+ : num_regs_(num_regs),
+ monitors_(verifier->GetArena().Adapter(kArenaAllocVerifier)),
+ reg_to_lock_depths_(std::less<uint32_t>(), verifier->GetArena().Adapter(kArenaAllocVerifier)),
+ this_initialized_(false) {
+ std::uninitialized_fill_n(line_, num_regs_, 0u);
+ SetResultTypeToUnknown(verifier);
+}
+
} // namespace verifier
} // namespace art
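The RegisterLine::Create change above sizes a single allocation as the object header plus num_regs trailing uint16_t slots. A standalone sketch of that trailing-array idiom, using plain operator new instead of the verifier's arena (Line and CreateLine are illustrative names):

#include <cstddef>
#include <cstdint>
#include <memory>
#include <new>

struct Line {
  explicit Line(size_t n) : num_regs(n) {
    std::uninitialized_fill_n(regs, num_regs, static_cast<uint16_t>(0));
  }
  size_t num_regs;
  uint16_t regs[1];  // Declared with one slot; storage really extends past it (same idiom as line_[1]).
};

Line* CreateLine(size_t num_regs) {
  // Size = offset of the trailing array + space for num_regs elements.
  size_t bytes = offsetof(Line, regs) + num_regs * sizeof(uint16_t);
  void* memory = ::operator new(bytes);
  return new (memory) Line(num_regs);
}

int main() {
  Line* line = CreateLine(16);
  line->regs[15] = 42;  // within the over-allocated block
  int result = (line->regs[15] == 42 && line->regs[0] == 0) ? 0 : 1;
  line->~Line();
  ::operator delete(line);
  return result;
}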
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index f48b1e1..37343b5 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -412,12 +412,9 @@
}
}
-// Check whether there is another register in the search map that is locked the same way as the
-// register in the src map. This establishes an alias.
-static bool FindLockAliasedRegister(
- uint32_t src,
- const AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier>& src_map,
- const AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier>& search_map) {
+bool FindLockAliasedRegister(uint32_t src,
+ const RegisterLine::RegToLockDepthsMap& src_map,
+ const RegisterLine::RegToLockDepthsMap& search_map) {
auto it = src_map.find(src);
if (it == src_map.end()) {
// "Not locked" is trivially aliased.
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 46db1c6..b2f5555 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -20,6 +20,7 @@
#include <memory>
#include <vector>
+#include "base/scoped_arena_containers.h"
#include "safe_map.h"
namespace art {
@@ -58,11 +59,11 @@
// stack of entered monitors (identified by code unit offset).
class RegisterLine {
public:
- static RegisterLine* Create(size_t num_regs, MethodVerifier* verifier) {
- void* memory = operator new(sizeof(RegisterLine) + (num_regs * sizeof(uint16_t)));
- RegisterLine* rl = new (memory) RegisterLine(num_regs, verifier);
- return rl;
- }
+ // A map from register to a bit vector of indices into the monitors_ stack.
+ using RegToLockDepthsMap = ScopedArenaSafeMap<uint32_t, uint32_t>;
+
+ // Create a register line of num_regs registers.
+ static RegisterLine* Create(size_t num_regs, MethodVerifier* verifier);
// Implement category-1 "move" instructions. Copy a 32-bit value from "vsrc" to "vdst".
void CopyRegister1(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc, TypeCategory cat)
@@ -311,11 +312,11 @@
// Write a bit at each register location that holds a reference.
void WriteReferenceBitMap(MethodVerifier* verifier, std::vector<uint8_t>* data, size_t max_bytes);
- size_t GetMonitorEnterCount() {
+ size_t GetMonitorEnterCount() const {
return monitors_.size();
}
- uint32_t GetMonitorEnterDexPc(size_t i) {
+ uint32_t GetMonitorEnterDexPc(size_t i) const {
return monitors_[i];
}
@@ -375,11 +376,7 @@
reg_to_lock_depths_.erase(reg);
}
- RegisterLine(size_t num_regs, MethodVerifier* verifier)
- : num_regs_(num_regs), this_initialized_(false) {
- memset(&line_, 0, num_regs_ * sizeof(uint16_t));
- SetResultTypeToUnknown(verifier);
- }
+ RegisterLine(size_t num_regs, MethodVerifier* verifier);
// Storage for the result register's type, valid after an invocation.
uint16_t result_[2];
@@ -388,17 +385,18 @@
const uint32_t num_regs_;
// A stack of monitor enter locations.
- std::vector<uint32_t, TrackingAllocator<uint32_t, kAllocatorTagVerifier>> monitors_;
+ ScopedArenaVector<uint32_t> monitors_;
+
// A map from register to a bit vector of indices into the monitors_ stack. As we pop the monitor
// stack we verify that monitor-enter/exit are correctly nested. That is, if there was a
// monitor-enter on v5 and then on v6, we expect the monitor-exit to be on v6 then on v5.
- AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier> reg_to_lock_depths_;
+ RegToLockDepthsMap reg_to_lock_depths_;
// Whether "this" initialization (a constructor supercall) has happened.
bool this_initialized_;
// An array of RegType Ids associated with each dex register.
- uint16_t line_[0];
+ uint16_t line_[1];
DISALLOW_COPY_AND_ASSIGN(RegisterLine);
};
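The monitors_/reg_to_lock_depths_ comment above describes a LIFO nesting check: each register that performed a monitor-enter remembers the stack depths at which it did so, and a monitor-exit must match the innermost depth. A reduced sketch of that bookkeeping (LockState and its methods are invented for illustration; depths are capped at 32 in this toy):

#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

struct LockState {
  std::vector<uint32_t> monitors;              // dex pcs of monitor-enter, innermost last
  std::map<uint32_t, uint32_t> reg_to_depths;  // register -> bit set of lock depths

  void MonitorEnter(uint32_t reg, uint32_t dex_pc) {
    reg_to_depths[reg] |= 1u << monitors.size();  // record the depth this register locked at
    monitors.push_back(dex_pc);
  }

  bool MonitorExit(uint32_t reg) {
    if (monitors.empty()) {
      return false;  // exit without a matching enter
    }
    uint32_t bit = 1u << (monitors.size() - 1);
    auto it = reg_to_depths.find(reg);
    if (it == reg_to_depths.end() || (it->second & bit) == 0) {
      return false;  // this register did not lock the innermost monitor
    }
    it->second &= ~bit;
    monitors.pop_back();
    return true;
  }
};

int main() {
  LockState state;
  state.MonitorEnter(5, 0x10);  // monitor-enter on v5
  state.MonitorEnter(6, 0x14);  // then on v6
  std::cout << state.MonitorExit(5) << "\n";  // 0: v5 is not the innermost lock
  std::cout << state.MonitorExit(6) << "\n";  // 1: exits must unwind v6 first
  std::cout << state.MonitorExit(5) << "\n";  // 1: then v5 can be released
  return 0;
}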
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index c984b17..b76555b 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -150,10 +150,15 @@
// Do we have a managed handler? If so, run it first.
SpecialSignalHandlerFn managed = user_sigactions[sig].GetSpecialHandler();
if (managed != nullptr) {
+ sigset_t mask, old_mask;
+ sigfillset(&mask);
+ sigprocmask(SIG_BLOCK, &mask, &old_mask);
// Call the handler. If it succeeds, we're done.
if (managed(sig, info, context)) {
+ sigprocmask(SIG_SETMASK, &old_mask, nullptr);
return;
}
+ sigprocmask(SIG_SETMASK, &old_mask, nullptr);
}
const struct sigaction& action = user_sigactions[sig].GetAction();
@@ -166,7 +171,10 @@
}
} else {
if (action.sa_sigaction != nullptr) {
+ sigset_t old_mask;
+ sigprocmask(SIG_BLOCK, &action.sa_mask, &old_mask);
action.sa_sigaction(sig, info, context);
+ sigprocmask(SIG_SETMASK, &old_mask, nullptr);
} else {
signal(sig, SIG_DFL);
raise(sig);
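The sigchain change above wraps the claimed handler in a block-everything/restore sequence so that other chained signals cannot arrive while it runs. A minimal POSIX sketch of that save/block/restore pattern (RunHandlerWithAllSignalsBlocked and DummyHandler are illustrative names):

#include <signal.h>
#include <cstdio>

static bool RunHandlerWithAllSignalsBlocked(bool (*handler)(int), int sig) {
  sigset_t mask;
  sigset_t old_mask;
  sigfillset(&mask);                             // request that every blockable signal be masked
  sigprocmask(SIG_BLOCK, &mask, &old_mask);      // remember the previous mask
  bool handled = handler(sig);
  sigprocmask(SIG_SETMASK, &old_mask, nullptr);  // restore it whether or not the handler succeeded
  return handled;
}

static bool DummyHandler(int sig) {
  std::printf("handled signal %d\n", sig);
  return true;
}

int main() {
  return RunHandlerWithAllSignalsBlocked(DummyHandler, SIGUSR1) ? 0 : 1;
}

In multithreaded code pthread_sigmask is the per-thread equivalent; the sketch simply mirrors the sigprocmask calls used in the hunk above.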
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index f8d321c..34fb3f8 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -19,15 +19,17 @@
namespace art {
-#define CHECK_REGS_CONTAIN_REFS(dex_pc, abort_if_not_found, ...) do { \
- int t[] = {__VA_ARGS__}; \
- int t_size = sizeof(t) / sizeof(*t); \
- uintptr_t native_quick_pc = GetCurrentCode().ToNativeQuickPc(dex_pc, \
- /* is_catch_handler */ false, \
- abort_if_not_found); \
- if (native_quick_pc != UINTPTR_MAX) { \
- CheckReferences(t, t_size, GetCurrentCode().NativeQuickPcOffset(native_quick_pc)); \
- } \
+#define CHECK_REGS_CONTAIN_REFS(dex_pc, abort_if_not_found, ...) do { \
+ int t[] = {__VA_ARGS__}; \
+ int t_size = sizeof(t) / sizeof(*t); \
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader(); \
+ uintptr_t native_quick_pc = method_header->ToNativeQuickPc(GetMethod(), \
+ dex_pc, \
+ /* is_catch_handler */ false, \
+ abort_if_not_found); \
+ if (native_quick_pc != UINTPTR_MAX) { \
+ CheckReferences(t, t_size, method_header->NativeQuickPcOffset(native_quick_pc)); \
+ } \
} while (false);
struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
@@ -49,7 +51,7 @@
CHECK_REGS_CONTAIN_REFS(0x06U, true, 8, 1); // v8: this, v1: x
CHECK_REGS_CONTAIN_REFS(0x08U, true, 8, 3, 1); // v8: this, v3: y, v1: x
CHECK_REGS_CONTAIN_REFS(0x0cU, true, 8, 3, 1); // v8: this, v3: y, v1: x
- if (!GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) {
CHECK_REGS_CONTAIN_REFS(0x0eU, true, 8, 3, 1); // v8: this, v3: y, v1: x
}
CHECK_REGS_CONTAIN_REFS(0x10U, true, 8, 3, 1); // v8: this, v3: y, v1: x
@@ -65,7 +67,7 @@
// Note that v0: ex can be eliminated because it's a dead merge of two different exceptions.
CHECK_REGS_CONTAIN_REFS(0x18U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 5, 2, 1); // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
- if (!GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) {
// v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
CHECK_REGS_CONTAIN_REFS(0x1dU, true, 8, 5, 2, 1);
// v5 is removed from the root set because there is a "merge" operation.
@@ -74,7 +76,7 @@
}
CHECK_REGS_CONTAIN_REFS(0x21U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
- if (!GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) {
CHECK_REGS_CONTAIN_REFS(0x27U, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
}
CHECK_REGS_CONTAIN_REFS(0x29U, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
diff --git a/test/004-UnsafeTest/src/Main.java b/test/004-UnsafeTest/src/Main.java
index c93db50..5b22e88 100644
--- a/test/004-UnsafeTest/src/Main.java
+++ b/test/004-UnsafeTest/src/Main.java
@@ -129,13 +129,36 @@
System.out.println("Unexpectedly not succeeding compareAndSwapLong...");
}
- if (unsafe.compareAndSwapObject(t, objectOffset, null, new Object())) {
+ // We do not use `null` as argument to sun.misc.Unsafe.compareAndSwapObject
+ // in those tests, as this value is not affected by heap poisoning
+ // (which uses address negation to poison and unpoison heap object
+ // references). This way, when heap poisoning is enabled, we can
+ // better exercise its implementation within that method.
+ if (unsafe.compareAndSwapObject(t, objectOffset, new Object(), new Object())) {
System.out.println("Unexpectedly succeeding compareAndSwapObject...");
}
- if (!unsafe.compareAndSwapObject(t, objectOffset, objectValue, null)) {
+ Object objectValue2 = new Object();
+ if (!unsafe.compareAndSwapObject(t, objectOffset, objectValue, objectValue2)) {
System.out.println("Unexpectedly not succeeding compareAndSwapObject...");
}
- if (!unsafe.compareAndSwapObject(t, objectOffset, null, new Object())) {
+ Object objectValue3 = new Object();
+ if (!unsafe.compareAndSwapObject(t, objectOffset, objectValue2, objectValue3)) {
+ System.out.println("Unexpectedly not succeeding compareAndSwapObject...");
+ }
+
+ // Exercise sun.misc.Unsafe.compareAndSwapObject using the same
+ // object (`t`) for the `obj` and `newValue` arguments.
+ if (!unsafe.compareAndSwapObject(t, objectOffset, objectValue3, t)) {
+ System.out.println("Unexpectedly not succeeding compareAndSwapObject...");
+ }
+ // Exercise sun.misc.Unsafe.compareAndSwapObject using the same
+ // object (`t`) for the `obj`, `expectedValue` and `newValue` arguments.
+ if (!unsafe.compareAndSwapObject(t, objectOffset, t, t)) {
+ System.out.println("Unexpectedly not succeeding compareAndSwapObject...");
+ }
+ // Exercise sun.misc.Unsafe.compareAndSwapObject using the same
+ // object (`t`) for the `obj` and `expectedValue` arguments.
+ if (!unsafe.compareAndSwapObject(t, objectOffset, t, new Object())) {
System.out.println("Unexpectedly not succeeding compareAndSwapObject...");
}
}
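The comment above notes that heap poisoning negates reference values, which is why null (whose negation is still zero) would not exercise the poison/unpoison paths in the compareAndSwapObject intrinsic. A tiny sketch of that observation on 32-bit reference bits (PoisonReference/UnpoisonReference are illustrative helpers, not the runtime's API):

#include <cstdint>
#include <iostream>

// Poisoning by two's-complement negation is its own inverse.
static uint32_t PoisonReference(uint32_t ref) { return 0u - ref; }
static uint32_t UnpoisonReference(uint32_t poisoned) { return 0u - poisoned; }

int main() {
  std::cout << std::hex;
  std::cout << PoisonReference(0x0u) << "\n";         // 0: null is a fixed point, nothing to exercise
  std::cout << PoisonReference(0x12345678u) << "\n";  // edcba988: real references actually change
  std::cout << UnpoisonReference(PoisonReference(0x12345678u)) << "\n";  // 12345678: round-trips
  return 0;
}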
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index 948273a..e9946c8 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -390,6 +390,20 @@
#endif
#endif
+static bool cannot_be_blocked(int signum) {
+  // These two signals cannot be blocked anywhere.
+ if ((signum == SIGKILL) || (signum == SIGSTOP)) {
+ return true;
+ }
+
+ // The invalid rt_sig cannot be blocked.
+ if (((signum >= 32) && (signum < SIGRTMIN)) || (signum > SIGRTMAX)) {
+ return true;
+ }
+
+ return false;
+}
+
 // A dummy special handler, continuing after the faulting location. This code comes from
// 004-SignalTest.
static bool nb_signalhandler(int sig, siginfo_t* info ATTRIBUTE_UNUSED, void* context) {
@@ -413,6 +427,23 @@
UNUSED(context);
#endif
}
+
+ // Before invoking this handler, all other unclaimed signals must be blocked.
+  // Check the signal mask here to verify that this is the case.
+ sigset_t tmpset;
+ sigemptyset(&tmpset);
+ sigprocmask(SIG_SETMASK, nullptr, &tmpset);
+ int other_claimed = (sig == SIGSEGV) ? SIGILL : SIGSEGV;
+ for (int signum = 0; signum < NSIG; ++signum) {
+ if (cannot_be_blocked(signum)) {
+ continue;
+ } else if ((sigismember(&tmpset, signum)) && (signum == other_claimed)) {
+ printf("ERROR: The claimed signal %d is blocked\n", signum);
+ } else if ((!sigismember(&tmpset, signum)) && (signum != other_claimed)) {
+ printf("ERROR: The unclaimed signal %d is not blocked\n", signum);
+ }
+ }
+
// We handled this...
return true;
}
diff --git a/test/450-checker-types/src/Main.java b/test/450-checker-types/src/Main.java
index 134abd1..f1885de 100644
--- a/test/450-checker-types/src/Main.java
+++ b/test/450-checker-types/src/Main.java
@@ -537,6 +537,17 @@
return ((SubclassA)a).toString();
}
+
+ /// CHECK-START: void Main.argumentCheck(Super, double, SubclassA, Final) reference_type_propagation (after)
+ /// CHECK: ParameterValue klass:Main can_be_null:false exact:false
+ /// CHECK: ParameterValue klass:Super can_be_null:true exact:false
+ /// CHECK: ParameterValue
+ /// CHECK: ParameterValue klass:SubclassA can_be_null:true exact:false
+ /// CHECK: ParameterValue klass:Final can_be_null:true exact:true
+ /// CHECK-NOT: ParameterValue
+ private void argumentCheck(Super s, double d, SubclassA a, Final f) {
+ }
+
public static void main(String[] args) {
}
}
diff --git a/test/454-get-vreg/get_vreg_jni.cc b/test/454-get-vreg/get_vreg_jni.cc
index 0ee2ff9..30f9954 100644
--- a/test/454-get-vreg/get_vreg_jni.cc
+++ b/test/454-get-vreg/get_vreg_jni.cc
@@ -15,9 +15,9 @@
*/
#include "arch/context.h"
-#include "art_code.h"
#include "art_method-inl.h"
#include "jni.h"
+#include "oat_quick_method_header.h"
#include "scoped_thread_state_change.h"
#include "stack.h"
#include "thread.h"
@@ -46,12 +46,12 @@
CHECK_EQ(value, 42u);
bool success = GetVReg(m, 1, kIntVReg, &value);
- if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (!IsCurrentFrameInInterpreter() && GetCurrentOatQuickMethodHeader()->IsOptimized()) {
CHECK(!success);
}
success = GetVReg(m, 2, kIntVReg, &value);
- if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (!IsCurrentFrameInInterpreter() && GetCurrentOatQuickMethodHeader()->IsOptimized()) {
CHECK(!success);
}
@@ -83,12 +83,12 @@
CHECK_EQ(value, 42u);
bool success = GetVRegPair(m, 2, kLongLoVReg, kLongHiVReg, &value);
- if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (!IsCurrentFrameInInterpreter() && GetCurrentOatQuickMethodHeader()->IsOptimized()) {
CHECK(!success);
}
success = GetVRegPair(m, 4, kLongLoVReg, kLongHiVReg, &value);
- if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (!IsCurrentFrameInInterpreter() && GetCurrentOatQuickMethodHeader()->IsOptimized()) {
CHECK(!success);
}
diff --git a/test/457-regs/regs_jni.cc b/test/457-regs/regs_jni.cc
index 6fcebdb..64b2336 100644
--- a/test/457-regs/regs_jni.cc
+++ b/test/457-regs/regs_jni.cc
@@ -15,9 +15,9 @@
*/
#include "arch/context.h"
-#include "art_code.h"
#include "art_method-inl.h"
#include "jni.h"
+#include "oat_quick_method_header.h"
#include "scoped_thread_state_change.h"
#include "stack.h"
#include "thread.h"
@@ -64,7 +64,7 @@
CHECK_EQ(value, 1u);
bool success = GetVReg(m, 2, kIntVReg, &value);
- if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (!IsCurrentFrameInInterpreter() && GetCurrentOatQuickMethodHeader()->IsOptimized()) {
CHECK(!success);
}
diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc
index 2a56a7f..375a3fc 100644
--- a/test/466-get-live-vreg/get_live_vreg_jni.cc
+++ b/test/466-get-live-vreg/get_live_vreg_jni.cc
@@ -15,9 +15,9 @@
*/
#include "arch/context.h"
-#include "art_code.h"
#include "art_method-inl.h"
#include "jni.h"
+#include "oat_quick_method_header.h"
#include "scoped_thread_state_change.h"
#include "stack.h"
#include "thread.h"
@@ -44,7 +44,7 @@
found_method_ = true;
uint32_t value = 0;
if (GetCurrentQuickFrame() != nullptr &&
- GetCurrentCode().IsOptimized(sizeof(void*)) &&
+ GetCurrentOatQuickMethodHeader()->IsOptimized() &&
!Runtime::Current()->IsDebuggable()) {
CHECK_EQ(GetVReg(m, 0, kIntVReg, &value), false);
} else {
diff --git a/test/527-checker-array-access-split/expected.txt b/test/527-checker-array-access-split/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/527-checker-array-access-split/expected.txt
diff --git a/test/527-checker-array-access-split/info.txt b/test/527-checker-array-access-split/info.txt
new file mode 100644
index 0000000..9206804
--- /dev/null
+++ b/test/527-checker-array-access-split/info.txt
@@ -0,0 +1 @@
+Test arm64-specific array access optimization.
diff --git a/test/527-checker-array-access-split/src/Main.java b/test/527-checker-array-access-split/src/Main.java
new file mode 100644
index 0000000..ead9446
--- /dev/null
+++ b/test/527-checker-array-access-split/src/Main.java
@@ -0,0 +1,341 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static void assertIntEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ /**
+ * Test that HArrayGet with a constant index is not split.
+ */
+
+ /// CHECK-START-ARM64: int Main.constantIndexGet(int[]) instruction_simplifier_arm64 (before)
+ /// CHECK: <<Array:l\d+>> NullCheck
+ /// CHECK: <<Index:i\d+>> BoundsCheck
+ /// CHECK: ArrayGet [<<Array>>,<<Index>>]
+
+ /// CHECK-START-ARM64: int Main.constantIndexGet(int[]) instruction_simplifier_arm64 (after)
+ /// CHECK: <<Array:l\d+>> NullCheck
+ /// CHECK: <<Index:i\d+>> BoundsCheck
+ /// CHECK-NOT: Arm64IntermediateAddress
+ /// CHECK: ArrayGet [<<Array>>,<<Index>>]
+
+ public static int constantIndexGet(int array[]) {
+ return array[1];
+ }
+
+ /**
+ * Test that HArraySet with a constant index is not split.
+ */
+
+ /// CHECK-START-ARM64: void Main.constantIndexSet(int[]) instruction_simplifier_arm64 (before)
+ /// CHECK: <<Const2:i\d+>> IntConstant 2
+ /// CHECK: <<Array:l\d+>> NullCheck
+ /// CHECK: <<Index:i\d+>> BoundsCheck
+ /// CHECK: ArraySet [<<Array>>,<<Index>>,<<Const2>>]
+
+ /// CHECK-START-ARM64: void Main.constantIndexSet(int[]) instruction_simplifier_arm64 (after)
+ /// CHECK: <<Const2:i\d+>> IntConstant 2
+ /// CHECK: <<Array:l\d+>> NullCheck
+ /// CHECK: <<Index:i\d+>> BoundsCheck
+ /// CHECK-NOT: Arm64IntermediateAddress
+ /// CHECK: ArraySet [<<Array>>,<<Index>>,<<Const2>>]
+
+
+ public static void constantIndexSet(int array[]) {
+ array[1] = 2;
+ }
+
+ /**
+ * Test basic splitting of HArrayGet.
+ */
+
+ /// CHECK-START-ARM64: int Main.get(int[], int) instruction_simplifier_arm64 (before)
+ /// CHECK: <<Array:l\d+>> NullCheck
+ /// CHECK: <<Index:i\d+>> BoundsCheck
+ /// CHECK: ArrayGet [<<Array>>,<<Index>>]
+
+ /// CHECK-START-ARM64: int Main.get(int[], int) instruction_simplifier_arm64 (after)
+ /// CHECK: <<DataOffset:i\d+>> IntConstant
+ /// CHECK: <<Array:l\d+>> NullCheck
+ /// CHECK: <<Index:i\d+>> BoundsCheck
+ /// CHECK: <<Address:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+ /// CHECK-NEXT: ArrayGet [<<Address>>,<<Index>>]
+
+ public static int get(int array[], int index) {
+ return array[index];
+ }
+
+ /**
+ * Test basic splitting of HArraySet.
+ */
+
+ /// CHECK-START-ARM64: void Main.set(int[], int, int) instruction_simplifier_arm64 (before)
+ /// CHECK: ParameterValue
+ /// CHECK: ParameterValue
+ /// CHECK: <<Arg:i\d+>> ParameterValue
+ /// CHECK: <<Array:l\d+>> NullCheck
+ /// CHECK: <<Index:i\d+>> BoundsCheck
+ /// CHECK: ArraySet [<<Array>>,<<Index>>,<<Arg>>]
+
+ /// CHECK-START-ARM64: void Main.set(int[], int, int) instruction_simplifier_arm64 (after)
+ /// CHECK: ParameterValue
+ /// CHECK: ParameterValue
+ /// CHECK: <<Arg:i\d+>> ParameterValue
+ /// CHECK: <<DataOffset:i\d+>> IntConstant
+ /// CHECK: <<Array:l\d+>> NullCheck
+ /// CHECK: <<Index:i\d+>> BoundsCheck
+ /// CHECK: <<Address:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+ /// CHECK-NEXT: ArraySet [<<Address>>,<<Index>>,<<Arg>>]
+
+ public static void set(int array[], int index, int value) {
+ array[index] = value;
+ }
+
+ /**
+ * Check that the intermediate address can be shared after GVN.
+ */
+
+ /// CHECK-START-ARM64: void Main.getSet(int[], int) instruction_simplifier_arm64 (before)
+ /// CHECK: <<Const1:i\d+>> IntConstant 1
+ /// CHECK: <<Array:l\d+>> NullCheck
+ /// CHECK: <<Index:i\d+>> BoundsCheck
+ /// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Array>>,<<Index>>]
+ /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+ /// CHECK: ArraySet [<<Array>>,<<Index>>,<<Add>>]
+
+ /// CHECK-START-ARM64: void Main.getSet(int[], int) instruction_simplifier_arm64 (after)
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant
+ /// CHECK: <<Array:l\d+>> NullCheck
+ /// CHECK: <<Index:i\d+>> BoundsCheck
+ /// CHECK: <<Address1:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+ /// CHECK-NEXT: <<ArrayGet:i\d+>> ArrayGet [<<Address1>>,<<Index>>]
+ /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+ /// CHECK: <<Address2:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+ /// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+
+ /// CHECK-START-ARM64: void Main.getSet(int[], int) GVN_after_arch (after)
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant
+ /// CHECK: <<Array:l\d+>> NullCheck
+ /// CHECK: <<Index:i\d+>> BoundsCheck
+ /// CHECK: <<Address:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+ /// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Address>>,<<Index>>]
+ /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+ /// CHECK-NOT: Arm64IntermediateAddress
+ /// CHECK: ArraySet [<<Address>>,<<Index>>,<<Add>>]
+
+ public static void getSet(int array[], int index) {
+ array[index] = array[index] + 1;
+ }
+
+ /**
+ * Check that the intermediate address computation is not reordered or merged
+ * across IRs that can trigger GC.
+ */
+
+ /// CHECK-START-ARM64: int[] Main.accrossGC(int[], int) instruction_simplifier_arm64 (before)
+ /// CHECK: <<Const1:i\d+>> IntConstant 1
+ /// CHECK: <<Array:l\d+>> NullCheck
+ /// CHECK: <<Index:i\d+>> BoundsCheck
+ /// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Array>>,<<Index>>]
+ /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+ /// CHECK: NewArray
+ /// CHECK: ArraySet [<<Array>>,<<Index>>,<<Add>>]
+
+ /// CHECK-START-ARM64: int[] Main.accrossGC(int[], int) instruction_simplifier_arm64 (after)
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant
+ /// CHECK: <<Array:l\d+>> NullCheck
+ /// CHECK: <<Index:i\d+>> BoundsCheck
+ /// CHECK: <<Address1:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+ /// CHECK-NEXT: <<ArrayGet:i\d+>> ArrayGet [<<Address1>>,<<Index>>]
+ /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+ /// CHECK: NewArray
+ /// CHECK: <<Address2:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+ /// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+
+ /// CHECK-START-ARM64: int[] Main.accrossGC(int[], int) GVN_after_arch (after)
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant
+ /// CHECK: <<Array:l\d+>> NullCheck
+ /// CHECK: <<Index:i\d+>> BoundsCheck
+ /// CHECK: <<Address1:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+ /// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Address1>>,<<Index>>]
+ /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+ /// CHECK: NewArray
+ /// CHECK: <<Address2:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+ /// CHECK: ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+
+ public static int[] accrossGC(int array[], int index) {
+ int tmp = array[index] + 1;
+ int[] new_array = new int[1];
+ array[index] = tmp;
+ return new_array;
+ }
+
+ /**
+ * Test that the intermediate address is shared between array accesses after
+   * the bounds checks have been removed by BCE.
+ */
+
+ /// CHECK-START-ARM64: int Main.canMergeAfterBCE1() instruction_simplifier_arm64 (before)
+ /// CHECK: <<Const1:i\d+>> IntConstant 1
+ /// CHECK: <<Array:l\d+>> NewArray
+ /// CHECK: <<Index:i\d+>> Phi
+ /// CHECK: If
+ // -------------- Loop
+ /// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Array>>,<<Index>>]
+ /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+ /// CHECK: ArraySet [<<Array>>,<<Index>>,<<Add>>]
+
+ // By the time we reach the architecture-specific instruction simplifier, BCE
+ // has removed the bounds checks in the loop.
+
+ // Note that we do not care that the `DataOffset` is `12`. But if we do not
+ // specify it and any other `IntConstant` appears before that instruction,
+ // checker will match the previous `IntConstant`, and we will thus fail the
+ // check.
+
+ /// CHECK-START-ARM64: int Main.canMergeAfterBCE1() instruction_simplifier_arm64 (after)
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant 12
+ /// CHECK: <<Array:l\d+>> NewArray
+ /// CHECK: <<Index:i\d+>> Phi
+ /// CHECK: If
+ // -------------- Loop
+ /// CHECK: <<Address1:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+ /// CHECK-NEXT: <<ArrayGet:i\d+>> ArrayGet [<<Address1>>,<<Index>>]
+ /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+ /// CHECK: <<Address2:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+ /// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+
+ /// CHECK-START-ARM64: int Main.canMergeAfterBCE1() GVN_after_arch (after)
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant 12
+ /// CHECK: <<Array:l\d+>> NewArray
+ /// CHECK: <<Index:i\d+>> Phi
+ /// CHECK: If
+ // -------------- Loop
+ /// CHECK: <<Address:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+ /// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Address>>,<<Index>>]
+ /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>]
+ /// CHECK-NOT: Arm64IntermediateAddress
+ /// CHECK: ArraySet [<<Address>>,<<Index>>,<<Add>>]
+
+ public static int canMergeAfterBCE1() {
+ int[] array = {0, 1, 2, 3};
+ for (int i = 0; i < array.length; i++) {
+ array[i] = array[i] + 1;
+ }
+ return array[array.length - 1];
+ }
+
+ /**
+ * This test case is similar to `canMergeAfterBCE1`, but with different
+ * indexes for the accesses.
+ */
+
+ /// CHECK-START-ARM64: int Main.canMergeAfterBCE2() instruction_simplifier_arm64 (before)
+ /// CHECK: <<Const1:i\d+>> IntConstant 1
+ /// CHECK: <<Array:l\d+>> NewArray
+ /// CHECK: <<Index:i\d+>> Phi
+ /// CHECK: If
+ // -------------- Loop
+ /// CHECK-DAG: <<Index1:i\d+>> Add [<<Index>>,<<Const1>>]
+ /// CHECK-DAG: <<ArrayGetI:i\d+>> ArrayGet [<<Array>>,<<Index>>]
+ /// CHECK-DAG: <<ArrayGetI1:i\d+>> ArrayGet [<<Array>>,<<Index1>>]
+ /// CHECK: <<Add:i\d+>> Add [<<ArrayGetI>>,<<ArrayGetI1>>]
+ /// CHECK: ArraySet [<<Array>>,<<Index1>>,<<Add>>]
+
+ // Note that we do not care that the `DataOffset` is `12`. But if we do not
+ // specify it and any other `IntConstant` appears before that instruction,
+ // checker will match the previous `IntConstant`, and we will thus fail the
+ // check.
+
+ /// CHECK-START-ARM64: int Main.canMergeAfterBCE2() instruction_simplifier_arm64 (after)
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant 12
+ /// CHECK: <<Array:l\d+>> NewArray
+ /// CHECK: <<Index:i\d+>> Phi
+ /// CHECK: If
+ // -------------- Loop
+ /// CHECK-DAG: <<Index1:i\d+>> Add [<<Index>>,<<Const1>>]
+ /// CHECK-DAG: <<Address1:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+ /// CHECK-DAG: <<ArrayGetI:i\d+>> ArrayGet [<<Address1>>,<<Index>>]
+ /// CHECK-DAG: <<Address2:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+ /// CHECK-DAG: <<ArrayGetI1:i\d+>> ArrayGet [<<Address2>>,<<Index1>>]
+ /// CHECK: <<Add:i\d+>> Add [<<ArrayGetI>>,<<ArrayGetI1>>]
+ /// CHECK: <<Address3:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+ /// CHECK: ArraySet [<<Address3>>,<<Index1>>,<<Add>>]
+
+ /// CHECK-START-ARM64: int Main.canMergeAfterBCE2() GVN_after_arch (after)
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant 12
+ /// CHECK: <<Array:l\d+>> NewArray
+ /// CHECK: <<Index:i\d+>> Phi
+ /// CHECK: If
+ // -------------- Loop
+ /// CHECK-DAG: <<Index1:i\d+>> Add [<<Index>>,<<Const1>>]
+ /// CHECK-DAG: <<Address:l\d+>> Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+ /// CHECK-DAG: <<ArrayGetI:i\d+>> ArrayGet [<<Address>>,<<Index>>]
+ /// CHECK-DAG: <<ArrayGetI1:i\d+>> ArrayGet [<<Address>>,<<Index1>>]
+ /// CHECK: <<Add:i\d+>> Add [<<ArrayGetI>>,<<ArrayGetI1>>]
+ /// CHECK: ArraySet [<<Address>>,<<Index1>>,<<Add>>]
+
+ // There should be only one intermediate address computation in the loop.
+
+ /// CHECK-START-ARM64: int Main.canMergeAfterBCE2() GVN_after_arch (after)
+ /// CHECK: Arm64IntermediateAddress
+ /// CHECK-NOT: Arm64IntermediateAddress
+
+ public static int canMergeAfterBCE2() {
+ int[] array = {0, 1, 2, 3};
+ for (int i = 0; i < array.length - 1; i++) {
+ array[i + 1] = array[i] + array[i + 1];
+ }
+ return array[array.length - 1];
+ }
+
+
+ public static void main(String[] args) {
+ int[] array = {123, 456, 789};
+
+ assertIntEquals(456, constantIndexGet(array));
+
+ constantIndexSet(array);
+ assertIntEquals(2, array[1]);
+
+ assertIntEquals(789, get(array, 2));
+
+ set(array, 1, 456);
+ assertIntEquals(456, array[1]);
+
+ getSet(array, 0);
+ assertIntEquals(124, array[0]);
+
+ accrossGC(array, 0);
+ assertIntEquals(125, array[0]);
+
+ assertIntEquals(4, canMergeAfterBCE1());
+ assertIntEquals(6, canMergeAfterBCE2());
+ }
+}
diff --git a/test/530-checker-lse/expected.txt b/test/530-checker-lse/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/530-checker-lse/expected.txt
diff --git a/test/530-checker-lse/info.txt b/test/530-checker-lse/info.txt
new file mode 100644
index 0000000..5b45e20
--- /dev/null
+++ b/test/530-checker-lse/info.txt
@@ -0,0 +1 @@
+Checker test for testing load-store elimination.
diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java
new file mode 100644
index 0000000..c766aaa
--- /dev/null
+++ b/test/530-checker-lse/src/Main.java
@@ -0,0 +1,512 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Circle {
+ Circle(double radius) {
+ this.radius = radius;
+ }
+ public double getArea() {
+ return radius * radius * Math.PI;
+ }
+ private double radius;
+};
+
+class TestClass {
+ TestClass() {
+ }
+ TestClass(int i, int j) {
+ this.i = i;
+ this.j = j;
+ }
+ int i;
+ int j;
+ volatile int k;
+ TestClass next;
+ static int si;
+};
+
+class SubTestClass extends TestClass {
+ int k;
+};
+
+class TestClass2 {
+ int i;
+ int j;
+};
+
+public class Main {
+
+ /// CHECK-START: double Main.calcCircleArea(double) load_store_elimination (before)
+ /// CHECK: NewInstance
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+
+ /// CHECK-START: double Main.calcCircleArea(double) load_store_elimination (after)
+ /// CHECK: NewInstance
+ /// CHECK: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldGet
+
+ static double calcCircleArea(double radius) {
+ return new Circle(radius).getArea();
+ }
+
+ /// CHECK-START: int Main.test1(TestClass, TestClass) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldGet
+
+ /// CHECK-START: int Main.test1(TestClass, TestClass) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK-NOT: NullCheck
+ /// CHECK-NOT: InstanceFieldGet
+
+ // Different fields shouldn't alias.
+ static int test1(TestClass obj1, TestClass obj2) {
+ obj1.i = 1;
+ obj2.j = 2;
+ return obj1.i + obj2.j;
+ }
+
+ /// CHECK-START: int Main.test2(TestClass) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+
+ /// CHECK-START: int Main.test2(TestClass) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK-NOT: NullCheck
+ /// CHECK-NOT: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldGet
+
+ // Redundant store of the same value.
+ static int test2(TestClass obj) {
+ obj.j = 1;
+ obj.j = 1;
+ return obj.j;
+ }
+
+ /// CHECK-START: int Main.test3(TestClass) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: NewInstance
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldGet
+
+ /// CHECK-START: int Main.test3(TestClass) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: NewInstance
+ /// CHECK: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldGet
+
+ // A new allocation shouldn't alias with pre-existing values.
+ static int test3(TestClass obj) {
+ obj.i = 1;
+ obj.next.j = 2;
+ TestClass obj2 = new TestClass();
+ obj2.i = 3;
+ obj2.j = 4;
+ return obj.i + obj.next.j + obj2.i + obj2.j;
+ }
+
+ /// CHECK-START: int Main.test4(TestClass, boolean) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: Return
+ /// CHECK: InstanceFieldSet
+
+ /// CHECK-START: int Main.test4(TestClass, boolean) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK-NOT: NullCheck
+ /// CHECK-NOT: InstanceFieldGet
+ /// CHECK: Return
+ /// CHECK: InstanceFieldSet
+
+ // Set and merge the same value in two branches.
+ static int test4(TestClass obj, boolean b) {
+ if (b) {
+ obj.i = 1;
+ } else {
+ obj.i = 1;
+ }
+ return obj.i;
+ }
+
+ /// CHECK-START: int Main.test5(TestClass, boolean) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: Return
+ /// CHECK: InstanceFieldSet
+
+ /// CHECK-START: int Main.test5(TestClass, boolean) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: Return
+ /// CHECK: InstanceFieldSet
+
+ // Set and merge different values in two branches.
+ static int test5(TestClass obj, boolean b) {
+ if (b) {
+ obj.i = 1;
+ } else {
+ obj.i = 2;
+ }
+ return obj.i;
+ }
+
+ /// CHECK-START: int Main.test6(TestClass, TestClass, boolean) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldGet
+
+ /// CHECK-START: int Main.test6(TestClass, TestClass, boolean) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK-NOT: NullCheck
+ /// CHECK-NOT: InstanceFieldGet
+
+ // Setting the same value doesn't clear the value for aliased locations.
+ static int test6(TestClass obj1, TestClass obj2, boolean b) {
+ obj1.i = 1;
+ obj1.j = 2;
+ if (b) {
+ obj2.j = 2;
+ }
+ return obj1.j + obj2.j;
+ }
+
+ /// CHECK-START: int Main.test7(TestClass) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+
+ /// CHECK-START: int Main.test7(TestClass) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+
+ // Invocation should kill values in non-singleton heap locations.
+ static int test7(TestClass obj) {
+ obj.i = 1;
+ System.out.print("");
+ return obj.i;
+ }
+
+ /// CHECK-START: int Main.test8() load_store_elimination (before)
+ /// CHECK: NewInstance
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InvokeVirtual
+ /// CHECK: InstanceFieldGet
+
+ /// CHECK-START: int Main.test8() load_store_elimination (after)
+ /// CHECK: NewInstance
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InvokeVirtual
+ /// CHECK-NOT: NullCheck
+ /// CHECK-NOT: InstanceFieldGet
+
+ // Invocation should not kill values in singleton heap locations.
+ static int test8() {
+ TestClass obj = new TestClass();
+ obj.i = 1;
+ System.out.print("");
+ return obj.i;
+ }
+
+ /// CHECK-START: int Main.test9(TestClass) load_store_elimination (before)
+ /// CHECK: NewInstance
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+
+ /// CHECK-START: int Main.test9(TestClass) load_store_elimination (after)
+ /// CHECK: NewInstance
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+
+ // Invocation should kill values in non-singleton heap locations.
+ static int test9(TestClass obj) {
+ TestClass obj2 = new TestClass();
+ obj2.i = 1;
+ obj.next = obj2;
+ System.out.print("");
+ return obj2.i;
+ }
+
+ /// CHECK-START: int Main.test10(TestClass) load_store_elimination (before)
+ /// CHECK: StaticFieldGet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: StaticFieldSet
+ /// CHECK: InstanceFieldGet
+
+ /// CHECK-START: int Main.test10(TestClass) load_store_elimination (after)
+ /// CHECK: StaticFieldGet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: StaticFieldSet
+ /// CHECK-NOT: NullCheck
+ /// CHECK-NOT: InstanceFieldGet
+
+ // Static fields shouldn't alias with instance fields.
+ static int test10(TestClass obj) {
+ TestClass.si += obj.i;
+ return obj.i;
+ }
+
+ /// CHECK-START: int Main.test11(TestClass) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+
+ /// CHECK-START: int Main.test11(TestClass) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK-NOT: NullCheck
+ /// CHECK-NOT: InstanceFieldGet
+
+ // Loop without heap writes.
+ // obj.i is actually hoisted to the loop pre-header by licm already.
+ static int test11(TestClass obj) {
+ obj.i = 1;
+ int sum = 0;
+ for (int i = 0; i < 10; i++) {
+ sum += obj.i;
+ }
+ return sum;
+ }
+
+ /// CHECK-START: int Main.test12(TestClass, TestClass) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldSet
+
+ /// CHECK-START: int Main.test12(TestClass, TestClass) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldSet
+
+ // Loop with heap writes.
+ static int test12(TestClass obj1, TestClass obj2) {
+ obj1.i = 1;
+ int sum = 0;
+ for (int i = 0; i < 10; i++) {
+ sum += obj1.i;
+ obj2.i = sum;
+ }
+ return sum;
+ }
+
+ /// CHECK-START: int Main.test13(TestClass, TestClass2) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldGet
+
+ /// CHECK-START: int Main.test13(TestClass, TestClass2) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK-NOT: NullCheck
+ /// CHECK-NOT: InstanceFieldGet
+
+ // Different classes shouldn't alias.
+ static int test13(TestClass obj1, TestClass2 obj2) {
+ obj1.i = 1;
+ obj2.i = 2;
+ return obj1.i + obj2.i;
+ }
+
+ /// CHECK-START: int Main.test14(TestClass, SubTestClass) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+
+ /// CHECK-START: int Main.test14(TestClass, SubTestClass) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+
+ // Subclass may alias with super class.
+ static int test14(TestClass obj1, SubTestClass obj2) {
+ obj1.i = 1;
+ obj2.i = 2;
+ return obj1.i;
+ }
+
+ /// CHECK-START: int Main.test15() load_store_elimination (before)
+ /// CHECK: StaticFieldSet
+ /// CHECK: StaticFieldSet
+ /// CHECK: StaticFieldGet
+
+ /// CHECK-START: int Main.test15() load_store_elimination (after)
+ /// CHECK: <<Const2:i\d+>> IntConstant 2
+ /// CHECK: StaticFieldSet
+ /// CHECK: StaticFieldSet
+ /// CHECK-NOT: StaticFieldGet
+ /// CHECK: Return [<<Const2>>]
+
+ // Static field access from subclass's name.
+ static int test15() {
+ TestClass.si = 1;
+ SubTestClass.si = 2;
+ return TestClass.si;
+ }
+
+ /// CHECK-START: int Main.test16() load_store_elimination (before)
+ /// CHECK: NewInstance
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldGet
+
+ /// CHECK-START: int Main.test16() load_store_elimination (after)
+ /// CHECK: NewInstance
+  /// CHECK-NOT: InstanceFieldSet
+  /// CHECK-NOT: InstanceFieldGet
+
+ // Test inlined constructor.
+ static int test16() {
+ TestClass obj = new TestClass(1, 2);
+ return obj.i + obj.j;
+ }
+
+ /// CHECK-START: int Main.test17() load_store_elimination (before)
+ /// CHECK: NewInstance
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+
+ /// CHECK-START: int Main.test17() load_store_elimination (after)
+ /// CHECK: <<Const0:i\d+>> IntConstant 0
+ /// CHECK: NewInstance
+  /// CHECK-NOT: InstanceFieldSet
+  /// CHECK-NOT: InstanceFieldGet
+ /// CHECK: Return [<<Const0>>]
+
+ // Test getting default value.
+ static int test17() {
+ TestClass obj = new TestClass();
+ obj.j = 1;
+ return obj.i;
+ }
+
+ /// CHECK-START: int Main.test18(TestClass) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+
+ /// CHECK-START: int Main.test18(TestClass) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+
+ // Volatile field load/store shouldn't be eliminated.
+ static int test18(TestClass obj) {
+ obj.k = 1;
+ return obj.k;
+ }
+
+ /// CHECK-START: float Main.test19(float[], float[]) load_store_elimination (before)
+ /// CHECK: <<IntTypeValue:i\d+>> ArrayGet
+ /// CHECK: ArraySet
+ /// CHECK: <<FloatTypeValue:f\d+>> ArrayGet
+
+ /// CHECK-START: float Main.test19(float[], float[]) load_store_elimination (after)
+ /// CHECK: <<IntTypeValue:i\d+>> ArrayGet
+ /// CHECK: ArraySet
+ /// CHECK: <<FloatTypeValue:f\d+>> ArrayGet
+
+ // I/F, J/D aliasing should keep the load/store.
+ static float test19(float[] fa1, float[] fa2) {
+ fa1[0] = fa2[0];
+ return fa1[0];
+ }
+
+ /// CHECK-START: TestClass Main.test20() load_store_elimination (before)
+ /// CHECK: NewInstance
+ /// CHECK: InstanceFieldSet
+
+ /// CHECK-START: TestClass Main.test20() load_store_elimination (after)
+ /// CHECK: NewInstance
+ /// CHECK-NOT: InstanceFieldSet
+
+  // Storing the default heap value is redundant if the heap location already
+  // holds the default heap value.
+ static TestClass test20() {
+ TestClass obj = new TestClass();
+ obj.i = 0;
+ return obj;
+ }
+
+ public static void assertIntEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertFloatEquals(float expected, float result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertDoubleEquals(double expected, double result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void main(String[] args) {
+ assertDoubleEquals(Math.PI * Math.PI * Math.PI, calcCircleArea(Math.PI));
+    assertIntEquals(3, test1(new TestClass(), new TestClass()));
+    assertIntEquals(1, test2(new TestClass()));
+ TestClass obj1 = new TestClass();
+ TestClass obj2 = new TestClass();
+ obj1.next = obj2;
+    assertIntEquals(10, test3(obj1));
+    assertIntEquals(1, test4(new TestClass(), true));
+    assertIntEquals(1, test4(new TestClass(), false));
+    assertIntEquals(1, test5(new TestClass(), true));
+    assertIntEquals(2, test5(new TestClass(), false));
+    assertIntEquals(4, test6(new TestClass(), new TestClass(), true));
+    assertIntEquals(2, test6(new TestClass(), new TestClass(), false));
+    assertIntEquals(1, test7(new TestClass()));
+    assertIntEquals(1, test8());
+ obj1 = new TestClass();
+ obj2 = new TestClass();
+ obj1.next = obj2;
+    assertIntEquals(1, test9(new TestClass()));
+    assertIntEquals(3, test10(new TestClass(3, 4)));
+    assertIntEquals(3, TestClass.si);
+    assertIntEquals(10, test11(new TestClass()));
+    assertIntEquals(10, test12(new TestClass(), new TestClass()));
+    assertIntEquals(3, test13(new TestClass(), new TestClass2()));
+ SubTestClass obj3 = new SubTestClass();
+    assertIntEquals(2, test14(obj3, obj3));
+    assertIntEquals(2, test15());
+    assertIntEquals(3, test16());
+    assertIntEquals(0, test17());
+    assertIntEquals(1, test18(new TestClass()));
+ float[] fa1 = { 0.8f };
+ float[] fa2 = { 1.8f };
+    assertFloatEquals(1.8f, test19(fa1, fa2));
+    assertFloatEquals(0, test20().i);
+ }
+}
diff --git a/test/532-checker-nonnull-arrayset/src/Main.java b/test/532-checker-nonnull-arrayset/src/Main.java
index 7d8fff4..2c701bb 100644
--- a/test/532-checker-nonnull-arrayset/src/Main.java
+++ b/test/532-checker-nonnull-arrayset/src/Main.java
@@ -29,10 +29,10 @@
/// CHECK-NOT: test
/// CHECK: ReturnVoid
public static void test() {
- Object[] array = new Object[1];
+ Object[] array = new Object[2];
Object nonNull = array[0];
nonNull.getClass(); // Ensure nonNull has an implicit null check.
- array[0] = nonNull;
+ array[1] = nonNull;
}
public static void main(String[] args) {}
diff --git a/test/955-lambda-smali/run b/test/955-lambda-smali/run
index b754680..2fb2f89 100755
--- a/test/955-lambda-smali/run
+++ b/test/955-lambda-smali/run
@@ -15,4 +15,4 @@
# limitations under the License.
# Ensure that the lambda experimental opcodes are turned on for dalvikvm and dex2oat
-${RUN} "$@" --runtime-option -Xexperimental:lambdas -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:lambdas
+${RUN} "$@" --experimental lambdas
diff --git a/test/960-default-smali/build b/test/960-default-smali/build
index c786687..06692f9 100755
--- a/test/960-default-smali/build
+++ b/test/960-default-smali/build
@@ -18,7 +18,7 @@
set -e
# Generate the smali Main.smali file or fail
-./util-src/generate_smali.py ./smali
+${ANDROID_BUILD_TOP}/art/test/utils/python/generate_smali_main.py ./smali
if [[ $@ == *"--jvm"* ]]; then
# Build the Java files if we are running a --jvm test
@@ -29,5 +29,5 @@
fi
# Build the smali files and make a dex
-${SMALI} -JXmx256m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+${SMALI} -JXmx256m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
zip "$TEST_NAME.jar" classes.dex
diff --git a/test/960-default-smali/info.txt b/test/960-default-smali/info.txt
index eb596e2..9583abb 100644
--- a/test/960-default-smali/info.txt
+++ b/test/960-default-smali/info.txt
@@ -2,15 +2,16 @@
Obviously needs to run under ART or a Java 8 Language runtime and compiler.
-When run a Main.smali file will be generated by the util-src/generate_smali.py
-script. If we run with --jvm we will use the tools/extract-embedded-java script to
-turn the smali into equivalent Java using the embedded Java code.
+When run, a Main.smali file will be generated by the
+test/utils/python/generate_smali_main.py script. If we run with --jvm, we will
+use the tools/extract-embedded-java script to turn the smali into equivalent
+Java using the embedded Java code.
When updating be sure to write the equivalent Java code in comments of the smali
files.
-Care should be taken when updating the generate_smali.py script. It must always
-return equivalent output when run multiple times.
+Care should be taken when updating the generate_smali_main.py script. It must
+always return equivalent output when run multiple times.
To update the test files do the following steps:
<Add new classes/interfaces>
diff --git a/test/960-default-smali/run b/test/960-default-smali/run
index e378b06..22f6800 100755
--- a/test/960-default-smali/run
+++ b/test/960-default-smali/run
@@ -14,8 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-if echo $@ | grep -q -- "--jvm"; then
- ${RUN} "$@"
-else
- ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
-fi
+${RUN} --experimental default-methods "$@"
diff --git a/test/961-default-iface-resolution-generated/build b/test/961-default-iface-resolution-generated/build
index 707c17e..5eb851f 100755
--- a/test/961-default-iface-resolution-generated/build
+++ b/test/961-default-iface-resolution-generated/build
@@ -40,7 +40,7 @@
fi
# Build the smali files and make a dex
-${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+${SMALI} -JXmx512m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
zip $TEST_NAME.jar classes.dex
# Reset the ulimit back to its initial value
diff --git a/test/961-default-iface-resolution-generated/run b/test/961-default-iface-resolution-generated/run
index e378b06..22f6800 100755
--- a/test/961-default-iface-resolution-generated/run
+++ b/test/961-default-iface-resolution-generated/run
@@ -14,8 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-if echo $@ | grep -q -- "--jvm"; then
- ${RUN} "$@"
-else
- ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
-fi
+${RUN} --experimental default-methods "$@"
diff --git a/test/962-iface-static/build b/test/962-iface-static/build
index 5ad82f7..06bb3bd 100755
--- a/test/962-iface-static/build
+++ b/test/962-iface-static/build
@@ -26,5 +26,5 @@
fi
# Build the smali files and make a dex
-${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+${SMALI} -JXmx512m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
zip $TEST_NAME.jar classes.dex
diff --git a/test/962-iface-static/run b/test/962-iface-static/run
index e713708..d37737f 100755
--- a/test/962-iface-static/run
+++ b/test/962-iface-static/run
@@ -14,8 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-if echo $@ | grep -q -- "--jvm"; then
- ${RUN} "$@"
-else
- ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
-fi
+${RUN} --experimental default-methods "$@"
diff --git a/test/963-default-range-smali/build b/test/963-default-range-smali/build
index 5ad82f7..06bb3bd 100755
--- a/test/963-default-range-smali/build
+++ b/test/963-default-range-smali/build
@@ -26,5 +26,5 @@
fi
# Build the smali files and make a dex
-${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+${SMALI} -JXmx512m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
zip $TEST_NAME.jar classes.dex
diff --git a/test/963-default-range-smali/run b/test/963-default-range-smali/run
index e713708..d37737f 100755
--- a/test/963-default-range-smali/run
+++ b/test/963-default-range-smali/run
@@ -14,8 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-if echo $@ | grep -q -- "--jvm"; then
- ${RUN} "$@"
-else
- ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
-fi
+${RUN} --experimental default-methods "$@"
diff --git a/test/964-default-iface-init-generated/build b/test/964-default-iface-init-generated/build
index deef803..b0fbe4b 100755
--- a/test/964-default-iface-init-generated/build
+++ b/test/964-default-iface-init-generated/build
@@ -38,7 +38,7 @@
fi
# Build the smali files and make a dex
-${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+${SMALI} -JXmx512m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
zip $TEST_NAME.jar classes.dex
# Reset the ulimit back to its initial value
diff --git a/test/964-default-iface-init-generated/run b/test/964-default-iface-init-generated/run
index e378b06..22f6800 100755
--- a/test/964-default-iface-init-generated/run
+++ b/test/964-default-iface-init-generated/run
@@ -14,8 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-if echo $@ | grep -q -- "--jvm"; then
- ${RUN} "$@"
-else
- ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
-fi
+${RUN} --experimental default-methods "$@"
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index e114a2e..5bbbbc1 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -41,8 +41,7 @@
ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
TEST_ART_RUN_TEST_DEPENDENCIES += \
- $(JACK_JAR) \
- $(JACK_LAUNCHER_JAR) \
+ $(JACK) \
$(JILL_JAR)
endif
@@ -61,15 +60,13 @@
run_test_options += --build-with-javac-dx
endif
$$(dmart_target): PRIVATE_RUN_TEST_OPTIONS := $$(run_test_options)
-$$(dmart_target): $(TEST_ART_RUN_TEST_DEPENDENCIES)
+$$(dmart_target): $(TEST_ART_RUN_TEST_DEPENDENCIES) $(TARGET_JACK_CLASSPATH_DEPENDENCIES)
$(hide) rm -rf $$(dir $$@) && mkdir -p $$(dir $$@)
$(hide) DX=$(abspath $(DX)) JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) \
SMALI=$(abspath $(HOST_OUT_EXECUTABLES)/smali) \
DXMERGER=$(abspath $(HOST_OUT_EXECUTABLES)/dexmerger) \
JACK=$(abspath $(JACK)) \
- JACK_VM_COMMAND="$(JACK_VM) $(DEFAULT_JACK_VM_ARGS) $(JAVA_TMPDIR_ARG) -jar $(abspath $(JACK_LAUNCHER_JAR)) " \
JACK_CLASSPATH=$(TARGET_JACK_CLASSPATH) \
- JACK_JAR=$(abspath $(JACK_JAR)) \
JILL_JAR=$(abspath $(JILL_JAR)) \
$(LOCAL_PATH)/run-test $$(PRIVATE_RUN_TEST_OPTIONS) --output-path $$(abspath $$(dir $$@)) $(1)
$(hide) touch $$@
@@ -302,10 +299,14 @@
TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS :=
# Tests that are broken with GC stress.
-# 137-cfi needs to unwind a second forked process. We're using a primitive sleep to wait till we
-# hope the second process got into the expected state. The slowness of gcstress makes this bad.
+# * 137-cfi needs to unwind a second forked process. It uses a primitive sleep in the hope that the
+#   second process has reached the expected state by then; the slowness of gcstress makes this
+#   unreliable.
+# * 961-default-iface-resolution-generated is a very long test that will often take more than the
+#   timeout to run when gcstress is enabled, because gcstress significantly slows down the
+#   allocations that this test performs in large numbers.
TEST_ART_BROKEN_GCSTRESS_RUN_TESTS := \
- 137-cfi
+ 137-cfi \
+ 961-default-iface-resolution-generated
ifneq (,$(filter gcstress,$(GC_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -440,55 +441,12 @@
# Known broken tests for the mips32 optimizing compiler backend.
TEST_ART_BROKEN_OPTIMIZING_MIPS_RUN_TESTS := \
441-checker-inliner \
- 442-checker-constant-folding \
- 444-checker-nce \
- 445-checker-licm \
- 446-checker-inliner2 \
- 447-checker-inliner3 \
449-checker-bce \
- 450-checker-types \
- 455-checker-gvn \
- 458-checker-instruction-simplification \
- 462-checker-inlining-across-dex-files \
- 463-checker-boolean-simplifier \
- 464-checker-inline-sharpen-calls \
- 465-checker-clinit-gvn \
- 468-checker-bool-simplifier-regression \
- 473-checker-inliner-constants \
- 474-checker-boolean-input \
- 476-checker-ctor-memory-barrier \
- 477-checker-bound-type \
- 478-checker-clinit-check-pruning \
- 478-checker-inliner-nested-loop \
- 480-checker-dead-blocks \
- 482-checker-loop-back-edge-use \
- 484-checker-register-hints \
- 485-checker-dce-loop-update \
- 485-checker-dce-switch \
- 486-checker-must-do-null-check \
- 487-checker-inline-calls \
- 488-checker-inline-recursive-calls \
- 490-checker-inline \
- 492-checker-inline-invoke-interface \
- 493-checker-inline-invoke-interface \
- 494-checker-instanceof-tests \
- 495-checker-checkcast-tests \
- 496-checker-inlining-and-class-loader \
- 508-checker-disassembly \
510-checker-try-catch \
- 517-checker-builder-fallthrough \
521-checker-array-set-null \
- 522-checker-regression-monitor-exit \
- 523-checker-can-throw-regression \
- 525-checker-arrays-and-fields \
- 526-checker-caller-callee-regs \
529-checker-unresolved \
- 530-checker-loops \
- 530-checker-regression-reftype-final \
- 532-checker-nonnull-arrayset \
534-checker-bce-deoptimization \
536-checker-intrinsic-optimization \
- 537-checker-debuggable \
ifeq (mips,$(TARGET_ARCH))
ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
@@ -691,13 +649,13 @@
uc_host_or_target := HOST
test_groups := ART_RUN_TEST_HOST_RULES
run_test_options += --host
- prereq_rule := $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES)
+ prereq_rule := $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES) $(HOST_JACK_CLASSPATH_DEPENDENCIES)
jack_classpath := $(HOST_JACK_CLASSPATH)
else
ifeq ($(1),target)
uc_host_or_target := TARGET
test_groups := ART_RUN_TEST_TARGET_RULES
- prereq_rule := test-art-target-sync
+ prereq_rule := test-art-target-sync $(TARGET_JACK_CLASSPATH_DEPENDENCIES)
jack_classpath := $(TARGET_JACK_CLASSPATH)
else
$$(error found $(1) expected $(TARGET_TYPES))
@@ -913,9 +871,7 @@
SMALI=$(abspath $(HOST_OUT_EXECUTABLES)/smali) \
DXMERGER=$(abspath $(HOST_OUT_EXECUTABLES)/dexmerger) \
JACK=$(abspath $(JACK)) \
- JACK_VM_COMMAND="$(JACK_VM) $(DEFAULT_JACK_VM_ARGS) $(JAVA_TMPDIR_ARG) -jar $(abspath $(JACK_LAUNCHER_JAR)) " \
JACK_CLASSPATH=$$(PRIVATE_JACK_CLASSPATH) \
- JACK_JAR=$(abspath $(JACK_JAR)) \
JILL_JAR=$(abspath $(JILL_JAR)) \
art/test/run-test $$(PRIVATE_RUN_TEST_OPTIONS) $(12) \
&& $$(call ART_TEST_PASSED,$$@) || $$(call ART_TEST_FAILED,$$@)
diff --git a/test/etc/default-build b/test/etc/default-build
index c92402b..4743216 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -96,7 +96,7 @@
if [ -d smali ]; then
# Compile Smali classes
- ${SMALI} -JXmx256m --experimental --api-level 23 --output smali_classes.dex `find smali -name '*.smali'`
+ ${SMALI} -JXmx256m ${SMALI_ARGS} --output smali_classes.dex `find smali -name '*.smali'`
# Don't bother with dexmerger if we provide our own main function in a smali file.
if [ ${SKIP_DX_MERGER} = "false" ]; then
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index fbefa07..280b4bc 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -18,6 +18,7 @@
DEBUGGER="n"
DEV_MODE="n"
DEX2OAT=""
+EXPERIMENTAL=""
FALSE_BIN="/system/bin/false"
FLAGS=""
GDB=""
@@ -196,6 +197,13 @@
FLAGS="${FLAGS} -Xcompiler-option --compile-pic"
COMPILE_FLAGS="${COMPILE_FLAGS} --compile-pic"
shift
+ elif [ "x$1" = "x--experimental" ]; then
+ if [ "$#" -lt 2 ]; then
+ echo "missing --experimental option" 1>&2
+ exit 1
+ fi
+ EXPERIMENTAL="$EXPERIMENTAL $2"
+ shift 2
elif expr "x$1" : "x--" >/dev/null 2>&1; then
echo "unknown $0 option: $1" 1>&2
exit 1
@@ -204,6 +212,13 @@
fi
done
+if [ "$USE_JVM" = "n" ]; then
+ for feature in ${EXPERIMENTAL}; do
+ FLAGS="${FLAGS} -Xexperimental:${feature}"
+ COMPILE_FLAGS="${COMPILE_FLAGS} --runtime-arg -Xexperimental:${feature}"
+ done
+fi
+
if [ "x$1" = "x" ] ; then
MAIN="Main"
else
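
The hunk above adds a --experimental option to run-test-jar: each feature name is collected and, unless the test runs on the JVM, expanded into a runtime -Xexperimental:<feature> flag plus a matching --runtime-arg -Xexperimental:<feature> compiler flag. Below is a minimal Java sketch of that same expansion, for illustration only; the class and variable names are hypothetical and the shell script remains the authoritative implementation.

import java.util.Arrays;
import java.util.List;

class ExperimentalFlagsSketch {  // hypothetical name, not part of the patch
  public static void main(String[] args) {
    // Features collected from repeated "--experimental <feature>" options.
    List<String> experimental = Arrays.asList("lambdas", "default-methods");
    boolean useJvm = false;  // corresponds to USE_JVM="n" in the script

    StringBuilder flags = new StringBuilder();
    StringBuilder compileFlags = new StringBuilder();
    if (!useJvm) {
      for (String feature : experimental) {
        // Runtime flag passed to dalvikvm.
        flags.append(" -Xexperimental:").append(feature);
        // Compiler flag forwarded to dex2oat's runtime.
        compileFlags.append(" --runtime-arg -Xexperimental:").append(feature);
      }
    }
    System.out.println("FLAGS:" + flags);
    System.out.println("COMPILE_FLAGS:" + compileFlags);
  }
}
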
diff --git a/test/run-test b/test/run-test
index 1b71f33..5a43fb0 100755
--- a/test/run-test
+++ b/test/run-test
@@ -46,6 +46,7 @@
export DEX_LOCATION=/data/run-test/${test_dir}
export NEED_DEX="true"
export USE_JACK="false"
+export SMALI_ARGS="--experimental --api-level 23"
# If dx was not set by the environment variable, assume it is in the path.
if [ -z "$DX" ]; then
@@ -82,24 +83,11 @@
export ANDROID_BUILD_TOP=$oldwd
fi
-# If JACK_VM_COMMAND is not set, assume it launches the prebuilt jack-launcher.
-if [ -z "$JACK_VM_COMMAND" ]; then
- if [ ! -z "$TMPDIR" ]; then
- jack_temp_dir="-Djava.io.tmpdir=$TMPDIR"
- fi
- export JACK_VM_COMMAND="java -Dfile.encoding=UTF-8 -Xms2560m -XX:+TieredCompilation $jack_temp_dir -jar $ANDROID_BUILD_TOP/prebuilts/sdk/tools/jack-launcher.jar"
-fi
-
# If JACK_CLASSPATH is not set, assume it only contains core-libart.
if [ -z "$JACK_CLASSPATH" ]; then
export JACK_CLASSPATH="$ANDROID_BUILD_TOP/out/host/common/obj/JAVA_LIBRARIES/core-libart-hostdex_intermediates/classes.jack"
fi
-# If JACK_JAR is not set, assume it is located in the prebuilts directory.
-if [ -z "$JACK_JAR" ]; then
- export JACK_JAR="$ANDROID_BUILD_TOP/prebuilts/sdk/tools/jack.jar"
-fi
-
# If JILL_JAR is not set, assume it is located in the prebuilts directory.
if [ -z "$JILL_JAR" ]; then
export JILL_JAR="$ANDROID_BUILD_TOP/prebuilts/sdk/tools/jill.jar"
diff --git a/test/960-default-smali/util-src/generate_smali.py b/test/utils/python/generate_smali_main.py
similarity index 98%
rename from test/960-default-smali/util-src/generate_smali.py
rename to test/utils/python/generate_smali_main.py
index b2bf1f0..d796d31 100755
--- a/test/960-default-smali/util-src/generate_smali.py
+++ b/test/utils/python/generate_smali_main.py
@@ -15,7 +15,7 @@
# limitations under the License.
"""
-Generate Smali Main file for test 960
+Generate Smali Main file from a classes.xml file.
"""
import os
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index 71366c1..6869b04 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -74,7 +74,7 @@
AHAT_TEST_DUMP_DEPENDENCIES := \
$(ART_HOST_EXECUTABLES) \
$(HOST_OUT_EXECUTABLES)/art \
- $(HOST_CORE_IMG_OUT_BASE)$(CORE_IMG_SUFFIX)
+ $(HOST_CORE_IMG_OUT_BASE)-optimizing-pic$(CORE_IMG_SUFFIX)
$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_ART := $(HOST_OUT_EXECUTABLES)/art
$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_DUMP_JAR := $(AHAT_TEST_DUMP_JAR)
diff --git a/tools/ahat/README.txt b/tools/ahat/README.txt
index 1083c2f..5615f8f 100644
--- a/tools/ahat/README.txt
+++ b/tools/ahat/README.txt
@@ -10,8 +10,6 @@
TODO:
* Add more tips to the help page.
- - Note that only 'app' heap matters, not 'zygote' or 'image'.
- - Say what a dex cache is.
- Recommend how to start looking at a heap dump.
- Say how to enable allocation sites.
- Where to submit feedback, questions, and bug reports.
@@ -24,6 +22,7 @@
* Show site context and heap and class filter in "Objects" view?
* Have a menu at the top of an object view with links to the sections?
* Include ahat version and hprof file in the menu at the top of the page?
+ * Show root types.
* Heaped Table
- Make sortable by clicking on headers.
- Use consistent order for heap columns.
@@ -86,7 +85,6 @@
index.
* What's the difference between getId and getUniqueId?
* I see objects with duplicate references.
- * Don't store stack trace by heap (CL 157252)
* A way to get overall retained size by heap.
* A method Instance.isReachable()
@@ -97,6 +95,9 @@
* Computing, for each instance, the other instances it dominates.
Release History:
+ 0.2 Oct 20, 2015
+ Take into account 'count' and 'offset' when displaying strings.
+
0.1ss Aug 04, 2015
Enable stack allocations code (using custom modified perflib).
Sort objects in 'objects/' with default sort.
diff --git a/tools/ahat/src/AhatSnapshot.java b/tools/ahat/src/AhatSnapshot.java
index 3035ef7..43658f3 100644
--- a/tools/ahat/src/AhatSnapshot.java
+++ b/tools/ahat/src/AhatSnapshot.java
@@ -18,14 +18,12 @@
import com.android.tools.perflib.heap.ClassObj;
import com.android.tools.perflib.heap.Heap;
-import com.android.tools.perflib.heap.HprofParser;
import com.android.tools.perflib.heap.Instance;
import com.android.tools.perflib.heap.RootObj;
import com.android.tools.perflib.heap.Snapshot;
import com.android.tools.perflib.heap.StackFrame;
import com.android.tools.perflib.heap.StackTrace;
-import com.android.tools.perflib.heap.io.HprofBuffer;
-import com.android.tools.perflib.heap.io.MemoryMappedFileBuffer;
+import com.android.tools.perflib.captures.MemoryMappedFileBuffer;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import java.io.File;
@@ -56,8 +54,7 @@
* Create an AhatSnapshot from an hprof file.
*/
public static AhatSnapshot fromHprof(File hprof) throws IOException {
- HprofBuffer buffer = new MemoryMappedFileBuffer(hprof);
- Snapshot snapshot = (new HprofParser(buffer)).parse();
+ Snapshot snapshot = Snapshot.createSnapshot(new MemoryMappedFileBuffer(hprof));
snapshot.computeDominators();
return new AhatSnapshot(snapshot);
}
@@ -185,20 +182,17 @@
// Return the stack where the given instance was allocated.
private static StackTrace getStack(Instance inst) {
- // TODO: return inst.getStack() once perflib is fixed.
- return null;
+ return inst.getStack();
}
// Return the list of stack frames for a stack trace.
private static StackFrame[] getStackFrames(StackTrace stack) {
- // TODO: Use stack.getFrames() once perflib is fixed.
- return null;
+ return stack.getFrames();
}
// Return the serial number of the given stack trace.
private static int getStackTraceSerialNumber(StackTrace stack) {
- // TODO: Use stack.getSerialNumber() once perflib is fixed.
- return 0;
+ return stack.getSerialNumber();
}
// Get the site associated with the given stack id and depth.
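
The hunks above move ahat from the removed HprofParser/HprofBuffer pair to perflib's Snapshot.createSnapshot factory, and re-enable the stack accessors (inst.getStack(), stack.getFrames(), stack.getSerialNumber()) now that perflib supports them. A minimal sketch of the updated loading path follows, using only calls visible in the diff; the wrapper class name is hypothetical.

import com.android.tools.perflib.captures.MemoryMappedFileBuffer;
import com.android.tools.perflib.heap.Snapshot;

import java.io.File;
import java.io.IOException;

// Hypothetical wrapper mirroring AhatSnapshot.fromHprof as changed above.
class SnapshotLoaderSketch {
  static Snapshot load(File hprof) throws IOException {
    // The new perflib entry point replaces
    // new HprofParser(new MemoryMappedFileBuffer(hprof)).parse().
    Snapshot snapshot = Snapshot.createSnapshot(new MemoryMappedFileBuffer(hprof));
    snapshot.computeDominators();
    return snapshot;
  }
}
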
diff --git a/tools/ahat/src/help.html b/tools/ahat/src/help.html
index b48d791..b7ae2ce 100644
--- a/tools/ahat/src/help.html
+++ b/tools/ahat/src/help.html
@@ -54,3 +54,38 @@
</ul>
</li>
</ul>
+
+<h2>Tips:</h2>
+<h3>Heaps</h3>
+<p>
+Android heap dumps contain information for multiple heaps. The <b>app</b> heap
+is the memory used by your application. The <b>zygote</b> and <b>image</b>
+heaps are used by the system. You should ignore everything in the zygote and
+image heaps and look only at the app heap. Changes in your application will
+not affect the zygote or image heaps, and because those heaps are shared,
+they don't contribute significantly to your application's PSS.
+</p>
+
+<h3>Bitmaps</h3>
+<p>
+Bitmaps store their data using byte[] arrays. Whenever you see a large
+byte[], check if it is a bitmap by looking to see if there is a single
+android.graphics.Bitmap object referring to it. The byte[] will be marked as a
+root, but it is really being retained by the android.graphics.Bitmap object.
+</p>
+
+<h3>DexCaches</h3>
+<p>
+For each DexFile you load, there will be a corresponding DexCache whose size
+is proportional to the number of strings, fields, methods, and classes in your
+dex file. The DexCache entries may or may not be visible depending on the
+version of the Android platform the heap dump is from.
+</p>
+
+<h3>FinalizerReferences</h3>
+<p>
+A FinalizerReference is allocated for every object on the heap that has a
+non-trivial finalizer. These are stored in a linked list reachable from the
+FinalizerReference class object.
+</p>
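
To make the FinalizerReference tip concrete: only classes that override finalize() with a non-trivial body cause the runtime to allocate a FinalizerReference per instance. The class below is purely illustrative and not taken from the codebase.

// Illustrative only: each instance of a class like this gets a matching
// FinalizerReference allocated by the runtime.
class NativeHandleExample {
  private long nativePointer;

  @Override
  protected void finalize() throws Throwable {
    try {
      nativePointer = 0;  // stand-in for releasing a native resource
    } finally {
      super.finalize();
    }
  }
}
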
diff --git a/tools/checker/common/archs.py b/tools/checker/common/archs.py
index 84bded9..178e0b5 100644
--- a/tools/checker/common/archs.py
+++ b/tools/checker/common/archs.py
@@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-archs_list = ['ARM', 'ARM64', 'MIPS64', 'X86', 'X86_64']
+archs_list = ['ARM', 'ARM64', 'MIPS', 'MIPS64', 'X86', 'X86_64']
diff --git a/tools/generate-operator-out.py b/tools/generate-operator-out.py
index c74508d..3bd62fe 100755
--- a/tools/generate-operator-out.py
+++ b/tools/generate-operator-out.py
@@ -86,8 +86,10 @@
if m:
enclosing_classes.append(m.group(1))
continue
- m = re.compile(r'^\s*\}( .*)?;').search(raw_line)
- if m:
+
+ # End of class/struct -- be careful not to match "do { ... } while" constructs by accident
+ m = re.compile(r'^\s*\}(\s+)?(while)?(.+)?;').search(raw_line)
+ if m and not m.group(2):
enclosing_classes = enclosing_classes[0:len(enclosing_classes) - 1]
continue
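
The regex change above distinguishes a brace that closes a class or struct from the "} while (...)" of a do-while loop by capturing an optional "while" group and rejecting matches where it is present. A hedged Java rendering of the same check, kept only as an illustration of the pattern (class and method names are hypothetical):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

class BraceCheckSketch {
  // Same pattern as the script: ^\s*\}(\s+)?(while)?(.+)?;
  private static final Pattern END_OF_CLASS =
      Pattern.compile("^\\s*\\}(\\s+)?(while)?(.+)?;");

  static boolean endsEnclosingClass(String rawLine) {
    Matcher m = END_OF_CLASS.matcher(rawLine);
    // A match whose second group is "while" is a do-while terminator,
    // not the end of a class or struct.
    return m.find() && m.group(2) == null;
  }
}

As in the Python version, any other trailing text before the semicolon still counts as the end of a class or struct declaration.
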
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 7ada189..81ea79a 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -157,5 +157,19 @@
modes: [device],
names: ["libcore.java.util.TimeZoneTest#testAllDisplayNames"],
bug: 22786792
+},
+{
+ description: "Formatting failures",
+ result: EXEC_FAILED,
+ names: ["libcore.java.text.NumberFormatTest#test_currencyFromLocale",
+ "libcore.java.text.NumberFormatTest#test_currencyWithPatternDigits"],
+ bug: 25136848
+},
+{
+ description: "Lack of IPv6 on some buildbot slaves",
+ result: EXEC_FAILED,
+ names: ["libcore.io.OsTest#test_byteBufferPositions_sendto_recvfrom_af_inet6",
+ "libcore.io.OsTest#test_sendtoSocketAddress_af_inet6"],
+ bug: 25178637
}
]