Merge "art/test/100-reflect2: change incorrect expectations."
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 547e92e..f3e1cc3 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -100,6 +100,9 @@
# Do you want failed tests to have their artifacts cleaned up?
ART_TEST_RUN_TEST_ALWAYS_CLEAN ?= true
+# Do you want run-tests to be run with the --debuggable flag?
+ART_TEST_RUN_TEST_DEBUGGABLE ?= $(ART_TEST_FULL)
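+# For illustration only (hypothetical invocation, not part of this change):
+# the variable can be overridden for a single run on the make command line,
+# e.g. make ART_TEST_RUN_TEST_DEBUGGABLE=true test-art-host-run-test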
+
# Define the command run on test failure. $(1) is the name of the test. Executed by the shell.
define ART_TEST_FAILED
( [ -f $(ART_HOST_TEST_DIR)/skipped/$(1) ] || \
@@ -157,6 +160,10 @@
# $(4): additional dependencies
# $(5): a make variable used to collate target dependencies, e.g ART_TEST_TARGET_OAT_HelloWorld_DEX
# $(6): a make variable used to collate host dependencies, e.g ART_TEST_HOST_OAT_HelloWorld_DEX
+#
+# If the input test directory contains files called main.list and main.jpp,
+# then a multi-dex file is created, with main.list passed to dx as the
+# --main-dex-list argument and main.jpp passed to Jack as its preprocessor file.
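+#
+# For illustration only (hypothetical contents, not part of this change):
+# main.list is a plain-text list of the class files to keep in the primary
+# classes.dex, one entry per line such as "MultiDex.class", while main.jpp
+# expresses the same selection as Jack preprocessor rules.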
define build-art-test-dex
ifeq ($(ART_BUILD_TARGET),true)
include $(CLEAR_VARS)
@@ -169,6 +176,10 @@
LOCAL_JAVA_LIBRARIES := $(TARGET_CORE_JARS)
LOCAL_MODULE_PATH := $(3)
LOCAL_DEX_PREOPT_IMAGE_LOCATION := $(TARGET_CORE_IMG_OUT)
+ ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
+ LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex
+ LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp
+ endif
include $(BUILD_JAVA_LIBRARY)
$(5) := $$(LOCAL_INSTALLED_MODULE)
endif
@@ -181,6 +192,10 @@
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_test.mk $(4)
LOCAL_JAVA_LIBRARIES := $(HOST_CORE_JARS)
LOCAL_DEX_PREOPT_IMAGE := $(HOST_CORE_IMG_LOCATION)
+ ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
+ LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex
+ LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp
+ endif
include $(BUILD_HOST_DALVIK_JAVA_LIBRARY)
$(6) := $$(LOCAL_INSTALLED_MODULE)
endif
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 7ab4d64..6b6a9e0 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -28,6 +28,7 @@
GetMethodSignature \
Interfaces \
Main \
+ MultiDex \
MyClass \
MyClassNatives \
Nested \
@@ -45,6 +46,19 @@
$(ART_TARGET_NATIVETEST_OUT),art/build/Android.gtest.mk,ART_TEST_TARGET_GTEST_$(dir)_DEX, \
ART_TEST_HOST_GTEST_$(dir)_DEX)))
+# Create rules for MainStripped, a copy of Main with the classes.dex stripped
+# for the oat file assistant tests.
+ART_TEST_HOST_GTEST_MainStripped_DEX := $(basename $(ART_TEST_HOST_GTEST_Main_DEX))Stripped$(suffix $(ART_TEST_HOST_GTEST_Main_DEX))
+ART_TEST_TARGET_GTEST_MainStripped_DEX := $(basename $(ART_TEST_TARGET_GTEST_Main_DEX))Stripped$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX))
+
+$(ART_TEST_HOST_GTEST_MainStripped_DEX): $(ART_TEST_HOST_GTEST_Main_DEX)
+ cp $< $@
+ $(call dexpreopt-remove-classes.dex,$@)
+
+$(ART_TEST_TARGET_GTEST_MainStripped_DEX): $(ART_TEST_TARGET_GTEST_Main_DEX)
+ cp $< $@
+ $(call dexpreopt-remove-classes.dex,$@)
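+# MainStripped is consumed below via ART_GTEST_oat_file_assistant_test_DEX_DEPS,
+# giving the oat file assistant tests a dex location whose classes.dex has
+# been stripped, as happens to an app after dex2oat preopting.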
+
# Dex file dependencies for each gtest.
ART_GTEST_class_linker_test_DEX_DEPS := Interfaces MyClass Nested Statics StaticsFromCode
ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod
@@ -52,6 +66,7 @@
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
ART_GTEST_jni_compiler_test_DEX_DEPS := MyClassNatives
ART_GTEST_jni_internal_test_DEX_DEPS := AllFields StaticLeafMethods
+ART_GTEST_oat_file_assistant_test_DEX_DEPS := Main MainStripped MultiDex Nested
ART_GTEST_object_test_DEX_DEPS := ProtoCompare ProtoCompare2 StaticsFromCode XandY
ART_GTEST_proxy_test_DEX_DEPS := Interfaces
ART_GTEST_reflection_test_DEX_DEPS := Main NonStaticLeafMethods StaticLeafMethods
@@ -62,6 +77,15 @@
ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32)
ART_GTEST_elf_writer_test_TARGET_DEPS := $(TARGET_CORE_IMAGE_default_no-pic_64) $(TARGET_CORE_IMAGE_default_no-pic_32)
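+# The oat file assistant tests exercise relocation of oat files, which is
+# presumably why the debug patchoat binary (patchoatd) is required here in
+# addition to the core images.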
+ART_GTEST_oat_file_assistant_test_HOST_DEPS := \
+ $(HOST_CORE_IMAGE_default_no-pic_64) \
+ $(HOST_CORE_IMAGE_default_no-pic_32) \
+ $(HOST_OUT_EXECUTABLES)/patchoatd
+ART_GTEST_oat_file_assistant_test_TARGET_DEPS := \
+ $(TARGET_CORE_IMAGE_default_no-pic_64) \
+ $(TARGET_CORE_IMAGE_default_no-pic_32) \
+ $(TARGET_OUT_EXECUTABLES)/patchoatd
+
# TODO: document why this is needed.
ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32)
@@ -137,10 +161,12 @@
runtime/java_vm_ext_test.cc \
runtime/leb128_test.cc \
runtime/mem_map_test.cc \
+ runtime/memory_region_test.cc \
runtime/mirror/dex_cache_test.cc \
runtime/mirror/object_test.cc \
runtime/monitor_pool_test.cc \
runtime/monitor_test.cc \
+ runtime/oat_file_assistant_test.cc \
runtime/parsed_options_test.cc \
runtime/reference_table_test.cc \
runtime/thread_pool_test.cc \
@@ -462,12 +488,12 @@
endef # define-art-gtest
ifeq ($(ART_BUILD_TARGET),true)
- $(foreach file,$(RUNTIME_GTEST_TARGET_SRC_FILES), $(eval $(call define-art-gtest,target,$(file),,)))
- $(foreach file,$(COMPILER_GTEST_TARGET_SRC_FILES), $(eval $(call define-art-gtest,target,$(file),art/compiler,libartd-compiler)))
+ $(foreach file,$(RUNTIME_GTEST_TARGET_SRC_FILES), $(eval $(call define-art-gtest,target,$(file),,libbacktrace)))
+ $(foreach file,$(COMPILER_GTEST_TARGET_SRC_FILES), $(eval $(call define-art-gtest,target,$(file),art/compiler,libartd-compiler libbacktrace)))
endif
ifeq ($(ART_BUILD_HOST),true)
- $(foreach file,$(RUNTIME_GTEST_HOST_SRC_FILES), $(eval $(call define-art-gtest,host,$(file),,)))
- $(foreach file,$(COMPILER_GTEST_HOST_SRC_FILES), $(eval $(call define-art-gtest,host,$(file),art/compiler,libartd-compiler)))
+ $(foreach file,$(RUNTIME_GTEST_HOST_SRC_FILES), $(eval $(call define-art-gtest,host,$(file),,libbacktrace)))
+ $(foreach file,$(COMPILER_GTEST_HOST_SRC_FILES), $(eval $(call define-art-gtest,host,$(file),art/compiler,libartd-compiler libbacktrace)))
endif
# Used outside the art project to get a list of the current tests
@@ -559,6 +585,9 @@
ART_GTEST_elf_writer_test_TARGET_DEPS :=
ART_GTEST_jni_compiler_test_DEX_DEPS :=
ART_GTEST_jni_internal_test_DEX_DEPS :=
+ART_GTEST_oat_file_assistant_test_DEX_DEPS :=
+ART_GTEST_oat_file_assistant_test_HOST_DEPS :=
+ART_GTEST_oat_file_assistant_test_TARGET_DEPS :=
ART_GTEST_object_test_DEX_DEPS :=
ART_GTEST_proxy_test_DEX_DEPS :=
ART_GTEST_reflection_test_DEX_DEPS :=
@@ -567,5 +596,7 @@
ART_VALGRIND_DEPENDENCIES :=
$(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_TEST_TARGET_GTEST_$(dir)_DEX :=))
$(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_TEST_HOST_GTEST_$(dir)_DEX :=))
+ART_TEST_HOST_GTEST_MainStripped_DEX :=
+ART_TEST_TARGET_GTEST_MainStripped_DEX :=
GTEST_DEX_DIRECTORIES :=
LOCAL_PATH :=
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 130eed2..9f873b3 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -260,6 +260,13 @@
}
EXPECT_SINGLE_PARSE_FAIL("-verbose:blablabla", CmdlineResult::kUsage); // invalid verbose opt
+
+ {
+ const char* log_args = "-verbose:oat";
+ LogVerbosity log_verbosity = LogVerbosity();
+ log_verbosity.oat = true;
+ EXPECT_SINGLE_PARSE_VALUE(log_verbosity, log_args, M::Verbose);
+ }
} // TEST_F
// TODO: Enable this b/19274810
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index de99278..03165ed 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -591,6 +591,8 @@
log_verbosity.jni = true;
} else if (verbose_options[j] == "monitor") {
log_verbosity.monitor = true;
+ } else if (verbose_options[j] == "oat") {
+ log_verbosity.oat = true;
} else if (verbose_options[j] == "profiler") {
log_verbosity.profiler = true;
} else if (verbose_options[j] == "signals") {
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 86a27c1..0906753 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -48,6 +48,12 @@
dex/quick/mips/int_mips.cc \
dex/quick/mips/target_mips.cc \
dex/quick/mips/utility_mips.cc \
+ dex/quick/mips64/assemble_mips64.cc \
+ dex/quick/mips64/call_mips64.cc \
+ dex/quick/mips64/fp_mips64.cc \
+ dex/quick/mips64/int_mips64.cc \
+ dex/quick/mips64/target_mips64.cc \
+ dex/quick/mips64/utility_mips64.cc \
dex/quick/mir_to_lir.cc \
dex/quick/quick_compiler.cc \
dex/quick/ralloc_util.cc \
@@ -83,6 +89,7 @@
jni/quick/arm/calling_convention_arm.cc \
jni/quick/arm64/calling_convention_arm64.cc \
jni/quick/mips/calling_convention_mips.cc \
+ jni/quick/mips64/calling_convention_mips64.cc \
jni/quick/x86/calling_convention_x86.cc \
jni/quick/x86_64/calling_convention_x86_64.cc \
jni/quick/calling_convention.cc \
@@ -154,6 +161,7 @@
dex/quick/arm/arm_lir.h \
dex/quick/arm64/arm64_lir.h \
dex/quick/mips/mips_lir.h \
+ dex/quick/mips64/mips64_lir.h \
dex/quick/resource_mask.h \
dex/compiler_enums.h \
dex/global_value_numbering.h \
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 7e916be..fcefb6f 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -252,10 +252,8 @@
}
}
-void DexCompiler::CompileInvokeVirtual(Instruction* inst,
- uint32_t dex_pc,
- Instruction::Code new_opcode,
- bool is_range) {
+void DexCompiler::CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc,
+ Instruction::Code new_opcode, bool is_range) {
if (!kEnableQuickening || !PerformOptimizations()) {
return;
}
diff --git a/compiler/dex/mir_method_info.cc b/compiler/dex/mir_method_info.cc
index 3d3d979..34fb1bf 100644
--- a/compiler/dex/mir_method_info.cc
+++ b/compiler/dex/mir_method_info.cc
@@ -58,8 +58,9 @@
auto current_dex_cache(hs.NewHandle<mirror::DexCache>(nullptr));
// Even if the referrer class is unresolved (i.e. we're compiling a method without class
// definition) we still want to resolve methods and record all available info.
+ Runtime* const runtime = Runtime::Current();
const DexFile* const dex_file = mUnit->GetDexFile();
- const bool use_jit = Runtime::Current()->UseJit();
+ const bool use_jit = runtime->UseJit();
const VerifiedMethod* const verified_method = mUnit->GetVerifiedMethod();
for (auto it = method_infos, end = method_infos + count; it != end; ++it) {
@@ -80,7 +81,7 @@
it->target_method_idx_ = it->MethodIndex();
current_dex_cache.Assign(dex_cache.Get());
resolved_method = compiler_driver->ResolveMethod(soa, dex_cache, class_loader, mUnit,
- it->MethodIndex(), invoke_type);
+ it->target_method_idx_, invoke_type, true);
} else {
// The method index is actually the dex PC in this case.
// Calculate the proper dex file and target method idx.
@@ -89,8 +90,7 @@
// Don't devirt if we are in a different dex file since we can't have direct invokes in
// another dex file unless we always put a direct / patch pointer.
devirt_target = nullptr;
- current_dex_cache.Assign(
- Runtime::Current()->GetClassLinker()->FindDexCache(*it->target_dex_file_));
+ current_dex_cache.Assign(runtime->GetClassLinker()->FindDexCache(*it->target_dex_file_));
CHECK(current_dex_cache.Get() != nullptr);
DexCompilationUnit cu(
mUnit->GetCompilationUnit(), mUnit->GetClassLoader(), mUnit->GetClassLinker(),
@@ -99,6 +99,14 @@
nullptr /* verified_method not used */);
resolved_method = compiler_driver->ResolveMethod(soa, current_dex_cache, class_loader, &cu,
it->target_method_idx_, invoke_type, false);
+ if (resolved_method == nullptr) {
+ // If the method is null then it should be a miranda method, in this case try
+ // re-loading it, this time as an interface method. The actual miranda method is in the
+ // vtable, but it will resolve to an interface method.
+ resolved_method = compiler_driver->ResolveMethod(
+ soa, current_dex_cache, class_loader, &cu, it->target_method_idx_, kInterface, false);
+ CHECK(resolved_method != nullptr);
+ }
if (resolved_method != nullptr) {
// Since this was a dequickened virtual, it is guaranteed to be resolved. However, it may be
// resolved to an interface method. If this is the case then change the invoke type to
@@ -123,10 +131,9 @@
it->vtable_idx_ =
compiler_driver->GetResolvedMethodVTableIndex(resolved_method, invoke_type);
}
-
MethodReference target_method(it->target_dex_file_, it->target_method_idx_);
int fast_path_flags = compiler_driver->IsFastInvoke(
- soa, dex_cache, class_loader, mUnit, referrer_class.Get(), resolved_method,
+ soa, current_dex_cache, class_loader, mUnit, referrer_class.Get(), resolved_method,
&invoke_type, &target_method, devirt_target, &it->direct_code_, &it->direct_method_);
const bool is_referrers_class = referrer_class.Get() == resolved_method->GetDeclaringClass();
const bool is_class_initialized =
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 0bac511..029c0ca 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -541,6 +541,7 @@
break;
case kArm64:
case kMips:
+ case kMips64:
bx_offset = tab_rec->anchor->offset;
break;
default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
@@ -1203,6 +1204,7 @@
LIR* load_pc_rel = OpPcRelLoad(TargetPtrReg(symbolic_reg), data_target);
AppendLIR(load_pc_rel);
DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
+ DCHECK_NE(cu_->instruction_set, kMips64) << reinterpret_cast<void*>(data_target);
}
void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
@@ -1220,6 +1222,7 @@
LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg, kRef), data_target);
AppendLIR(load_pc_rel);
DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
+ DCHECK_NE(cu_->instruction_set, kMips64) << reinterpret_cast<void*>(data_target);
}
void Mir2Lir::LoadClassType(const DexFile& dex_file, uint32_t type_idx,
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index afae89d..e57889a 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -482,6 +482,7 @@
r_val = AllocTemp();
break;
case kMips:
+ case kMips64:
r_val = AllocTemp();
break;
default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
@@ -1695,7 +1696,8 @@
StoreValue(rl_dest, rl_result);
} else {
bool done = false; // Set to true if we happen to find a way to use a real instruction.
- if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) {
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64 ||
+ cu_->instruction_set == kArm64) {
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
if (check_zero && (flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
@@ -1990,7 +1992,8 @@
}
bool done = false;
- if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) {
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64 ||
+ cu_->instruction_set == kArm64) {
rl_src = LoadValue(rl_src, kCoreReg);
rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
done = true;
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 01f1d37..6b553fd 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -222,7 +222,8 @@
RegLocation arg0, RegLocation arg1,
bool safepoint_pc) {
RegStorage r_tgt = CallHelperSetup(trampoline);
- if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
+ if (cu_->instruction_set == kArm64 || cu_->instruction_set == kMips64 ||
+ cu_->instruction_set == kX86_64) {
RegStorage arg0_reg = TargetReg((arg0.fp) ? kFArg0 : kArg0, arg0);
RegStorage arg1_reg;
@@ -900,8 +901,8 @@
}
bool Mir2Lir::GenInlinedReferenceGetReferent(CallInfo* info) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+ // TODO: add Mips and Mips64 implementations.
return false;
}
@@ -1028,8 +1029,8 @@
// Generates an inlined String.is_empty or String.length.
bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+ // TODO: add Mips and Mips64 implementations.
return false;
}
// dst = src.length();
@@ -1060,8 +1061,8 @@
}
bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation.
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+ // TODO: add Mips and Mips64 implementations.
return false;
}
RegLocation rl_dest = IsWide(size) ? InlineTargetWide(info) : InlineTarget(info); // result reg
@@ -1195,8 +1196,8 @@
}
bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+ // TODO: add Mips and Mips64 implementations.
return false;
}
RegLocation rl_dest = InlineTarget(info);
@@ -1210,8 +1211,8 @@
}
bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+ // TODO: add Mips and Mips64 implementations.
return false;
}
RegLocation rl_dest = InlineTargetWide(info);
@@ -1281,8 +1282,8 @@
/* Fast string.compareTo(Ljava/lang/string;)I. */
bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+ // TODO: add Mips and Mips64 implementations.
return false;
}
ClobberCallerSave();
@@ -1336,8 +1337,8 @@
bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
bool is_long, bool is_volatile) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+ // TODO: add Mips and Mips64 implementations.
return false;
}
// Unused - RegLocation rl_src_unsafe = info->args[0];
@@ -1381,8 +1382,8 @@
bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
bool is_object, bool is_volatile, bool is_ordered) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+ // TODO: add Mips and Mips64 implementations.
return false;
}
// Unused - RegLocation rl_src_unsafe = info->args[0];
diff --git a/compiler/dex/quick/mips64/assemble_mips64.cc b/compiler/dex/quick/mips64/assemble_mips64.cc
new file mode 100644
index 0000000..17a0ef1
--- /dev/null
+++ b/compiler/dex/quick/mips64/assemble_mips64.cc
@@ -0,0 +1,898 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_mips64.h"
+
+#include "base/logging.h"
+#include "dex/compiler_ir.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "mips64_lir.h"
+
+namespace art {
+
+#define MAX_ASSEMBLER_RETRIES 50
+
+/*
+ * opcode: Mips64OpCode enum
+ * skeleton: pre-designated bit-pattern for this opcode
+ * k0: key to applying ds/de
+ * ds: dest start bit position
+ * de: dest end bit position
+ * k1: key to applying s1s/s1e
+ * s1s: src1 start bit position
+ * s1e: src1 end bit position
+ * k2: key to applying s2s/s2e
+ * s2s: src2 start bit position
+ * s2e: src2 end bit position
+ * operands: number of operands (for sanity check purposes)
+ * name: mnemonic name
+ * fmt: for pretty-printing
+ */
+#define ENCODING_MAP(opcode, skeleton, k0, ds, de, k1, s1s, s1e, k2, s2s, s2e, \
+ k3, k3s, k3e, flags, name, fmt, size) \
+ {skeleton, {{k0, ds, de}, {k1, s1s, s1e}, {k2, s2s, s2e}, \
+ {k3, k3s, k3e}}, opcode, flags, name, fmt, size}
+
+/* Instruction dump string format keys: !pf, where "!" is the start
+ * of the key, "p" is which numeric operand to use and "f" is the
+ * print format.
+ *
+ * [p]ositions:
+ * 0 -> operands[0] (dest)
+ * 1 -> operands[1] (src1)
+ * 2 -> operands[2] (src2)
+ * 3 -> operands[3] (extra)
+ *
+ * [f]ormats:
+ * h -> 4-digit hex
+ * d -> decimal
+ * E -> decimal*4
+ * F -> decimal*2
+ * c -> branch condition (beq, bne, etc.)
+ * t -> pc-relative target
+ * T -> pc-region target
+ * u -> 1st half of bl[x] target
+ * v -> 2nd half of bl[x] target
+ * R -> register list
+ * s -> single precision floating point register
+ * S -> double precision floating point register
+ * m -> Thumb2 modified immediate
+ * n -> complemented Thumb2 modified immediate
+ * M -> Thumb2 16-bit zero-extended immediate
+ * b -> 4-digit binary
+ * N -> append a NOP
+ *
+ * [!] escape. To insert "!", use "!!"
+ */
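+/*
+ * For illustration only (hypothetical decode, not generated code): with the
+ * keys above, the beq entry's fmt string "!0r,!1r,!2t!0N" prints operands 0
+ * and 1 as core registers, operand 2 as a pc-relative branch target, and
+ * then appends a nop for the branch delay slot.
+ */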
+/* NOTE: must be kept in sync with enum Mips64Opcode from mips64_lir.h */
+/*
+ * TUNING: We're currently punting on the branch delay slots. All branch
+ * instructions in this map are given a size of 8, which during assembly
+ * is expanded to include a nop. This scheme should be replaced with
+ * an assembler pass to fill those slots when possible.
+ */
+const Mips64EncodingMap Mips64Mir2Lir::EncodingMap[kMips64Last] = {
+ ENCODING_MAP(kMips6432BitData, 0x00000000,
+ kFmtBitBlt, 31, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP,
+ "data", "0x!0h(!0d)", 4),
+ ENCODING_MAP(kMips64Addiu, 0x24000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "addiu", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Addu, 0x00000021,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "addu", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64And, 0x00000024,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "and", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Andi, 0x30000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "andi", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64B, 0x10000000,
+ kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | NEEDS_FIXUP,
+ "b", "!0t!0N", 8),
+ ENCODING_MAP(kMips64Bal, 0x04110000,
+ kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR |
+ NEEDS_FIXUP, "bal", "!0t!0N", 8),
+ ENCODING_MAP(kMips64Beq, 0x10000000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
+ NEEDS_FIXUP, "beq", "!0r,!1r,!2t!0N", 8),
+ ENCODING_MAP(kMips64Beqz, 0x10000000, // Same as beq above with t = $zero.
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+ NEEDS_FIXUP, "beqz", "!0r,!1t!0N", 8),
+ ENCODING_MAP(kMips64Bgez, 0x04010000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+ NEEDS_FIXUP, "bgez", "!0r,!1t!0N", 8),
+ ENCODING_MAP(kMips64Bgtz, 0x1c000000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+ NEEDS_FIXUP, "bgtz", "!0r,!1t!0N", 8),
+ ENCODING_MAP(kMips64Blez, 0x18000000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+ NEEDS_FIXUP, "blez", "!0r,!1t!0N", 8),
+ ENCODING_MAP(kMips64Bltz, 0x04000000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+ NEEDS_FIXUP, "bltz", "!0r,!1t!0N", 8),
+ ENCODING_MAP(kMips64Bnez, 0x14000000, // Same as bne below with t = $zero.
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+ NEEDS_FIXUP, "bnez", "!0r,!1t!0N", 8),
+ ENCODING_MAP(kMips64Bne, 0x14000000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
+ NEEDS_FIXUP, "bne", "!0r,!1r,!2t!0N", 8),
+ ENCODING_MAP(kMips64Break, 0x0000000d,
+ kFmtBitBlt, 25, 6, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP, "break", "!0d", 4),
+ ENCODING_MAP(kMips64Daddiu, 0x64000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "daddiu", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Daddu, 0x0000002d,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "daddu", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dahi, 0x04060000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE0,
+ "dahi", "!0r,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMips64Dati, 0x041E0000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE0,
+ "dati", "!0r,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMips64Daui, 0x74000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "daui", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Ddiv, 0x0000009e,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "ddiv", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Div, 0x0000009a,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "div", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dmod, 0x000000de,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dmod", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dmul, 0x0000009c,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dmul", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dmfc1, 0x44200000,
+ kFmtBitBlt, 20, 16, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "dmfc1", "!0r,!1s", 4),
+ ENCODING_MAP(kMips64Dmtc1, 0x44a00000,
+ kFmtBitBlt, 20, 16, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
+ "dmtc1", "!0r,!1s", 4),
+ ENCODING_MAP(kMips64Drotr32, 0x0000003e | (1 << 21),
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "drotr32", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsll, 0x00000038,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsll", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsll32, 0x0000003c,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsll32", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsrl, 0x0000003a,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsrl", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsrl32, 0x0000003e,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsrl32", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsra, 0x0000003b,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsra", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsra32, 0x0000003f,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsra32", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsllv, 0x00000014,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dsllv", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dsrlv, 0x00000016,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dsrlv", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dsrav, 0x00000017,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dsrav", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dsubu, 0x0000002f,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dsubu", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Ext, 0x7c000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 10, 6,
+ kFmtBitBlt, 15, 11, IS_QUAD_OP | REG_DEF0 | REG_USE1,
+ "ext", "!0r,!1r,!2d,!3D", 4),
+ ENCODING_MAP(kMips64Faddd, 0x46200000,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "add.d", "!0S,!1S,!2S", 4),
+ ENCODING_MAP(kMips64Fadds, 0x46000000,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "add.s", "!0s,!1s,!2s", 4),
+ ENCODING_MAP(kMips64Fdivd, 0x46200003,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "div.d", "!0S,!1S,!2S", 4),
+ ENCODING_MAP(kMips64Fdivs, 0x46000003,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "div.s", "!0s,!1s,!2s", 4),
+ ENCODING_MAP(kMips64Fmuld, 0x46200002,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "mul.d", "!0S,!1S,!2S", 4),
+ ENCODING_MAP(kMips64Fmuls, 0x46000002,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "mul.s", "!0s,!1s,!2s", 4),
+ ENCODING_MAP(kMips64Fsubd, 0x46200001,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "sub.d", "!0S,!1S,!2S", 4),
+ ENCODING_MAP(kMips64Fsubs, 0x46000001,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "sub.s", "!0s,!1s,!2s", 4),
+ ENCODING_MAP(kMips64Fcvtsd, 0x46200020,
+ kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.s.d", "!0s,!1S", 4),
+ ENCODING_MAP(kMips64Fcvtsw, 0x46800020,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.s.w", "!0s,!1s", 4),
+ ENCODING_MAP(kMips64Fcvtds, 0x46000021,
+ kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.d.s", "!0S,!1s", 4),
+ ENCODING_MAP(kMips64Fcvtdw, 0x46800021,
+ kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.d.w", "!0S,!1s", 4),
+ ENCODING_MAP(kMips64Fcvtws, 0x46000024,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.w.s", "!0s,!1s", 4),
+ ENCODING_MAP(kMips64Fcvtwd, 0x46200024,
+ kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.w.d", "!0s,!1S", 4),
+ ENCODING_MAP(kMips64Fmovd, 0x46200006,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mov.d", "!0S,!1S", 4),
+ ENCODING_MAP(kMips64Fmovs, 0x46000006,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mov.s", "!0s,!1s", 4),
+ ENCODING_MAP(kMips64Fnegd, 0x46200007,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "neg.d", "!0S,!1S", 4),
+ ENCODING_MAP(kMips64Fnegs, 0x46000007,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "neg.s", "!0s,!1s", 4),
+ ENCODING_MAP(kMips64Fldc1, 0xd4000000,
+ kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "ldc1", "!0S,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Flwc1, 0xc4000000,
+ kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lwc1", "!0s,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Fsdc1, 0xf4000000,
+ kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "sdc1", "!0S,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Fswc1, 0xe4000000,
+ kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "swc1", "!0s,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Jal, 0x0c000000,
+ kFmtBitBlt, 25, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
+ "jal", "!0T(!0E)!0N", 8),
+ ENCODING_MAP(kMips64Jalr, 0x00000009,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_DEF0_USE1,
+ "jalr", "!0r,!1r!0N", 8),
+ ENCODING_MAP(kMips64Lahi, 0x3c000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+ "lahi/lui", "!0r,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMips64Lalo, 0x34000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "lalo/ori", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Lb, 0x80000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lb", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Lbu, 0x90000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lbu", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Ld, 0xdc000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "ld", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Lh, 0x84000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lh", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Lhu, 0x94000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lhu", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Lui, 0x3c000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+ "lui", "!0r,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMips64Lw, 0x8c000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lw", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Lwu, 0x9c000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lwu", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Mfc1, 0x44000000,
+ kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mfc1", "!0r,!1s", 4),
+ ENCODING_MAP(kMips64Mtc1, 0x44800000,
+ kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
+ "mtc1", "!0r,!1s", 4),
+ ENCODING_MAP(kMips64Move, 0x0000002d, // Or using zero reg.
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "move", "!0r,!1r", 4),
+ ENCODING_MAP(kMips64Mod, 0x000000da,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "mod", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Mul, 0x00000098,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "mul", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Nop, 0x00000000,
+ kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, NO_OPERAND,
+ "nop", ";", 4),
+ ENCODING_MAP(kMips64Nor, 0x00000027, // Used for "not" too.
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "nor", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Or, 0x00000025,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "or", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Ori, 0x34000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "ori", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Sb, 0xa0000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "sb", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Sd, 0xfc000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "sd", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Seb, 0x7c000420,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "seb", "!0r,!1r", 4),
+ ENCODING_MAP(kMips64Seh, 0x7c000620,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "seh", "!0r,!1r", 4),
+ ENCODING_MAP(kMips64Sh, 0xa4000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "sh", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Sll, 0x00000000,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "sll", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Sllv, 0x00000004,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "sllv", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Slt, 0x0000002a,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "slt", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Slti, 0x28000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "slti", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Sltu, 0x0000002b,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "sltu", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Sra, 0x00000003,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "sra", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Srav, 0x00000007,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "srav", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Srl, 0x00000002,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "srl", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Srlv, 0x00000006,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "srlv", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Subu, 0x00000023, // Used for "neg" too.
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "subu", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Sw, 0xac000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "sw", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Sync, 0x0000000f,
+ kFmtBitBlt, 10, 6, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP,
+ "sync", ";", 4),
+ ENCODING_MAP(kMips64Xor, 0x00000026,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "xor", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Xori, 0x38000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "xori", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64CurrPC, 0x04110001,
+ kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, NO_OPERAND | IS_BRANCH | REG_DEF_LR,
+ "addiu", "ra,pc,8", 4),
+ ENCODING_MAP(kMips64Delta, 0x67e00000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, 15, 0,
+ kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0 | REG_USE_LR |
+ NEEDS_FIXUP, "daddiu", "!0r,ra,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMips64DeltaHi, 0x3c000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0 | NEEDS_FIXUP,
+ "lui", "!0r,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMips64DeltaLo, 0x34000000,
+ kFmtBlt5_2, 16, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0_USE0 | NEEDS_FIXUP,
+ "ori", "!0r,!0r,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMips64Undefined, 0x64000000,
+ kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, NO_OPERAND,
+ "undefined", "", 4),
+};
+
+
+/*
+ * Convert a short-form branch to long form. Hopefully, this won't happen
+ * very often because the PIC sequence is especially unfortunate.
+ *
+ * Orig conditional branch
+ * -----------------------
+ * beq rs,rt,target
+ *
+ * Long conditional branch
+ * -----------------------
+ * bne rs,rt,hop
+ * bal .+8 ; rRA <- anchor
+ * lui rAT, ((target-anchor) >> 16)
+ * anchor:
+ * ori rAT, rAT, ((target-anchor) & 0xffff)
+ * addu rAT, rAT, rRA
+ * jalr rZERO, rAT
+ * hop:
+ *
+ * Orig unconditional branch
+ * -------------------------
+ * b target
+ *
+ * Long unconditional branch
+ * -----------------------
+ * bal .+8 ; rRA <- anchor
+ * lui rAT, ((target-anchor) >> 16)
+ * anchor:
+ * ori rAT, rAT, ((target-anchor) & 0xffff)
+ * addu rAT, rAT, rRA
+ * jalr rZERO, rAT
+ *
+ *
+ * NOTE: An out-of-range bal isn't supported because it should
+ * never happen with the current PIC model.
+ */
+void Mips64Mir2Lir::ConvertShortToLongBranch(LIR* lir) {
+ // For conditional branches we'll need to reverse the sense of the branch.
+ bool unconditional = false;
+ int opcode = lir->opcode;
+ int dalvik_offset = lir->dalvik_offset;
+ switch (opcode) {
+ case kMips64Bal:
+ LOG(FATAL) << "long branch and link unsupported";
+ UNREACHABLE();
+ case kMips64B:
+ unconditional = true;
+ break;
+ case kMips64Beq: opcode = kMips64Bne; break;
+ case kMips64Bne: opcode = kMips64Beq; break;
+ case kMips64Beqz: opcode = kMips64Bnez; break;
+ case kMips64Bgez: opcode = kMips64Bltz; break;
+ case kMips64Bgtz: opcode = kMips64Blez; break;
+ case kMips64Blez: opcode = kMips64Bgtz; break;
+ case kMips64Bltz: opcode = kMips64Bgez; break;
+ case kMips64Bnez: opcode = kMips64Beqz; break;
+ default:
+ LOG(FATAL) << "Unexpected branch kind " << opcode;
+ UNREACHABLE();
+ }
+ LIR* hop_target = NULL;
+ if (!unconditional) {
+ hop_target = RawLIR(dalvik_offset, kPseudoTargetLabel);
+ LIR* hop_branch = RawLIR(dalvik_offset, opcode, lir->operands[0],
+ lir->operands[1], 0, 0, 0, hop_target);
+ InsertLIRBefore(lir, hop_branch);
+ }
+ LIR* curr_pc = RawLIR(dalvik_offset, kMips64CurrPC);
+ InsertLIRBefore(lir, curr_pc);
+ LIR* anchor = RawLIR(dalvik_offset, kPseudoTargetLabel);
+ LIR* delta_hi = RawLIR(dalvik_offset, kMips64DeltaHi, rAT, 0, WrapPointer(anchor), 0, 0,
+ lir->target);
+ InsertLIRBefore(lir, delta_hi);
+ InsertLIRBefore(lir, anchor);
+ LIR* delta_lo = RawLIR(dalvik_offset, kMips64DeltaLo, rAT, 0, WrapPointer(anchor), 0, 0,
+ lir->target);
+ InsertLIRBefore(lir, delta_lo);
+ LIR* addu = RawLIR(dalvik_offset, kMips64Addu, rAT, rAT, rRA);
+ InsertLIRBefore(lir, addu);
+ LIR* jalr = RawLIR(dalvik_offset, kMips64Jalr, rZERO, rAT);
+ InsertLIRBefore(lir, jalr);
+ if (!unconditional) {
+ InsertLIRBefore(lir, hop_target);
+ }
+ NopLIR(lir);
+}
+
+/*
+ * Assemble the LIR into binary instruction format. Note that we may
+ * discover that pc-relative displacements do not fit the selected
+ * instruction. In those cases we will try to substitute a new code
+ * sequence or request that the trace be shortened and retried.
+ */
+AssemblerStatus Mips64Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
+ LIR *lir;
+ AssemblerStatus res = kSuccess; // Assume success.
+
+ for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ if (lir->opcode < 0) {
+ continue;
+ }
+
+ if (lir->flags.is_nop) {
+ continue;
+ }
+
+ if (lir->flags.fixup != kFixupNone) {
+ if (lir->opcode == kMips64Delta) {
+ /*
+ * The "Delta" pseudo-ops load the difference between
+ * two pc-relative locations into the target register
+ * found in operands[0]. The delta is determined by
+ * (label2 - label1), where label1 is a standard
+ * kPseudoTargetLabel and is stored in operands[2].
+ * If operands[3] is NULL, then label2 is a kPseudoTargetLabel
+ * and is found in lir->target. If operands[3] is non-NULL,
+ * then it is a Switch/Data table.
+ */
+ int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
+ int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
+ int delta = offset2 - offset1;
+ if ((delta & 0xffff) == delta && ((delta & 0x8000) == 0)) {
+ // Fits.
+ lir->operands[1] = delta;
+ } else {
+ // Doesn't fit - must expand to kMips64Delta[Hi|Lo] pair.
+ LIR *new_delta_hi = RawLIR(lir->dalvik_offset, kMips64DeltaHi, lir->operands[0], 0,
+ lir->operands[2], lir->operands[3], 0, lir->target);
+ InsertLIRBefore(lir, new_delta_hi);
+ LIR *new_delta_lo = RawLIR(lir->dalvik_offset, kMips64DeltaLo, lir->operands[0], 0,
+ lir->operands[2], lir->operands[3], 0, lir->target);
+ InsertLIRBefore(lir, new_delta_lo);
+ LIR *new_addu = RawLIR(lir->dalvik_offset, kMips64Daddu, lir->operands[0],
+ lir->operands[0], rRAd);
+ InsertLIRBefore(lir, new_addu);
+ NopLIR(lir);
+ res = kRetryAll;
+ }
+ } else if (lir->opcode == kMips64DeltaLo) {
+ int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
+ int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
+ int delta = offset2 - offset1;
+ lir->operands[1] = delta & 0xffff;
+ } else if (lir->opcode == kMips64DeltaHi) {
+ int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
+ int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
+ int delta = offset2 - offset1;
+ lir->operands[1] = (delta >> 16) & 0xffff;
+ } else if (lir->opcode == kMips64B || lir->opcode == kMips64Bal) {
+ LIR *target_lir = lir->target;
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset;
+ int delta = target - pc;
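+ // The 16-bit branch immediate counts 4-byte words, so the encodable
+ // displacement is [-131072, 131068] bytes; the slightly conservative
+ // bounds below send out-of-range branches through
+ // ConvertShortToLongBranch.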
+ if (delta & 0x3) {
+ LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+ }
+ if (delta > 131068 || delta < -131069) {
+ res = kRetryAll;
+ ConvertShortToLongBranch(lir);
+ } else {
+ lir->operands[0] = delta >> 2;
+ }
+ } else if (lir->opcode >= kMips64Beqz && lir->opcode <= kMips64Bnez) {
+ LIR *target_lir = lir->target;
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset;
+ int delta = target - pc;
+ if (delta & 0x3) {
+ LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+ }
+ if (delta > 131068 || delta < -131069) {
+ res = kRetryAll;
+ ConvertShortToLongBranch(lir);
+ } else {
+ lir->operands[1] = delta >> 2;
+ }
+ } else if (lir->opcode == kMips64Beq || lir->opcode == kMips64Bne) {
+ LIR *target_lir = lir->target;
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset;
+ int delta = target - pc;
+ if (delta & 0x3) {
+ LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+ }
+ if (delta > 131068 || delta < -131069) {
+ res = kRetryAll;
+ ConvertShortToLongBranch(lir);
+ } else {
+ lir->operands[2] = delta >> 2;
+ }
+ } else if (lir->opcode == kMips64Jal) {
+ CodeOffset cur_pc = (start_addr + lir->offset + 4) & ~3;
+ CodeOffset target = lir->operands[0];
+ /* ensure PC-region branch can be used */
+ DCHECK_EQ((cur_pc & 0xF0000000), (target & 0xF0000000));
+ if (target & 0x3) {
+ LOG(FATAL) << "Jump target not multiple of 4: " << target;
+ }
+ lir->operands[0] = target >> 2;
+ } else if (lir->opcode == kMips64Lahi) { /* ld address hi (via lui) */
+ LIR *target_lir = lir->target;
+ CodeOffset target = start_addr + target_lir->offset;
+ lir->operands[1] = target >> 16;
+ } else if (lir->opcode == kMips64Lalo) { /* ld address lo (via ori) */
+ LIR *target_lir = lir->target;
+ CodeOffset target = start_addr + target_lir->offset;
+ lir->operands[2] = lir->operands[2] + target;
+ }
+ }
+
+ /*
+ * If one of the pc-relative instructions expanded we'll have
+ * to make another pass. Don't bother to fully assemble the
+ * instruction.
+ */
+ if (res != kSuccess) {
+ continue;
+ }
+ DCHECK(!IsPseudoLirOp(lir->opcode));
+ const Mips64EncodingMap *encoder = &EncodingMap[lir->opcode];
+ uint32_t bits = encoder->skeleton;
+ int i;
+ for (i = 0; i < 4; i++) {
+ uint32_t operand;
+ uint32_t value;
+ operand = lir->operands[i];
+ switch (encoder->field_loc[i].kind) {
+ case kFmtUnused:
+ break;
+ case kFmtBitBlt:
+ if (encoder->field_loc[i].start == 0 && encoder->field_loc[i].end == 31) {
+ value = operand;
+ } else {
+ value = (operand << encoder->field_loc[i].start) &
+ ((1 << (encoder->field_loc[i].end + 1)) - 1);
+ }
+ bits |= value;
+ break;
+ case kFmtBlt5_2:
+ value = (operand & 0x1f);
+ bits |= (value << encoder->field_loc[i].start);
+ bits |= (value << encoder->field_loc[i].end);
+ break;
+ case kFmtDfp: {
+ // TODO: do we need to adjust now that we're using 64BitSolo?
+ DCHECK(RegStorage::IsDouble(operand)) << ", Operand = 0x" << std::hex << operand;
+ value = (RegStorage::RegNum(operand) << encoder->field_loc[i].start) &
+ ((1 << (encoder->field_loc[i].end + 1)) - 1);
+ bits |= value;
+ break;
+ }
+ case kFmtSfp:
+ DCHECK(RegStorage::IsSingle(operand)) << ", Operand = 0x" << std::hex << operand;
+ value = (RegStorage::RegNum(operand) << encoder->field_loc[i].start) &
+ ((1 << (encoder->field_loc[i].end + 1)) - 1);
+ bits |= value;
+ break;
+ default:
+ LOG(FATAL) << "Bad encoder format: " << encoder->field_loc[i].kind;
+ }
+ }
+ // We only support little-endian MIPS64.
+ code_buffer_.push_back(bits & 0xff);
+ code_buffer_.push_back((bits >> 8) & 0xff);
+ code_buffer_.push_back((bits >> 16) & 0xff);
+ code_buffer_.push_back((bits >> 24) & 0xff);
+ // TUNING: replace with proper delay slot handling.
+ if (encoder->size == 8) {
+ DCHECK(!IsPseudoLirOp(lir->opcode));
+ const Mips64EncodingMap *encoder2 = &EncodingMap[kMips64Nop];
+ uint32_t bits2 = encoder2->skeleton;
+ code_buffer_.push_back(bits2 & 0xff);
+ code_buffer_.push_back((bits2 >> 8) & 0xff);
+ code_buffer_.push_back((bits2 >> 16) & 0xff);
+ code_buffer_.push_back((bits2 >> 24) & 0xff);
+ }
+ }
+ return res;
+}
+
+size_t Mips64Mir2Lir::GetInsnSize(LIR* lir) {
+ DCHECK(!IsPseudoLirOp(lir->opcode));
+ return EncodingMap[lir->opcode].size;
+}
+
+// LIR offset assignment.
+// TODO: consolidate w/ Arm assembly mechanism.
+int Mips64Mir2Lir::AssignInsnOffsets() {
+ LIR* lir;
+ int offset = 0;
+
+ for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ lir->offset = offset;
+ if (LIKELY(lir->opcode >= 0)) {
+ if (!lir->flags.is_nop) {
+ offset += lir->flags.size;
+ }
+ } else if (UNLIKELY(lir->opcode == kPseudoPseudoAlign4)) {
+ if (offset & 0x2) {
+ offset += 2;
+ lir->operands[0] = 1;
+ } else {
+ lir->operands[0] = 0;
+ }
+ }
+ // Pseudo opcodes don't consume space.
+ }
+ return offset;
+}
+
+/*
+ * Walk the compilation unit and assign offsets to instructions
+ * and literals and compute the total size of the compiled unit.
+ * TODO: consolidate w/ Arm assembly mechanism.
+ */
+void Mips64Mir2Lir::AssignOffsets() {
+ int offset = AssignInsnOffsets();
+
+ // Const values have to be word aligned.
+ offset = RoundUp(offset, 4);
+
+ // Set up offsets for literals.
+ data_offset_ = offset;
+
+ offset = AssignLiteralOffset(offset);
+
+ offset = AssignSwitchTablesOffset(offset);
+
+ offset = AssignFillArrayDataOffset(offset);
+
+ total_size_ = offset;
+}
+
+/*
+ * Go over each instruction in the list and calculate the offset from the top
+ * before sending them off to the assembler. If an out-of-range branch
+ * distance is seen, rearrange the instructions a bit to correct it.
+ * TODO: consolidate w/ Arm assembly mechanism.
+ */
+void Mips64Mir2Lir::AssembleLIR() {
+ cu_->NewTimingSplit("Assemble");
+ AssignOffsets();
+ int assembler_retries = 0;
+ /*
+ * Assemble here. Note that we generate code with optimistic assumptions
+ * and if they are found not to work, we'll have to redo the sequence and retry.
+ */
+
+ while (true) {
+ AssemblerStatus res = AssembleInstructions(0);
+ if (res == kSuccess) {
+ break;
+ } else {
+ assembler_retries++;
+ if (assembler_retries > MAX_ASSEMBLER_RETRIES) {
+ CodegenDump();
+ LOG(FATAL) << "Assembler error - too many retries";
+ }
+ // Redo offsets and try again.
+ AssignOffsets();
+ code_buffer_.clear();
+ }
+ }
+
+ // Install literals.
+ InstallLiteralPools();
+
+ // Install switch tables.
+ InstallSwitchTables();
+
+ // Install fill array data.
+ InstallFillArrayData();
+
+ // Create the mapping table and native offset to reference map.
+ cu_->NewTimingSplit("PcMappingTable");
+ CreateMappingTables();
+
+ cu_->NewTimingSplit("GcMap");
+ CreateNativeGcMap();
+}
+
+} // namespace art
diff --git a/compiler/dex/quick/mips64/backend_mips64.h b/compiler/dex/quick/mips64/backend_mips64.h
new file mode 100644
index 0000000..cc30ae0
--- /dev/null
+++ b/compiler/dex/quick/mips64/backend_mips64.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_QUICK_MIPS64_BACKEND_MIPS64_H_
+#define ART_COMPILER_DEX_QUICK_MIPS64_BACKEND_MIPS64_H_
+
+namespace art {
+
+struct CompilationUnit;
+class Mir2Lir;
+class MIRGraph;
+class ArenaAllocator;
+
+Mir2Lir* Mips64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+ ArenaAllocator* const arena);
+
+} // namespace art
+
+#endif // ART_COMPILER_DEX_QUICK_MIPS64_BACKEND_MIPS64_H_
diff --git a/compiler/dex/quick/mips64/call_mips64.cc b/compiler/dex/quick/mips64/call_mips64.cc
new file mode 100644
index 0000000..63cef7e
--- /dev/null
+++ b/compiler/dex/quick/mips64/call_mips64.cc
@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the Mips64 ISA */
+
+#include "codegen_mips64.h"
+
+#include "base/logging.h"
+#include "dex/mir_graph.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "gc/accounting/card_table.h"
+#include "mips64_lir.h"
+#include "mirror/art_method.h"
+#include "mirror/object_array-inl.h"
+
+namespace art {
+
+bool Mips64Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) {
+ // TODO
+ UNUSED(bb, mir, special);
+ return false;
+}
+
+/*
+ * The lack of pc-relative loads on Mips64 presents somewhat of a challenge
+ * for our PIC switch table strategy. To materialize the current location
+ * we'll do a dummy JAL and reference our tables using rRA as the
+ * base register. Note that rRA will be used both as the base to
+ * locate the switch table data and as the reference base for the switch
+ * target offsets stored in the table. We'll use a special pseudo-instruction
+ * to represent the jal and trigger the construction of the
+ * switch table offsets (which will happen after final assembly and all
+ * labels are fixed).
+ *
+ * The test loop will look something like:
+ *
+ * ori r_end, rZERO, #table_size ; size in bytes
+ * jal BaseLabel ; stores "return address" (BaseLabel) in rRA
+ * nop ; opportunistically fill
+ * BaseLabel:
+ * addiu r_base, rRA, <table> - <BaseLabel> ; table relative to BaseLabel
+ *     addu  r_end, r_end, r_base             ; end of table
+ * lw r_val, [rSP, v_reg_off] ; Test Value
+ * loop:
+ * beq r_base, r_end, done
+ * lw r_key, 0(r_base)
+ * addu r_base, 8
+ * bne r_val, r_key, loop
+ * lw r_disp, -4(r_base)
+ * addu rRA, r_disp
+ * jalr rZERO, rRA
+ * done:
+ *
+ */
+void Mips64Mir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
+ const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
+ // Add the table to the list - we'll process it later.
+ SwitchTable* tab_rec = static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable),
+ kArenaAllocData));
+ tab_rec->switch_mir = mir;
+ tab_rec->table = table;
+ tab_rec->vaddr = current_dalvik_offset_;
+ int elements = table[1];
+ switch_tables_.push_back(tab_rec);
+
+ // The table is composed of 8-byte key/disp pairs.
+ int byte_size = elements * 8;
+
+ int size_hi = byte_size >> 16;
+ int size_lo = byte_size & 0xffff;
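+  // A 32-bit table size is materialized from two 16-bit immediates: lui
+  // fills the upper half and ori the lower; when the size fits in 16 bits
+  // the lui is skipped and ori against rZERO suffices.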
+
+ RegStorage r_end = AllocTempWide();
+ if (size_hi) {
+ NewLIR2(kMips64Lui, r_end.GetReg(), size_hi);
+ }
+ // Must prevent code motion for the curr pc pair.
+ GenBarrier(); // Scheduling barrier.
+ NewLIR0(kMips64CurrPC); // Really a jal to .+8.
+ // Now, fill the branch delay slot.
+ if (size_hi) {
+ NewLIR3(kMips64Ori, r_end.GetReg(), r_end.GetReg(), size_lo);
+ } else {
+ NewLIR3(kMips64Ori, r_end.GetReg(), rZERO, size_lo);
+ }
+ GenBarrier(); // Scheduling barrier.
+
+ // Construct BaseLabel and set up table base register.
+ LIR* base_label = NewLIR0(kPseudoTargetLabel);
+ // Remember base label so offsets can be computed later.
+ tab_rec->anchor = base_label;
+ RegStorage r_base = AllocTempWide();
+ NewLIR4(kMips64Delta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));
+ OpRegRegReg(kOpAdd, r_end, r_end, r_base);
+
+ // Grab switch test value.
+ rl_src = LoadValue(rl_src, kCoreReg);
+
+ // Test loop.
+ RegStorage r_key = AllocTemp();
+ LIR* loop_label = NewLIR0(kPseudoTargetLabel);
+ LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL);
+ Load32Disp(r_base, 0, r_key);
+ OpRegImm(kOpAdd, r_base, 8);
+ OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label);
+ RegStorage r_disp = AllocTemp();
+ Load32Disp(r_base, -4, r_disp);
+ OpRegRegReg(kOpAdd, TargetReg(kLr, kWide), TargetReg(kLr, kWide), r_disp);
+ OpReg(kOpBx, TargetReg(kLr, kWide));
+
+ // Loop exit.
+ LIR* exit_label = NewLIR0(kPseudoTargetLabel);
+ exit_branch->target = exit_label;
+}
+
+/*
+ * Code pattern will look something like:
+ *
+ * lw r_val
+ * jal BaseLabel ; stores "return address" (BaseLabel) in rRA
+ * nop ; opportunistically fill
+ * [subiu r_val, bias] ; Remove bias if low_val != 0
+ * bound check -> done
+ * lw r_disp, [rRA, r_val]
+ * addu rRA, r_disp
+ * jalr rZERO, rRA
+ * done:
+ */
+void Mips64Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
+ const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
+ // Add the table to the list - we'll process it later.
+ SwitchTable* tab_rec =
+ static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
+ tab_rec->switch_mir = mir;
+ tab_rec->table = table;
+ tab_rec->vaddr = current_dalvik_offset_;
+ int size = table[1];
+ switch_tables_.push_back(tab_rec);
+
+ // Get the switch value.
+ rl_src = LoadValue(rl_src, kCoreReg);
+
+ // Prepare the bias. If too big, handle 1st stage here.
+ int low_key = s4FromSwitchData(&table[2]);
+ bool large_bias = false;
+ RegStorage r_key;
+ if (low_key == 0) {
+ r_key = rl_src.reg;
+ } else if ((low_key & 0xffff) != low_key) {
+ r_key = AllocTemp();
+ LoadConstant(r_key, low_key);
+ large_bias = true;
+ } else {
+ r_key = AllocTemp();
+ }
+
+ // Must prevent code motion for the curr pc pair.
+ GenBarrier();
+ NewLIR0(kMips64CurrPC); // Really a jal to .+8.
+ // Now, fill the branch delay slot with bias strip.
+ if (low_key == 0) {
+ NewLIR0(kMips64Nop);
+ } else {
+ if (large_bias) {
+ OpRegRegReg(kOpSub, r_key, rl_src.reg, r_key);
+ } else {
+ OpRegRegImm(kOpSub, r_key, rl_src.reg, low_key);
+ }
+ }
+ GenBarrier(); // Scheduling barrier.
+
+ // Construct BaseLabel and set up table base register.
+ LIR* base_label = NewLIR0(kPseudoTargetLabel);
+ // Remember base label so offsets can be computed later.
+ tab_rec->anchor = base_label;
+
+  // Bounds check - if < 0 or >= size, continue following switch; a single
+  // unsigned compare against size - 1 catches both cases.
+  LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size - 1, NULL);
+
+ // Materialize the table base pointer.
+ RegStorage r_base = AllocTempWide();
+ NewLIR4(kMips64Delta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));
+
+ // Load the displacement from the switch table.
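+  // Each table entry is a 4-byte displacement, hence the scale of 2 (index * 4).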
+ RegStorage r_disp = AllocTemp();
+ LoadBaseIndexed(r_base, r_key, r_disp, 2, k32);
+
+  // Add to rRA and go.
+ OpRegRegReg(kOpAdd, TargetReg(kLr, kWide), TargetReg(kLr, kWide), r_disp);
+ OpReg(kOpBx, TargetReg(kLr, kWide));
+
+ // Branch_over target here.
+ LIR* target = NewLIR0(kPseudoTargetLabel);
+ branch_over->target = target;
+}
+
+void Mips64Mir2Lir::GenMoveException(RegLocation rl_dest) {
+ int ex_offset = Thread::ExceptionOffset<8>().Int32Value();
+ RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
+ RegStorage reset_reg = AllocTempRef();
+ LoadRefDisp(rs_rMIPS64_SELF, ex_offset, rl_result.reg, kNotVolatile);
+ LoadConstant(reset_reg, 0);
+ StoreRefDisp(rs_rMIPS64_SELF, ex_offset, reset_reg, kNotVolatile);
+ FreeTemp(reset_reg);
+ StoreValue(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
+ RegStorage reg_card_base = AllocTempWide();
+ RegStorage reg_card_no = AllocTempWide();
+ // NOTE: native pointer.
+ LoadWordDisp(rs_rMIPS64_SELF, Thread::CardTableOffset<8>().Int32Value(), reg_card_base);
+ OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
+ StoreBaseIndexed(reg_card_base, reg_card_no, As32BitReg(reg_card_base), 0, kUnsignedByte);
+ FreeTemp(reg_card_base);
+ FreeTemp(reg_card_no);
+}
+
+void Mips64Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
+ int spill_count = num_core_spills_ + num_fp_spills_;
+ /*
+ * On entry, rMIPS64_ARG0, rMIPS64_ARG1, rMIPS64_ARG2, rMIPS64_ARG3,
+ * rMIPS64_ARG4, rMIPS64_ARG5, rMIPS64_ARG6 & rMIPS64_ARG7 are live.
+ * Let the register allocation mechanism know so it doesn't try to
+ * use any of them when expanding the frame or flushing.
+ */
+ LockTemp(rs_rMIPS64_ARG0);
+ LockTemp(rs_rMIPS64_ARG1);
+ LockTemp(rs_rMIPS64_ARG2);
+ LockTemp(rs_rMIPS64_ARG3);
+ LockTemp(rs_rMIPS64_ARG4);
+ LockTemp(rs_rMIPS64_ARG5);
+ LockTemp(rs_rMIPS64_ARG6);
+ LockTemp(rs_rMIPS64_ARG7);
+
+ /*
+ * We can safely skip the stack overflow check if we're
+ * a leaf *and* our frame size < fudge factor.
+ */
+ bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_,
+ kMips64);
+ NewLIR0(kPseudoMethodEntry);
+ RegStorage check_reg = AllocTempWide();
+ RegStorage new_sp = AllocTempWide();
+ if (!skip_overflow_check) {
+ // Load stack limit.
+ LoadWordDisp(rs_rMIPS64_SELF, Thread::StackEndOffset<8>().Int32Value(), check_reg);
+ }
+ // Spill core callee saves.
+ SpillCoreRegs();
+ // NOTE: promotion of FP regs currently unsupported, thus no FP spill.
+ DCHECK_EQ(num_fp_spills_, 0);
+ const int frame_sub = frame_size_ - spill_count * 8;
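+  // SpillCoreRegs() has already moved SP down by spill_count * 8, so only
+  // the remainder of the frame is allocated below.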
+ if (!skip_overflow_check) {
+ class StackOverflowSlowPath : public LIRSlowPath {
+ public:
+ StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, nullptr), sp_displace_(sp_displace) {
+ }
+ void Compile() OVERRIDE {
+ m2l_->ResetRegPool();
+ m2l_->ResetDefTracking();
+ GenerateTargetLabel(kPseudoThrowTarget);
+ // Load RA from the top of the frame.
+ m2l_->LoadWordDisp(rs_rMIPS64_SP, sp_displace_ - 8, rs_rRAd);
+ m2l_->OpRegImm(kOpAdd, rs_rMIPS64_SP, sp_displace_);
+ m2l_->ClobberCallerSave();
+ RegStorage r_tgt = m2l_->CallHelperSetup(kQuickThrowStackOverflow); // Doesn't clobber LR.
+ m2l_->CallHelper(r_tgt, kQuickThrowStackOverflow, false /* MarkSafepointPC */,
+ false /* UseLink */);
+ }
+
+ private:
+ const size_t sp_displace_;
+ };
+ OpRegRegImm(kOpSub, new_sp, rs_rMIPS64_SP, frame_sub);
+ LIR* branch = OpCmpBranch(kCondUlt, new_sp, check_reg, nullptr);
+ AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_count * 8));
+ // TODO: avoid copy for small frame sizes.
+ OpRegCopy(rs_rMIPS64_SP, new_sp); // Establish stack.
+ } else {
+ OpRegImm(kOpSub, rs_rMIPS64_SP, frame_sub);
+ }
+
+ FlushIns(ArgLocs, rl_method);
+
+ FreeTemp(rs_rMIPS64_ARG0);
+ FreeTemp(rs_rMIPS64_ARG1);
+ FreeTemp(rs_rMIPS64_ARG2);
+ FreeTemp(rs_rMIPS64_ARG3);
+ FreeTemp(rs_rMIPS64_ARG4);
+ FreeTemp(rs_rMIPS64_ARG5);
+ FreeTemp(rs_rMIPS64_ARG6);
+ FreeTemp(rs_rMIPS64_ARG7);
+}
+
+void Mips64Mir2Lir::GenExitSequence() {
+ /*
+ * In the exit path, rMIPS64_RET0/rMIPS64_RET1 are live - make sure they aren't
+ * allocated by the register utilities as temps.
+ */
+ LockTemp(rs_rMIPS64_RET0);
+ LockTemp(rs_rMIPS64_RET1);
+
+ NewLIR0(kPseudoMethodExit);
+ UnSpillCoreRegs();
+ OpReg(kOpBx, rs_rRAd);
+}
+
+void Mips64Mir2Lir::GenSpecialExitSequence() {
+ OpReg(kOpBx, rs_rRAd);
+}
+
+void Mips64Mir2Lir::GenSpecialEntryForSuspend() {
+ // Keep 16-byte stack alignment - push A0, i.e. ArtMethod* and RA.
+ core_spill_mask_ = (1u << rs_rRAd.GetRegNum());
+ num_core_spills_ = 1u;
+ fp_spill_mask_ = 0u;
+ num_fp_spills_ = 0u;
+ frame_size_ = 16u;
+ core_vmap_table_.clear();
+ fp_vmap_table_.clear();
+ OpRegImm(kOpSub, rs_rMIPS64_SP, frame_size_);
+ StoreWordDisp(rs_rMIPS64_SP, frame_size_ - 8, rs_rRAd);
+ StoreWordDisp(rs_rMIPS64_SP, 0, rs_rA0d);
+}
+
+void Mips64Mir2Lir::GenSpecialExitForSuspend() {
+ // Pop the frame. Don't pop ArtMethod*, it's no longer needed.
+ LoadWordDisp(rs_rMIPS64_SP, frame_size_ - 8, rs_rRAd);
+ OpRegImm(kOpAdd, rs_rMIPS64_SP, frame_size_);
+}
+
+/*
+ * Bit of a hack here - in the absence of a real scheduling pass,
+ * emit the next instruction in static & direct invoke sequences.
+ */
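+// Sketch of the state machine below: in the indirect case, state 0 loads the
+// caller's Method* into kArg0, state 1 chases its dex_cache_resolved_methods_
+// array, state 2 indexes the resolved target method, and state 3 loads that
+// method's quick-code entrypoint into kInvokeTgt; returning -1 signals that
+// the sequence is complete.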
+static int Mips64NextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED, int state,
+ const MethodReference& target_method, uint32_t,
+ uintptr_t direct_code, uintptr_t direct_method, InvokeType type) {
+ Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
+ if (direct_code != 0 && direct_method != 0) {
+ switch (state) {
+ case 0: // Get the current Method* [sets kArg0]
+ if (direct_code != static_cast<uintptr_t>(-1)) {
+ cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ } else {
+ cg->LoadCodeAddress(target_method, type, kInvokeTgt);
+ }
+ if (direct_method != static_cast<uintptr_t>(-1)) {
+ cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
+ } else {
+ cg->LoadMethodAddress(target_method, type, kArg0);
+ }
+ break;
+ default:
+ return -1;
+ }
+ } else {
+ RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
+ switch (state) {
+ case 0: // Get the current Method* [sets kArg0]
+ // TUNING: we can save a reg copy if Method* has been promoted.
+ cg->LoadCurrMethodDirect(arg0_ref);
+ break;
+ case 1: // Get method->dex_cache_resolved_methods_
+ cg->LoadRefDisp(arg0_ref, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
+ arg0_ref, kNotVolatile);
+ // Set up direct code if known.
+ if (direct_code != 0) {
+ if (direct_code != static_cast<uintptr_t>(-1)) {
+ cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ } else {
+ CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
+ cg->LoadCodeAddress(target_method, type, kInvokeTgt);
+ }
+ }
+ break;
+ case 2: // Grab target method*
+ CHECK_EQ(cu->dex_file, target_method.dex_file);
+ cg->LoadRefDisp(arg0_ref, mirror::ObjectArray<mirror::Object>::
+ OffsetOfElement(target_method.dex_method_index).Int32Value(), arg0_ref,
+ kNotVolatile);
+ break;
+ case 3: // Grab the code from the method*
+ if (direct_code == 0) {
+ int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ InstructionSetPointerSize(cu->instruction_set)).Int32Value();
+ // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
+ cg->LoadWordDisp(arg0_ref, offset, cg->TargetPtrReg(kInvokeTgt));
+ }
+ break;
+ default:
+ return -1;
+ }
+ }
+ return state + 1;
+}
+
+NextCallInsn Mips64Mir2Lir::GetNextSDCallInsn() {
+ return Mips64NextSDCallInsn;
+}
+
+LIR* Mips64Mir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info ATTRIBUTE_UNUSED) {
+ return OpReg(kOpBlx, TargetPtrReg(kInvokeTgt));
+}
+
+} // namespace art
diff --git a/compiler/dex/quick/mips64/codegen_mips64.h b/compiler/dex/quick/mips64/codegen_mips64.h
new file mode 100644
index 0000000..57c30d8
--- /dev/null
+++ b/compiler/dex/quick/mips64/codegen_mips64.h
@@ -0,0 +1,328 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_QUICK_MIPS64_CODEGEN_MIPS64_H_
+#define ART_COMPILER_DEX_QUICK_MIPS64_CODEGEN_MIPS64_H_
+
+#include "dex/quick/mir_to_lir.h"
+#include "mips64_lir.h"
+
+namespace art {
+
+struct CompilationUnit;
+
+class Mips64Mir2Lir FINAL : public Mir2Lir {
+ protected:
+ class InToRegStorageMips64Mapper : public InToRegStorageMapper {
+ public:
+ explicit InToRegStorageMips64Mapper(Mir2Lir* m2l) : m2l_(m2l), cur_arg_reg_(0) {}
+ virtual RegStorage GetNextReg(ShortyArg arg);
+ virtual void Reset() OVERRIDE {
+ cur_arg_reg_ = 0;
+ }
+ protected:
+ Mir2Lir* m2l_;
+ private:
+ size_t cur_arg_reg_;
+ };
+
+ InToRegStorageMips64Mapper in_to_reg_storage_mips64_mapper_;
+ InToRegStorageMapper* GetResetedInToRegStorageMapper() OVERRIDE {
+ in_to_reg_storage_mips64_mapper_.Reset();
+ return &in_to_reg_storage_mips64_mapper_;
+ }
+
+ public:
+ Mips64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
+
+ // Required for target - codegen utilities.
+ bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
+ RegLocation rl_dest, int lit);
+ bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+ void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1, int32_t constant)
+ OVERRIDE;
+ void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1, int64_t constant)
+ OVERRIDE;
+ LIR* CheckSuspendUsingLoad() OVERRIDE;
+ RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
+ LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
+ VolatileKind is_volatile) OVERRIDE;
+ LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
+ OpSize size) OVERRIDE;
+ LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
+ LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
+ LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
+ VolatileKind is_volatile) OVERRIDE;
+ LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
+ OpSize size) OVERRIDE;
+ LIR* GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest);
+ LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src);
+
+ /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
+ void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
+
+ // Required for target - register utilities.
+ RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
+ RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) OVERRIDE {
+ if (wide_kind == kWide || wide_kind == kRef) {
+ return As64BitReg(TargetReg(reg));
+ } else {
+ return Check32BitReg(TargetReg(reg));
+ }
+ }
+ RegStorage TargetPtrReg(SpecialTargetRegister reg) OVERRIDE {
+ return As64BitReg(TargetReg(reg));
+ }
+ RegLocation GetReturnAlt();
+ RegLocation GetReturnWideAlt();
+ RegLocation LocCReturn();
+ RegLocation LocCReturnRef();
+ RegLocation LocCReturnDouble();
+ RegLocation LocCReturnFloat();
+ RegLocation LocCReturnWide();
+ ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
+ void AdjustSpillMask();
+ void ClobberCallerSave();
+ void FreeCallTemps();
+ void LockCallTemps();
+ void CompilerInitializeRegAlloc();
+
+ // Required for target - miscellaneous.
+ void AssembleLIR();
+ int AssignInsnOffsets();
+ void AssignOffsets();
+ AssemblerStatus AssembleInstructions(CodeOffset start_addr);
+ void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
+ void SetupTargetResourceMasks(LIR* lir, uint64_t flags, ResourceMask* use_mask,
+ ResourceMask* def_mask) OVERRIDE;
+ const char* GetTargetInstFmt(int opcode);
+ const char* GetTargetInstName(int opcode);
+ std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+ ResourceMask GetPCUseDefEncoding() const OVERRIDE;
+ uint64_t GetTargetInstFlags(int opcode);
+ size_t GetInsnSize(LIR* lir) OVERRIDE;
+ bool IsUnconditionalBranch(LIR* lir);
+
+ // Get the register class for load/store of a field.
+ RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
+
+ // Required for target - Dalvik-level generators.
+ void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation lr_shift);
+ void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, int flags);
+ void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
+ RegLocation rl_dest, int scale);
+ void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
+ RegLocation rl_src, int scale, bool card_mark);
+ void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_shift, int flags);
+ void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
+ bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
+ bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
+ bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
+ bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
+ bool GenInlinedSqrt(CallInfo* info);
+ bool GenInlinedPeek(CallInfo* info, OpSize size);
+ bool GenInlinedPoke(CallInfo* info, OpSize size);
+ void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+ void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, int flags) OVERRIDE;
+ RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
+ RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
+ void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenDivZeroCheckWide(RegStorage reg);
+ void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
+ void GenExitSequence();
+ void GenSpecialExitSequence() OVERRIDE;
+ void GenSpecialEntryForSuspend() OVERRIDE;
+ void GenSpecialExitForSuspend() OVERRIDE;
+ void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
+ void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
+ void GenSelect(BasicBlock* bb, MIR* mir);
+ void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+ int32_t true_val, int32_t false_val, RegStorage rs_dest,
+ RegisterClass dest_reg_class) OVERRIDE;
+ bool GenMemBarrier(MemBarrierKind barrier_kind);
+ void GenMoveException(RegLocation rl_dest);
+ void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
+ int first_bit, int second_bit);
+ void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
+ void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
+ void GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+ void GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+ bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);
+
+ // Required for target - single operation generators.
+ LIR* OpUnconditionalBranch(LIR* target);
+ LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
+ LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
+ LIR* OpCondBranch(ConditionCode cc, LIR* target);
+ LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
+ LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
+ LIR* OpIT(ConditionCode cond, const char* guide);
+ void OpEndIT(LIR* it);
+ LIR* OpMem(OpKind op, RegStorage r_base, int disp);
+ LIR* OpPcRelLoad(RegStorage reg, LIR* target);
+ LIR* OpReg(OpKind op, RegStorage r_dest_src);
+ void OpRegCopy(RegStorage r_dest, RegStorage r_src);
+ LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
+ LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
+ LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
+ LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
+ LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
+ LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
+ LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
+ LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
+ LIR* OpTestSuspend(LIR* target);
+ LIR* OpVldm(RegStorage r_base, int count);
+ LIR* OpVstm(RegStorage r_base, int count);
+ void OpRegCopyWide(RegStorage dest, RegStorage src);
+
+ // TODO: collapse r_dest.
+ LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
+ // TODO: collapse r_src.
+ LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
+ void SpillCoreRegs();
+ void UnSpillCoreRegs();
+ static const Mips64EncodingMap EncodingMap[kMips64Last];
+ bool InexpensiveConstantInt(int32_t value);
+ bool InexpensiveConstantFloat(int32_t value);
+ bool InexpensiveConstantLong(int64_t value);
+ bool InexpensiveConstantDouble(int64_t value);
+
+ bool WideGPRsAreAliases() const OVERRIDE {
+ return true; // 64b architecture.
+ }
+ bool WideFPRsAreAliases() const OVERRIDE {
+ return true; // 64b architecture.
+ }
+
+ LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+ RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ bool is_div, int flags) OVERRIDE;
+ RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div)
+ OVERRIDE;
+ NextCallInsn GetNextSDCallInsn() OVERRIDE;
+ LIR* GenCallInsn(const MirMethodLoweringInfo& method_info) OVERRIDE;
+ // Unimplemented intrinsics.
+ bool GenInlinedCharAt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
+ return false;
+ }
+ bool GenInlinedAbsInt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
+ return false;
+ }
+ bool GenInlinedAbsLong(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
+ return false;
+ }
+ bool GenInlinedIndexOf(CallInfo* info ATTRIBUTE_UNUSED, bool zero_based ATTRIBUTE_UNUSED)
+ OVERRIDE {
+ return false;
+ }
+
+ private:
+ void GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
+ void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+ void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, bool is_div, int flags);
+ void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src,
+ RegisterClass reg_class);
+
+ void ConvertShortToLongBranch(LIR* lir);
+
+ /**
+ * @param reg #RegStorage containing a Solo64 input register (e.g. @c a1 or @c d0).
+ * @return A Solo32 with the same register number as the @p reg (e.g. @c a1 or @c f0).
+ * @see As64BitReg
+ */
+ RegStorage As32BitReg(RegStorage reg) {
+ DCHECK(!reg.IsPair());
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Expected 64b register";
+ } else {
+ LOG(WARNING) << "Expected 64b register";
+ return reg;
+ }
+ }
+ RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
+ reg.GetRawBits() & RegStorage::kRegTypeMask);
+ DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
+ ->GetReg().GetReg(),
+ ret_val.GetReg());
+ return ret_val;
+ }
+
+ RegStorage Check32BitReg(RegStorage reg) {
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Checked for 32b register";
+ } else {
+ LOG(WARNING) << "Checked for 32b register";
+ return As32BitReg(reg);
+ }
+ }
+ return reg;
+ }
+
+ /**
+ * @param reg #RegStorage containing a Solo32 input register (e.g. @c a1 or @c f0).
+ * @return A Solo64 with the same register number as the @p reg (e.g. @c a1 or @c d0).
+ */
+ RegStorage As64BitReg(RegStorage reg) {
+ DCHECK(!reg.IsPair());
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Expected 32b register";
+ } else {
+ LOG(WARNING) << "Expected 32b register";
+ return reg;
+ }
+ }
+ RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
+ reg.GetRawBits() & RegStorage::kRegTypeMask);
+ DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
+ ->GetReg().GetReg(),
+ ret_val.GetReg());
+ return ret_val;
+ }
+
+ RegStorage Check64BitReg(RegStorage reg) {
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Checked for 64b register";
+ } else {
+ LOG(WARNING) << "Checked for 64b register";
+ return As64BitReg(reg);
+ }
+ }
+ return reg;
+ }
+
+ void GenBreakpoint(int code);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_DEX_QUICK_MIPS64_CODEGEN_MIPS64_H_
diff --git a/compiler/dex/quick/mips64/fp_mips64.cc b/compiler/dex/quick/mips64/fp_mips64.cc
new file mode 100644
index 0000000..5c8ee9c
--- /dev/null
+++ b/compiler/dex/quick/mips64/fp_mips64.cc
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_mips64.h"
+
+#include "base/logging.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "mips64_lir.h"
+
+namespace art {
+
+void Mips64Mir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2) {
+ int op = kMips64Nop;
+ RegLocation rl_result;
+
+ /*
+ * Don't attempt to optimize register usage since these opcodes call out to
+ * the handlers.
+ */
+ switch (opcode) {
+ case Instruction::ADD_FLOAT_2ADDR:
+ case Instruction::ADD_FLOAT:
+ op = kMips64Fadds;
+ break;
+ case Instruction::SUB_FLOAT_2ADDR:
+ case Instruction::SUB_FLOAT:
+ op = kMips64Fsubs;
+ break;
+ case Instruction::DIV_FLOAT_2ADDR:
+ case Instruction::DIV_FLOAT:
+ op = kMips64Fdivs;
+ break;
+ case Instruction::MUL_FLOAT_2ADDR:
+ case Instruction::MUL_FLOAT:
+ op = kMips64Fmuls;
+ break;
+ case Instruction::REM_FLOAT_2ADDR:
+ case Instruction::REM_FLOAT:
+ FlushAllRegs(); // Send everything to home location.
+ CallRuntimeHelperRegLocationRegLocation(kQuickFmodf, rl_src1, rl_src2, false);
+ rl_result = GetReturn(kFPReg);
+ StoreValue(rl_dest, rl_result);
+ return;
+ case Instruction::NEG_FLOAT:
+ GenNegFloat(rl_dest, rl_src1);
+ return;
+ default:
+ LOG(FATAL) << "Unexpected opcode: " << opcode;
+ }
+ rl_src1 = LoadValue(rl_src1, kFPReg);
+ rl_src2 = LoadValue(rl_src2, kFPReg);
+ rl_result = EvalLoc(rl_dest, kFPReg, true);
+ NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ StoreValue(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2) {
+ int op = kMips64Nop;
+ RegLocation rl_result;
+
+ switch (opcode) {
+ case Instruction::ADD_DOUBLE_2ADDR:
+ case Instruction::ADD_DOUBLE:
+ op = kMips64Faddd;
+ break;
+ case Instruction::SUB_DOUBLE_2ADDR:
+ case Instruction::SUB_DOUBLE:
+ op = kMips64Fsubd;
+ break;
+ case Instruction::DIV_DOUBLE_2ADDR:
+ case Instruction::DIV_DOUBLE:
+ op = kMips64Fdivd;
+ break;
+ case Instruction::MUL_DOUBLE_2ADDR:
+ case Instruction::MUL_DOUBLE:
+ op = kMips64Fmuld;
+ break;
+ case Instruction::REM_DOUBLE_2ADDR:
+ case Instruction::REM_DOUBLE:
+ FlushAllRegs(); // Send everything to home location.
+ CallRuntimeHelperRegLocationRegLocation(kQuickFmod, rl_src1, rl_src2, false);
+ rl_result = GetReturnWide(kFPReg);
+ StoreValueWide(rl_dest, rl_result);
+ return;
+ case Instruction::NEG_DOUBLE:
+ GenNegDouble(rl_dest, rl_src1);
+ return;
+ default:
+ LOG(FATAL) << "Unpexpected opcode: " << opcode;
+ }
+ rl_src1 = LoadValueWide(rl_src1, kFPReg);
+ DCHECK(rl_src1.wide);
+ rl_src2 = LoadValueWide(rl_src2, kFPReg);
+ DCHECK(rl_src2.wide);
+ rl_result = EvalLoc(rl_dest, kFPReg, true);
+ DCHECK(rl_dest.wide);
+ DCHECK(rl_result.wide);
+ NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
+ int32_t constant) {
+ // TODO: need mips64 implementation.
+ UNUSED(rl_dest, rl_src1, constant);
+ LOG(FATAL) << "Unimplemented GenMultiplyByConstantFloat in mips64";
+}
+
+void Mips64Mir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
+ int64_t constant) {
+ // TODO: need mips64 implementation.
+ UNUSED(rl_dest, rl_src1, constant);
+ LOG(FATAL) << "Unimplemented GenMultiplyByConstantDouble in mips64";
+}
+
+void Mips64Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src) {
+ int op = kMips64Nop;
+ RegLocation rl_result;
+ switch (opcode) {
+ case Instruction::INT_TO_FLOAT:
+ op = kMips64Fcvtsw;
+ break;
+ case Instruction::DOUBLE_TO_FLOAT:
+ op = kMips64Fcvtsd;
+ break;
+ case Instruction::FLOAT_TO_DOUBLE:
+ op = kMips64Fcvtds;
+ break;
+ case Instruction::INT_TO_DOUBLE:
+ op = kMips64Fcvtdw;
+ break;
+ case Instruction::FLOAT_TO_INT:
+ GenConversionCall(kQuickF2iz, rl_dest, rl_src, kCoreReg);
+ return;
+ case Instruction::DOUBLE_TO_INT:
+ GenConversionCall(kQuickD2iz, rl_dest, rl_src, kCoreReg);
+ return;
+ case Instruction::LONG_TO_DOUBLE:
+ GenConversionCall(kQuickL2d, rl_dest, rl_src, kFPReg);
+ return;
+ case Instruction::FLOAT_TO_LONG:
+ GenConversionCall(kQuickF2l, rl_dest, rl_src, kCoreReg);
+ return;
+ case Instruction::LONG_TO_FLOAT:
+ GenConversionCall(kQuickL2f, rl_dest, rl_src, kFPReg);
+ return;
+ case Instruction::DOUBLE_TO_LONG:
+ GenConversionCall(kQuickD2l, rl_dest, rl_src, kCoreReg);
+ return;
+ default:
+ LOG(FATAL) << "Unexpected opcode: " << opcode;
+ }
+ if (rl_src.wide) {
+ rl_src = LoadValueWide(rl_src, kFPReg);
+ } else {
+ rl_src = LoadValue(rl_src, kFPReg);
+ }
+ rl_result = EvalLoc(rl_dest, kFPReg, true);
+ NewLIR2(op, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ if (rl_dest.wide) {
+ StoreValueWide(rl_dest, rl_result);
+ } else {
+ StoreValue(rl_dest, rl_result);
+ }
+}
+
+void Mips64Mir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) {
+ bool wide = true;
+ QuickEntrypointEnum target;
+
+ switch (opcode) {
+ case Instruction::CMPL_FLOAT:
+ target = kQuickCmplFloat;
+ wide = false;
+ break;
+ case Instruction::CMPG_FLOAT:
+ target = kQuickCmpgFloat;
+ wide = false;
+ break;
+ case Instruction::CMPL_DOUBLE:
+ target = kQuickCmplDouble;
+ break;
+ case Instruction::CMPG_DOUBLE:
+ target = kQuickCmpgDouble;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected opcode: " << opcode;
+ target = kQuickCmplFloat;
+ }
+ FlushAllRegs();
+ LockCallTemps();
+ if (wide) {
+ RegStorage r_tmp1(RegStorage::k64BitSolo, rMIPS64_FARG0);
+ RegStorage r_tmp2(RegStorage::k64BitSolo, rMIPS64_FARG1);
+ LoadValueDirectWideFixed(rl_src1, r_tmp1);
+ LoadValueDirectWideFixed(rl_src2, r_tmp2);
+ } else {
+ LoadValueDirectFixed(rl_src1, rs_rMIPS64_FARG0);
+ LoadValueDirectFixed(rl_src2, rs_rMIPS64_FARG1);
+ }
+ RegStorage r_tgt = LoadHelper(target);
+ // NOTE: not a safepoint.
+ OpReg(kOpBlx, r_tgt);
+ RegLocation rl_result = GetReturn(kCoreReg);
+ StoreValue(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) {
+ UNUSED(bb, mir, gt_bias, is_double);
+ UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch";
+}
+
+void Mips64Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
+ RegLocation rl_result;
+ rl_src = LoadValue(rl_src, kFPReg);
+ rl_result = EvalLoc(rl_dest, kFPReg, true);
+ NewLIR2(kMips64Fnegs, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ StoreValue(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
+ RegLocation rl_result;
+ rl_src = LoadValueWide(rl_src, kFPReg);
+ rl_result = EvalLocWide(rl_dest, kFPReg, true);
+ NewLIR2(kMips64Fnegd, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ StoreValueWide(rl_dest, rl_result);
+}
+
+bool Mips64Mir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
+ // TODO: need Mips64 implementation.
+ UNUSED(info, is_min, is_long);
+ return false;
+}
+
+} // namespace art
diff --git a/compiler/dex/quick/mips64/int_mips64.cc b/compiler/dex/quick/mips64/int_mips64.cc
new file mode 100644
index 0000000..8a57c82
--- /dev/null
+++ b/compiler/dex/quick/mips64/int_mips64.cc
@@ -0,0 +1,692 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the Mips64 ISA */
+
+#include "codegen_mips64.h"
+
+#include "base/logging.h"
+#include "dex/mir_graph.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "dex/reg_storage_eq.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "mips64_lir.h"
+#include "mirror/array-inl.h"
+
+namespace art {
+
+/*
+ * Compare two 64-bit values
+ * x = y return 0
+ * x < y return -1
+ * x > y return 1
+ *
+ * slt temp, x, y; # (x < y) ? 1:0
+ * slt res, y, x; # (x > y) ? 1:0
+ * subu res, res, temp; # res = -1:1:0 for [ < > = ]
+ *
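+ *    Example: x = 7, y = 3 gives temp = 0 (7 < 3 is false) and res = 1
+ *    (3 < 7 is true), so res - temp = 1, i.e. "greater than".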
+ */
+void Mips64Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+ RegStorage temp = AllocTempWide();
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ NewLIR3(kMips64Slt, temp.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ NewLIR3(kMips64Slt, rl_result.reg.GetReg(), rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
+ NewLIR3(kMips64Subu, rl_result.reg.GetReg(), rl_result.reg.GetReg(), temp.GetReg());
+ FreeTemp(temp);
+ StoreValue(rl_dest, rl_result);
+}
+
+LIR* Mips64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
+ LIR* branch;
+ Mips64OpCode slt_op;
+ Mips64OpCode br_op;
+ bool cmp_zero = false;
+ bool swapped = false;
+ switch (cond) {
+ case kCondEq:
+ br_op = kMips64Beq;
+ cmp_zero = true;
+ break;
+ case kCondNe:
+ br_op = kMips64Bne;
+ cmp_zero = true;
+ break;
+ case kCondUlt:
+ slt_op = kMips64Sltu;
+ br_op = kMips64Bnez;
+ break;
+ case kCondUge:
+ slt_op = kMips64Sltu;
+ br_op = kMips64Beqz;
+ break;
+ case kCondGe:
+ slt_op = kMips64Slt;
+ br_op = kMips64Beqz;
+ break;
+ case kCondGt:
+ slt_op = kMips64Slt;
+ br_op = kMips64Bnez;
+ swapped = true;
+ break;
+ case kCondLe:
+ slt_op = kMips64Slt;
+ br_op = kMips64Beqz;
+ swapped = true;
+ break;
+ case kCondLt:
+ slt_op = kMips64Slt;
+ br_op = kMips64Bnez;
+ break;
+ case kCondHi: // Gtu
+ slt_op = kMips64Sltu;
+ br_op = kMips64Bnez;
+ swapped = true;
+ break;
+ default:
+ LOG(FATAL) << "No support for ConditionCode: " << cond;
+ return NULL;
+ }
+ if (cmp_zero) {
+ branch = NewLIR2(br_op, src1.GetReg(), src2.GetReg());
+ } else {
+ RegStorage t_reg = AllocTemp();
+ if (swapped) {
+ NewLIR3(slt_op, t_reg.GetReg(), src2.GetReg(), src1.GetReg());
+ } else {
+ NewLIR3(slt_op, t_reg.GetReg(), src1.GetReg(), src2.GetReg());
+ }
+ branch = NewLIR1(br_op, t_reg.GetReg());
+ FreeTemp(t_reg);
+ }
+ branch->target = target;
+ return branch;
+}
+
+LIR* Mips64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg,
+ int check_value, LIR* target) {
+ LIR* branch;
+ if (check_value != 0) {
+ // TUNING: handle s16 & kCondLt/Mi case using slti.
+ RegStorage t_reg = AllocTemp();
+ LoadConstant(t_reg, check_value);
+ branch = OpCmpBranch(cond, reg, t_reg, target);
+ FreeTemp(t_reg);
+ return branch;
+ }
+ Mips64OpCode opc;
+ switch (cond) {
+ case kCondEq: opc = kMips64Beqz; break;
+ case kCondGe: opc = kMips64Bgez; break;
+ case kCondGt: opc = kMips64Bgtz; break;
+ case kCondLe: opc = kMips64Blez; break;
+ // case KCondMi:
+ case kCondLt: opc = kMips64Bltz; break;
+ case kCondNe: opc = kMips64Bnez; break;
+ default:
+      // TUNING: use slti when applicable.
+ RegStorage t_reg = AllocTemp();
+ LoadConstant(t_reg, check_value);
+ branch = OpCmpBranch(cond, reg, t_reg, target);
+ FreeTemp(t_reg);
+ return branch;
+ }
+ branch = NewLIR1(opc, reg.GetReg());
+ branch->target = target;
+ return branch;
+}
+
+LIR* Mips64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
+ DCHECK(!r_dest.IsPair() && !r_src.IsPair());
+ if (r_dest.IsFloat() || r_src.IsFloat())
+ return OpFpRegCopy(r_dest, r_src);
+  // TODO: Check that r_src and r_dest are either both 32-bit or both 64-bit.
+ LIR* res;
+ if (r_dest.Is64Bit() || r_src.Is64Bit()) {
+ res = RawLIR(current_dalvik_offset_, kMips64Move, r_dest.GetReg(), r_src.GetReg());
+ } else {
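+    // A 32-bit copy is done with "sll rd, rs, 0", which on MIPS64 also
+    // sign-extends the low word, keeping the value in canonical 64-bit form.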
+ res = RawLIR(current_dalvik_offset_, kMips64Sll, r_dest.GetReg(), r_src.GetReg(), 0);
+ }
+ if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+ res->flags.is_nop = true;
+ }
+ return res;
+}
+
+void Mips64Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
+ if (r_dest != r_src) {
+ LIR *res = OpRegCopyNoInsert(r_dest, r_src);
+ AppendLIR(res);
+ }
+}
+
+void Mips64Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
+ OpRegCopy(r_dest, r_src);
+}
+
+void Mips64Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+ int32_t true_val, int32_t false_val, RegStorage rs_dest,
+ RegisterClass dest_reg_class) {
+ UNUSED(dest_reg_class);
+ // Implement as a branch-over.
+ // TODO: Conditional move?
+ LoadConstant(rs_dest, true_val);
+ LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, NULL);
+ LoadConstant(rs_dest, false_val);
+ LIR* target_label = NewLIR0(kPseudoTargetLabel);
+ ne_branchover->target = target_label;
+}
+
+void Mips64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
+ UNIMPLEMENTED(FATAL) << "Need codegen for select";
+}
+
+void Mips64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
+ UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
+}
+
+RegLocation Mips64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg1, RegStorage reg2,
+ bool is_div) {
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ NewLIR3(is_div ? kMips64Div : kMips64Mod, rl_result.reg.GetReg(), reg1.GetReg(), reg2.GetReg());
+ return rl_result;
+}
+
+RegLocation Mips64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit,
+ bool is_div) {
+ RegStorage t_reg = AllocTemp();
+ NewLIR3(kMips64Addiu, t_reg.GetReg(), rZERO, lit);
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ NewLIR3(is_div ? kMips64Div : kMips64Mod, rl_result.reg.GetReg(), reg1.GetReg(), t_reg.GetReg());
+ FreeTemp(t_reg);
+ return rl_result;
+}
+
+RegLocation Mips64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ bool is_div, int flags) {
+ UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
+ LOG(FATAL) << "Unexpected use of GenDivRem for Mips64";
+ UNREACHABLE();
+}
+
+RegLocation Mips64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
+ bool is_div) {
+ UNUSED(rl_dest, rl_src1, lit, is_div);
+ LOG(FATAL) << "Unexpected use of GenDivRemLit for Mips64";
+ UNREACHABLE();
+}
+
+bool Mips64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
+ UNUSED(info, is_long, is_object);
+ return false;
+}
+
+bool Mips64Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
+ UNUSED(info);
+ // TODO: add Mips64 implementation.
+ return false;
+}
+
+bool Mips64Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
+ UNUSED(info);
+ // TODO: add Mips64 implementation.
+ return false;
+}
+
+bool Mips64Mir2Lir::GenInlinedSqrt(CallInfo* info) {
+ UNUSED(info);
+ return false;
+}
+
+bool Mips64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
+ if (size != kSignedByte) {
+ // MIPS64 supports only aligned access. Defer unaligned access to JNI implementation.
+ return false;
+ }
+ RegLocation rl_src_address = info->args[0]; // Long address.
+ RegLocation rl_dest = InlineTarget(info);
+ RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ DCHECK(size == kSignedByte);
+ LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
+ StoreValue(rl_dest, rl_result);
+ return true;
+}
+
+bool Mips64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
+ if (size != kSignedByte) {
+ // MIPS64 supports only aligned access. Defer unaligned access to JNI implementation.
+ return false;
+ }
+ RegLocation rl_src_address = info->args[0]; // Long address.
+ RegLocation rl_src_value = info->args[2]; // [size] value.
+ RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);
+ DCHECK(size == kSignedByte);
+ RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
+ return true;
+}
+
+LIR* Mips64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+ UNUSED(reg, target);
+ LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips64";
+ UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::OpVldm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
+ LOG(FATAL) << "Unexpected use of OpVldm for Mips64";
+ UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::OpVstm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
+ LOG(FATAL) << "Unexpected use of OpVstm for Mips64";
+ UNREACHABLE();
+}
+
+void Mips64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result,
+ int lit, int first_bit, int second_bit) {
+ UNUSED(lit);
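+  // lit == 2^first_bit + 2^second_bit. E.g. for lit = 10 (bits 1 and 3):
+  // t = src << 2, result = src + t (= src * 5), result <<= 1 (= src * 10).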
+ RegStorage t_reg = AllocTemp();
+ OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
+ OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
+ FreeTemp(t_reg);
+ if (first_bit != 0) {
+ OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
+ }
+}
+
+void Mips64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
+ GenDivZeroCheck(reg);
+}
+
+// Test suspend flag, return target of taken suspend branch.
+LIR* Mips64Mir2Lir::OpTestSuspend(LIR* target) {
+ OpRegImm(kOpSub, rs_rMIPS64_SUSPEND, 1);
+ return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, rs_rMIPS64_SUSPEND, 0, target);
+}
+
+// Decrement register and branch on condition.
+LIR* Mips64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
+ OpRegImm(kOpSub, reg, 1);
+ return OpCmpImmBranch(c_code, reg, 0, target);
+}
+
+bool Mips64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
+ RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
+ LOG(FATAL) << "Unexpected use of smallLiteralDive in Mips64";
+ UNREACHABLE();
+}
+
+bool Mips64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(rl_src, rl_dest, lit);
+ LOG(FATAL) << "Unexpected use of easyMultiply in Mips64";
+ UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::OpIT(ConditionCode cond, const char* guide) {
+ UNUSED(cond, guide);
+ LOG(FATAL) << "Unexpected use of OpIT in Mips64";
+ UNREACHABLE();
+}
+
+void Mips64Mir2Lir::OpEndIT(LIR* it) {
+ UNUSED(it);
+ LOG(FATAL) << "Unexpected use of OpEndIT in Mips64";
+}
+
+void Mips64Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2, int flags) {
+ switch (opcode) {
+ case Instruction::NOT_LONG:
+ GenNotLong(rl_dest, rl_src2);
+ return;
+ case Instruction::ADD_LONG:
+ case Instruction::ADD_LONG_2ADDR:
+ GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::SUB_LONG:
+ case Instruction::SUB_LONG_2ADDR:
+ GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::MUL_LONG:
+ case Instruction::MUL_LONG_2ADDR:
+ GenMulLong(rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::DIV_LONG:
+ case Instruction::DIV_LONG_2ADDR:
+ GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true, flags);
+ return;
+ case Instruction::REM_LONG:
+ case Instruction::REM_LONG_2ADDR:
+ GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false, flags);
+ return;
+ case Instruction::AND_LONG:
+ case Instruction::AND_LONG_2ADDR:
+ GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::OR_LONG:
+ case Instruction::OR_LONG_2ADDR:
+ GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::XOR_LONG:
+ case Instruction::XOR_LONG_2ADDR:
+ GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::NEG_LONG:
+ GenNegLong(rl_dest, rl_src2);
+ return;
+
+ default:
+ LOG(FATAL) << "Invalid long arith op";
+ return;
+ }
+}
+
+void Mips64Mir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) {
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
+ rl_src = LoadValueWide(rl_src, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ OpRegReg(kOpMvn, rl_result.reg, rl_src.reg);
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
+ rl_src = LoadValueWide(rl_src, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ NewLIR3(kMips64Dmul, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2, bool is_div,
+ int flags) {
+ UNUSED(opcode);
+ // TODO: Implement easy div/rem?
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+ if ((flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
+ GenDivZeroCheckWide(rl_src2.reg);
+ }
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ NewLIR3(is_div ? kMips64Ddiv : kMips64Dmod, rl_result.reg.GetReg(), rl_src1.reg.GetReg(),
+ rl_src2.reg.GetReg());
+ StoreValueWide(rl_dest, rl_result);
+}
+
+/*
+ * Generate array load
+ */
+void Mips64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_dest, int scale) {
+ RegisterClass reg_class = RegClassBySize(size);
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
+ int data_offset;
+ RegLocation rl_result;
+ rl_array = LoadValue(rl_array, kRefReg);
+ rl_index = LoadValue(rl_index, kCoreReg);
+
+ // FIXME: need to add support for rl_index.is_const.
+
+ if (size == k64 || size == kDouble) {
+ data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
+ } else {
+ data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
+ }
+
+ // Null object?
+ GenNullCheck(rl_array.reg, opt_flags);
+
+ RegStorage reg_ptr = AllocTempRef();
+ bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+ RegStorage reg_len;
+ if (needs_range_check) {
+ reg_len = AllocTemp();
+ // Get len.
+ Load32Disp(rl_array.reg, len_offset, reg_len);
+ }
+ // reg_ptr -> array data.
+ OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
+ FreeTemp(rl_array.reg);
+ if ((size == k64) || (size == kDouble)) {
+ if (scale) {
+ RegStorage r_new_index = AllocTemp();
+ OpRegRegImm(kOpLsl, r_new_index, rl_index.reg, scale);
+ OpRegReg(kOpAdd, reg_ptr, r_new_index);
+ FreeTemp(r_new_index);
+ } else {
+ OpRegReg(kOpAdd, reg_ptr, rl_index.reg);
+ }
+ FreeTemp(rl_index.reg);
+ rl_result = EvalLoc(rl_dest, reg_class, true);
+
+ if (needs_range_check) {
+ GenArrayBoundsCheck(rl_index.reg, reg_len);
+ FreeTemp(reg_len);
+ }
+ LoadBaseDisp(reg_ptr, 0, rl_result.reg, size, kNotVolatile);
+
+ FreeTemp(reg_ptr);
+ StoreValueWide(rl_dest, rl_result);
+ } else {
+ rl_result = EvalLoc(rl_dest, reg_class, true);
+
+ if (needs_range_check) {
+ GenArrayBoundsCheck(rl_index.reg, reg_len);
+ FreeTemp(reg_len);
+ }
+ if (rl_result.ref) {
+ LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), As32BitReg(rl_result.reg), scale,
+ kReference);
+ } else {
+ LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
+ }
+
+ FreeTemp(reg_ptr);
+ StoreValue(rl_dest, rl_result);
+ }
+}
+
+/*
+ * Generate array store.
+ */
+void Mips64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale,
+ bool card_mark) {
+ RegisterClass reg_class = RegClassBySize(size);
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
+ int data_offset;
+
+ if (size == k64 || size == kDouble) {
+ data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
+ } else {
+ data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
+ }
+
+ rl_array = LoadValue(rl_array, kRefReg);
+ rl_index = LoadValue(rl_index, kCoreReg);
+
+ // FIXME: need to add support for rl_index.is_const.
+
+ RegStorage reg_ptr;
+ bool allocated_reg_ptr_temp = false;
+ if (IsTemp(rl_array.reg) && !card_mark) {
+ Clobber(rl_array.reg);
+ reg_ptr = rl_array.reg;
+ } else {
+ reg_ptr = AllocTemp();
+ OpRegCopy(reg_ptr, rl_array.reg);
+ allocated_reg_ptr_temp = true;
+ }
+
+ // Null object?
+ GenNullCheck(rl_array.reg, opt_flags);
+
+ bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+ RegStorage reg_len;
+ if (needs_range_check) {
+ reg_len = AllocTemp();
+ // NOTE: max live temps(4) here.
+ // Get len.
+ Load32Disp(rl_array.reg, len_offset, reg_len);
+ }
+ // reg_ptr -> array data.
+ OpRegImm(kOpAdd, reg_ptr, data_offset);
+ // At this point, reg_ptr points to array, 2 live temps.
+ if ((size == k64) || (size == kDouble)) {
+ // TUNING: specific wide routine that can handle fp regs.
+ if (scale) {
+ RegStorage r_new_index = AllocTemp();
+ OpRegRegImm(kOpLsl, r_new_index, rl_index.reg, scale);
+ OpRegReg(kOpAdd, reg_ptr, r_new_index);
+ FreeTemp(r_new_index);
+ } else {
+ OpRegReg(kOpAdd, reg_ptr, rl_index.reg);
+ }
+ rl_src = LoadValueWide(rl_src, reg_class);
+
+ if (needs_range_check) {
+ GenArrayBoundsCheck(rl_index.reg, reg_len);
+ FreeTemp(reg_len);
+ }
+
+ StoreBaseDisp(reg_ptr, 0, rl_src.reg, size, kNotVolatile);
+ } else {
+ rl_src = LoadValue(rl_src, reg_class);
+ if (needs_range_check) {
+ GenArrayBoundsCheck(rl_index.reg, reg_len);
+ FreeTemp(reg_len);
+ }
+ StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
+ }
+ if (allocated_reg_ptr_temp) {
+ FreeTemp(reg_ptr);
+ }
+ if (card_mark) {
+ MarkGCCard(opt_flags, rl_src.reg, rl_array.reg);
+ }
+}
+
+void Mips64Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_shift) {
+ OpKind op = kOpBkpt;
+ switch (opcode) {
+ case Instruction::SHL_LONG:
+ case Instruction::SHL_LONG_2ADDR:
+ op = kOpLsl;
+ break;
+ case Instruction::SHR_LONG:
+ case Instruction::SHR_LONG_2ADDR:
+ op = kOpAsr;
+ break;
+ case Instruction::USHR_LONG:
+ case Instruction::USHR_LONG_2ADDR:
+ op = kOpLsr;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected case: " << opcode;
+ }
+ rl_shift = LoadValue(rl_shift, kCoreReg);
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_shift, int flags) {
+ UNUSED(flags);
+ OpKind op = kOpBkpt;
+ // Per spec, we only care about low 6 bits of shift amount.
+ int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
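+  // E.g. a constant shift amount of 65 masks to 1, so "x << 65" emits "x << 1".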
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ if (shift_amount == 0) {
+ StoreValueWide(rl_dest, rl_src1);
+ return;
+ }
+
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ switch (opcode) {
+ case Instruction::SHL_LONG:
+ case Instruction::SHL_LONG_2ADDR:
+ op = kOpLsl;
+ break;
+ case Instruction::SHR_LONG:
+ case Instruction::SHR_LONG_2ADDR:
+ op = kOpAsr;
+ break;
+ case Instruction::USHR_LONG:
+ case Instruction::USHR_LONG_2ADDR:
+ op = kOpLsr;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected case";
+ }
+ OpRegRegImm(op, rl_result.reg, rl_src1.reg, shift_amount);
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2, int flags) {
+ // Default - bail to non-const handler.
+ GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
+}
+
+void Mips64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
+ rl_src = LoadValue(rl_src, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
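+  // On MIPS64, "sll rd, rs, 0" sign-extends the low 32 bits to 64 bits,
+  // which is exactly the int-to-long widening required here.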
+ NewLIR3(kMips64Sll, rl_result.reg.GetReg(), As64BitReg(rl_src.reg).GetReg(), 0);
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
+ RegLocation rl_src, RegisterClass reg_class) {
+ FlushAllRegs(); // Send everything to home location.
+ CallRuntimeHelperRegLocation(trampoline, rl_src, false);
+ if (rl_dest.wide) {
+ RegLocation rl_result = GetReturnWide(reg_class);
+ StoreValueWide(rl_dest, rl_result);
+ } else {
+ RegLocation rl_result = GetReturn(reg_class);
+ StoreValue(rl_dest, rl_result);
+ }
+}
+
+} // namespace art
diff --git a/compiler/dex/quick/mips64/mips64_lir.h b/compiler/dex/quick/mips64/mips64_lir.h
new file mode 100644
index 0000000..4a5c5ce
--- /dev/null
+++ b/compiler/dex/quick/mips64/mips64_lir.h
@@ -0,0 +1,648 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_QUICK_MIPS64_MIPS64_LIR_H_
+#define ART_COMPILER_DEX_QUICK_MIPS64_MIPS64_LIR_H_
+
+#include "dex/reg_location.h"
+#include "dex/reg_storage.h"
+
+namespace art {
+
+/*
+ * Runtime register conventions.
+ *
+ * zero is always the value 0
+ * at is scratch (normally used as temp reg by assembler)
+ * v0, v1 are scratch (normally hold subroutine return values)
+ * a0-a7 are scratch (normally hold subroutine arguments)
+ * t0-t3, t8 are scratch
+ * t9 is scratch (normally used for function calls)
+ * s0 (rMIPS_SUSPEND) is reserved [holds suspend-check counter]
+ * s1 (rMIPS_SELF) is reserved [holds current &Thread]
+ * s2-s7 are callee save (promotion target)
+ * k0, k1 are reserved for use by interrupt handlers
+ * gp is reserved for global pointer
+ * sp is reserved
+ * s8 is callee save (promotion target)
+ * ra is scratch (normally holds the return addr)
+ *
+ * Preserved across C calls: s0-s8
+ * Trashed across C calls: at, v0-v1, a0-a7, t0-t3, t8-t9, gp, ra
+ *
+ * Floating point registers
+ * NOTE: there are 32 fp registers.
+ * f0-f31
+ *
+ * f0-f31 trashed across C calls
+ *
+ * For mips64 code use:
+ * a0-a7 to hold operands
+ * v0-v1 to hold results
+ * t0-t3, t8-t9 for temps
+ *
+ * All jump/branch instructions have a delay slot after them.
+ *
+ * Stack frame diagram (stack grows down, higher addresses at top):
+ *
+ * +------------------------+
+ * | IN[ins-1] | {Note: resides in caller's frame}
+ * | . |
+ * | IN[0] |
+ * | caller's Method* |
+ * +========================+ {Note: start of callee's frame}
+ * | spill region | {variable sized - will include ra if non-leaf.}
+ * +------------------------+
+ * | ...filler word... | {Note: used as 2nd word of V[locals-1] if long}
+ * +------------------------+
+ * | V[locals-1] |
+ * | V[locals-2] |
+ * | . |
+ * | . |
+ * | V[1] |
+ * | V[0] |
+ * +------------------------+
+ * | 0 to 3 words padding |
+ * +------------------------+
+ * | OUT[outs-1] |
+ * | OUT[outs-2] |
+ * | . |
+ * | OUT[0] |
+ * | cur_method* | <<== sp w/ 16-byte alignment
+ * +========================+
+ */
+
+
+#define rARG0 rA0d
+#define rs_rARG0 rs_rA0d
+#define rARG1 rA1d
+#define rs_rARG1 rs_rA1d
+#define rARG2 rA2d
+#define rs_rARG2 rs_rA2d
+#define rARG3 rA3d
+#define rs_rARG3 rs_rA3d
+#define rARG4 rA4d
+#define rs_rARG4 rs_rA4d
+#define rARG5 rA5d
+#define rs_rARG5 rs_rA5d
+#define rARG6 rA6d
+#define rs_rARG6 rs_rA6d
+#define rARG7 rA7d
+#define rs_rARG7 rs_rA7d
+#define rRESULT0 rV0d
+#define rs_rRESULT0 rs_rV0d
+#define rRESULT1 rV1d
+#define rs_rRESULT1 rs_rV1d
+
+#define rFARG0 rF12
+#define rs_rFARG0 rs_rF12
+#define rFARG1 rF13
+#define rs_rFARG1 rs_rF13
+#define rFARG2 rF14
+#define rs_rFARG2 rs_rF14
+#define rFARG3 rF15
+#define rs_rFARG3 rs_rF15
+#define rFARG4 rF16
+#define rs_rFARG4 rs_rF16
+#define rFARG5 rF17
+#define rs_rFARG5 rs_rF17
+#define rFARG6 rF18
+#define rs_rFARG6 rs_rF18
+#define rFARG7 rF19
+#define rs_rFARG7 rs_rF19
+#define rFRESULT0 rF0
+#define rs_rFRESULT0 rs_rF0
+#define rFRESULT1 rF1
+#define rs_rFRESULT1 rs_rF1
+
+// Regs not used for Mips64.
+#define rMIPS64_LR RegStorage::kInvalidRegVal
+#define rMIPS64_PC RegStorage::kInvalidRegVal
+
+enum Mips64ResourceEncodingPos {
+ kMips64GPReg0 = 0,
+ kMips64RegSP = 29,
+ kMips64RegLR = 31,
+ kMips64FPReg0 = 32,
+ kMips64FPRegEnd = 64,
+ kMips64RegPC = kMips64FPRegEnd,
+ kMips64RegEnd = 65,
+};
+
+enum Mips64NativeRegisterPool { // private marker to avoid generate-operator-out.py from processing.
+ rZERO = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
+ rZEROd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 0,
+ rAT = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
+ rATd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 1,
+ rV0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
+ rV0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 2,
+ rV1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 3,
+ rV1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 3,
+ rA0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 4,
+ rA0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 4,
+ rA1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 5,
+ rA1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 5,
+ rA2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 6,
+ rA2d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 6,
+ rA3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 7,
+ rA3d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 7,
+ rA4 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 8,
+ rA4d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 8,
+ rA5 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 9,
+ rA5d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 9,
+ rA6 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 10,
+ rA6d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 10,
+ rA7 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 11,
+ rA7d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 11,
+ rT0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 12,
+ rT0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 12,
+ rT1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 13,
+ rT1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 13,
+ rT2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 14,
+ rT2d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 14,
+ rT3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 15,
+ rT3d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 15,
+ rS0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 16,
+ rS0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 16,
+ rS1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 17,
+ rS1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 17,
+ rS2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 18,
+ rS2d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 18,
+ rS3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 19,
+ rS3d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 19,
+ rS4 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 20,
+ rS4d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 20,
+ rS5 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 21,
+ rS5d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 21,
+ rS6 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 22,
+ rS6d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 22,
+ rS7 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 23,
+ rS7d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 23,
+ rT8 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 24,
+ rT8d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 24,
+ rT9 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 25,
+ rT9d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 25,
+ rK0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 26,
+ rK0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 26,
+ rK1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 27,
+ rK1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 27,
+ rGP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 28,
+ rGPd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 28,
+ rSP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 29,
+ rSPd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 29,
+ rFP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 30,
+ rFPd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 30,
+ rRA = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 31,
+ rRAd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 31,
+
+ rF0 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 0,
+ rF1 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 1,
+ rF2 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 2,
+ rF3 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 3,
+ rF4 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 4,
+ rF5 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 5,
+ rF6 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 6,
+ rF7 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 7,
+ rF8 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 8,
+ rF9 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 9,
+ rF10 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 10,
+ rF11 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 11,
+ rF12 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 12,
+ rF13 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 13,
+ rF14 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 14,
+ rF15 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 15,
+ rF16 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 16,
+ rF17 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 17,
+ rF18 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 18,
+ rF19 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 19,
+ rF20 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 20,
+ rF21 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 21,
+ rF22 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 22,
+ rF23 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 23,
+ rF24 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 24,
+ rF25 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 25,
+ rF26 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 26,
+ rF27 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 27,
+ rF28 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 28,
+ rF29 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 29,
+ rF30 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 30,
+ rF31 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 31,
+
+ rD0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 0,
+ rD1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 1,
+ rD2 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
+ rD3 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 3,
+ rD4 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 4,
+ rD5 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 5,
+ rD6 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
+ rD7 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 7,
+ rD8 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 8,
+ rD9 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 9,
+ rD10 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
+ rD11 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
+ rD12 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
+ rD13 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
+ rD14 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
+ rD15 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,
+ rD16 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 16,
+ rD17 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 17,
+ rD18 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 18,
+ rD19 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 19,
+ rD20 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 20,
+ rD21 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 21,
+ rD22 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 22,
+ rD23 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 23,
+ rD24 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 24,
+ rD25 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 25,
+ rD26 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 26,
+ rD27 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 27,
+ rD28 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 28,
+ rD29 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 29,
+ rD30 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 30,
+ rD31 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 31,
+};
+
+constexpr RegStorage rs_rZERO(RegStorage::kValid | rZERO);
+constexpr RegStorage rs_rZEROd(RegStorage::kValid | rZEROd);
+constexpr RegStorage rs_rAT(RegStorage::kValid | rAT);
+constexpr RegStorage rs_rATd(RegStorage::kValid | rATd);
+constexpr RegStorage rs_rV0(RegStorage::kValid | rV0);
+constexpr RegStorage rs_rV0d(RegStorage::kValid | rV0d);
+constexpr RegStorage rs_rV1(RegStorage::kValid | rV1);
+constexpr RegStorage rs_rV1d(RegStorage::kValid | rV1d);
+constexpr RegStorage rs_rA0(RegStorage::kValid | rA0);
+constexpr RegStorage rs_rA0d(RegStorage::kValid | rA0d);
+constexpr RegStorage rs_rA1(RegStorage::kValid | rA1);
+constexpr RegStorage rs_rA1d(RegStorage::kValid | rA1d);
+constexpr RegStorage rs_rA2(RegStorage::kValid | rA2);
+constexpr RegStorage rs_rA2d(RegStorage::kValid | rA2d);
+constexpr RegStorage rs_rA3(RegStorage::kValid | rA3);
+constexpr RegStorage rs_rA3d(RegStorage::kValid | rA3d);
+constexpr RegStorage rs_rA4(RegStorage::kValid | rA4);
+constexpr RegStorage rs_rA4d(RegStorage::kValid | rA4d);
+constexpr RegStorage rs_rA5(RegStorage::kValid | rA5);
+constexpr RegStorage rs_rA5d(RegStorage::kValid | rA5d);
+constexpr RegStorage rs_rA6(RegStorage::kValid | rA6);
+constexpr RegStorage rs_rA6d(RegStorage::kValid | rA6d);
+constexpr RegStorage rs_rA7(RegStorage::kValid | rA7);
+constexpr RegStorage rs_rA7d(RegStorage::kValid | rA7d);
+constexpr RegStorage rs_rT0(RegStorage::kValid | rT0);
+constexpr RegStorage rs_rT0d(RegStorage::kValid | rT0d);
+constexpr RegStorage rs_rT1(RegStorage::kValid | rT1);
+constexpr RegStorage rs_rT1d(RegStorage::kValid | rT1d);
+constexpr RegStorage rs_rT2(RegStorage::kValid | rT2);
+constexpr RegStorage rs_rT2d(RegStorage::kValid | rT2d);
+constexpr RegStorage rs_rT3(RegStorage::kValid | rT3);
+constexpr RegStorage rs_rT3d(RegStorage::kValid | rT3d);
+constexpr RegStorage rs_rS0(RegStorage::kValid | rS0);
+constexpr RegStorage rs_rS0d(RegStorage::kValid | rS0d);
+constexpr RegStorage rs_rS1(RegStorage::kValid | rS1);
+constexpr RegStorage rs_rS1d(RegStorage::kValid | rS1d);
+constexpr RegStorage rs_rS2(RegStorage::kValid | rS2);
+constexpr RegStorage rs_rS2d(RegStorage::kValid | rS2d);
+constexpr RegStorage rs_rS3(RegStorage::kValid | rS3);
+constexpr RegStorage rs_rS3d(RegStorage::kValid | rS3d);
+constexpr RegStorage rs_rS4(RegStorage::kValid | rS4);
+constexpr RegStorage rs_rS4d(RegStorage::kValid | rS4d);
+constexpr RegStorage rs_rS5(RegStorage::kValid | rS5);
+constexpr RegStorage rs_rS5d(RegStorage::kValid | rS5d);
+constexpr RegStorage rs_rS6(RegStorage::kValid | rS6);
+constexpr RegStorage rs_rS6d(RegStorage::kValid | rS6d);
+constexpr RegStorage rs_rS7(RegStorage::kValid | rS7);
+constexpr RegStorage rs_rS7d(RegStorage::kValid | rS7d);
+constexpr RegStorage rs_rT8(RegStorage::kValid | rT8);
+constexpr RegStorage rs_rT8d(RegStorage::kValid | rT8d);
+constexpr RegStorage rs_rT9(RegStorage::kValid | rT9);
+constexpr RegStorage rs_rT9d(RegStorage::kValid | rT9d);
+constexpr RegStorage rs_rK0(RegStorage::kValid | rK0);
+constexpr RegStorage rs_rK0d(RegStorage::kValid | rK0d);
+constexpr RegStorage rs_rK1(RegStorage::kValid | rK1);
+constexpr RegStorage rs_rK1d(RegStorage::kValid | rK1d);
+constexpr RegStorage rs_rGP(RegStorage::kValid | rGP);
+constexpr RegStorage rs_rGPd(RegStorage::kValid | rGPd);
+constexpr RegStorage rs_rSP(RegStorage::kValid | rSP);
+constexpr RegStorage rs_rSPd(RegStorage::kValid | rSPd);
+constexpr RegStorage rs_rFP(RegStorage::kValid | rFP);
+constexpr RegStorage rs_rFPd(RegStorage::kValid | rFPd);
+constexpr RegStorage rs_rRA(RegStorage::kValid | rRA);
+constexpr RegStorage rs_rRAd(RegStorage::kValid | rRAd);
+
+constexpr RegStorage rs_rMIPS64_LR(RegStorage::kInvalid); // Not used for MIPS64.
+constexpr RegStorage rs_rMIPS64_PC(RegStorage::kInvalid); // Not used for MIPS64.
+constexpr RegStorage rs_rMIPS64_COUNT(RegStorage::kInvalid); // Not used for MIPS64.
+
+constexpr RegStorage rs_rF0(RegStorage::kValid | rF0);
+constexpr RegStorage rs_rF1(RegStorage::kValid | rF1);
+constexpr RegStorage rs_rF2(RegStorage::kValid | rF2);
+constexpr RegStorage rs_rF3(RegStorage::kValid | rF3);
+constexpr RegStorage rs_rF4(RegStorage::kValid | rF4);
+constexpr RegStorage rs_rF5(RegStorage::kValid | rF5);
+constexpr RegStorage rs_rF6(RegStorage::kValid | rF6);
+constexpr RegStorage rs_rF7(RegStorage::kValid | rF7);
+constexpr RegStorage rs_rF8(RegStorage::kValid | rF8);
+constexpr RegStorage rs_rF9(RegStorage::kValid | rF9);
+constexpr RegStorage rs_rF10(RegStorage::kValid | rF10);
+constexpr RegStorage rs_rF11(RegStorage::kValid | rF11);
+constexpr RegStorage rs_rF12(RegStorage::kValid | rF12);
+constexpr RegStorage rs_rF13(RegStorage::kValid | rF13);
+constexpr RegStorage rs_rF14(RegStorage::kValid | rF14);
+constexpr RegStorage rs_rF15(RegStorage::kValid | rF15);
+constexpr RegStorage rs_rF16(RegStorage::kValid | rF16);
+constexpr RegStorage rs_rF17(RegStorage::kValid | rF17);
+constexpr RegStorage rs_rF18(RegStorage::kValid | rF18);
+constexpr RegStorage rs_rF19(RegStorage::kValid | rF19);
+constexpr RegStorage rs_rF20(RegStorage::kValid | rF20);
+constexpr RegStorage rs_rF21(RegStorage::kValid | rF21);
+constexpr RegStorage rs_rF22(RegStorage::kValid | rF22);
+constexpr RegStorage rs_rF23(RegStorage::kValid | rF23);
+constexpr RegStorage rs_rF24(RegStorage::kValid | rF24);
+constexpr RegStorage rs_rF25(RegStorage::kValid | rF25);
+constexpr RegStorage rs_rF26(RegStorage::kValid | rF26);
+constexpr RegStorage rs_rF27(RegStorage::kValid | rF27);
+constexpr RegStorage rs_rF28(RegStorage::kValid | rF28);
+constexpr RegStorage rs_rF29(RegStorage::kValid | rF29);
+constexpr RegStorage rs_rF30(RegStorage::kValid | rF30);
+constexpr RegStorage rs_rF31(RegStorage::kValid | rF31);
+
+constexpr RegStorage rs_rD0(RegStorage::kValid | rD0);
+constexpr RegStorage rs_rD1(RegStorage::kValid | rD1);
+constexpr RegStorage rs_rD2(RegStorage::kValid | rD2);
+constexpr RegStorage rs_rD3(RegStorage::kValid | rD3);
+constexpr RegStorage rs_rD4(RegStorage::kValid | rD4);
+constexpr RegStorage rs_rD5(RegStorage::kValid | rD5);
+constexpr RegStorage rs_rD6(RegStorage::kValid | rD6);
+constexpr RegStorage rs_rD7(RegStorage::kValid | rD7);
+constexpr RegStorage rs_rD8(RegStorage::kValid | rD8);
+constexpr RegStorage rs_rD9(RegStorage::kValid | rD9);
+constexpr RegStorage rs_rD10(RegStorage::kValid | rD10);
+constexpr RegStorage rs_rD11(RegStorage::kValid | rD11);
+constexpr RegStorage rs_rD12(RegStorage::kValid | rD12);
+constexpr RegStorage rs_rD13(RegStorage::kValid | rD13);
+constexpr RegStorage rs_rD14(RegStorage::kValid | rD14);
+constexpr RegStorage rs_rD15(RegStorage::kValid | rD15);
+constexpr RegStorage rs_rD16(RegStorage::kValid | rD16);
+constexpr RegStorage rs_rD17(RegStorage::kValid | rD17);
+constexpr RegStorage rs_rD18(RegStorage::kValid | rD18);
+constexpr RegStorage rs_rD19(RegStorage::kValid | rD19);
+constexpr RegStorage rs_rD20(RegStorage::kValid | rD20);
+constexpr RegStorage rs_rD21(RegStorage::kValid | rD21);
+constexpr RegStorage rs_rD22(RegStorage::kValid | rD22);
+constexpr RegStorage rs_rD23(RegStorage::kValid | rD23);
+constexpr RegStorage rs_rD24(RegStorage::kValid | rD24);
+constexpr RegStorage rs_rD25(RegStorage::kValid | rD25);
+constexpr RegStorage rs_rD26(RegStorage::kValid | rD26);
+constexpr RegStorage rs_rD27(RegStorage::kValid | rD27);
+constexpr RegStorage rs_rD28(RegStorage::kValid | rD28);
+constexpr RegStorage rs_rD29(RegStorage::kValid | rD29);
+constexpr RegStorage rs_rD30(RegStorage::kValid | rD30);
+constexpr RegStorage rs_rD31(RegStorage::kValid | rD31);
+
+// TODO: reduce/eliminate use of these.
+#define rMIPS64_SUSPEND rS0d
+#define rs_rMIPS64_SUSPEND rs_rS0d
+#define rMIPS64_SELF rS1d
+#define rs_rMIPS64_SELF rs_rS1d
+#define rMIPS64_SP rSPd
+#define rs_rMIPS64_SP rs_rSPd
+#define rMIPS64_ARG0 rARG0
+#define rs_rMIPS64_ARG0 rs_rARG0
+#define rMIPS64_ARG1 rARG1
+#define rs_rMIPS64_ARG1 rs_rARG1
+#define rMIPS64_ARG2 rARG2
+#define rs_rMIPS64_ARG2 rs_rARG2
+#define rMIPS64_ARG3 rARG3
+#define rs_rMIPS64_ARG3 rs_rARG3
+#define rMIPS64_ARG4 rARG4
+#define rs_rMIPS64_ARG4 rs_rARG4
+#define rMIPS64_ARG5 rARG5
+#define rs_rMIPS64_ARG5 rs_rARG5
+#define rMIPS64_ARG6 rARG6
+#define rs_rMIPS64_ARG6 rs_rARG6
+#define rMIPS64_ARG7 rARG7
+#define rs_rMIPS64_ARG7 rs_rARG7
+#define rMIPS64_FARG0 rFARG0
+#define rs_rMIPS64_FARG0 rs_rFARG0
+#define rMIPS64_FARG1 rFARG1
+#define rs_rMIPS64_FARG1 rs_rFARG1
+#define rMIPS64_FARG2 rFARG2
+#define rs_rMIPS64_FARG2 rs_rFARG2
+#define rMIPS64_FARG3 rFARG3
+#define rs_rMIPS64_FARG3 rs_rFARG3
+#define rMIPS64_FARG4 rFARG4
+#define rs_rMIPS64_FARG4 rs_rFARG4
+#define rMIPS64_FARG5 rFARG5
+#define rs_rMIPS64_FARG5 rs_rFARG5
+#define rMIPS64_FARG6 rFARG6
+#define rs_rMIPS64_FARG6 rs_rFARG6
+#define rMIPS64_FARG7 rFARG7
+#define rs_rMIPS64_FARG7 rs_rFARG7
+#define rMIPS64_RET0 rRESULT0
+#define rs_rMIPS64_RET0 rs_rRESULT0
+#define rMIPS64_RET1 rRESULT1
+#define rs_rMIPS64_RET1 rs_rRESULT1
+#define rMIPS64_INVOKE_TGT rT9d
+#define rs_rMIPS64_INVOKE_TGT rs_rT9d
+#define rMIPS64_COUNT RegStorage::kInvalidRegVal
+
+// RegisterLocation templates for return values (r_V0, or r_F0/r_D0 for floating point).
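+// Positional fields below (per dex/reg_location.h): location, wide, defined, is_const,
+// fp, core, ref, high_word, home, reg, s_reg_low, orig_sreg.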
+const RegLocation mips64_loc_c_return
+ {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1,
+ RegStorage(RegStorage::k32BitSolo, rV0), INVALID_SREG, INVALID_SREG};
+const RegLocation mips64_loc_c_return_ref
+ {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1,
+ RegStorage(RegStorage::k64BitSolo, rV0d), INVALID_SREG, INVALID_SREG};
+const RegLocation mips64_loc_c_return_wide
+ {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
+ RegStorage(RegStorage::k64BitSolo, rV0d), INVALID_SREG, INVALID_SREG};
+const RegLocation mips64_loc_c_return_float
+ {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1,
+ RegStorage(RegStorage::k32BitSolo, rF0), INVALID_SREG, INVALID_SREG};
+const RegLocation mips64_loc_c_return_double
+ {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
+ RegStorage(RegStorage::k64BitSolo, rD0), INVALID_SREG, INVALID_SREG};
+
+enum Mips64ShiftEncodings {
+ kMips64Lsl = 0x0,
+ kMips64Lsr = 0x1,
+ kMips64Asr = 0x2,
+ kMips64Ror = 0x3
+};
+
+// MIPS64 sync kinds (Note: support for kinds other than kSYNC0 may not exist).
+#define kSYNC0 0x00
+#define kSYNC_WMB 0x04
+#define kSYNC_MB 0x01
+#define kSYNC_ACQUIRE 0x11
+#define kSYNC_RELEASE 0x12
+#define kSYNC_RMB 0x13
+
+// TODO: Use smaller hammer when appropriate for target CPU.
+#define kST kSYNC0
+#define kSY kSYNC0
+
+/*
+ * The following enum defines the list of Mips64 instructions supported by the
+ * assembler. Their corresponding EncodingMap entries are defined in
+ * assemble_mips64.cc.
+ */
+enum Mips64OpCode {
+ kMips64First = 0,
+ kMips6432BitData = kMips64First, // data [31..0].
+ kMips64Addiu, // addiu t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
+ kMips64Addu, // add d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100001].
+ kMips64And, // and d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100100].
+ kMips64Andi, // andi t,s,imm16 [001100] s[25..21] t[20..16] imm16[15..0].
+ kMips64B, // b o [0001000000000000] o[15..0].
+ kMips64Bal, // bal o [0000010000010001] o[15..0].
+ // NOTE: the code tests the range kMips64Beq thru kMips64Bne, so adding an instruction in this
+ // range may require updates.
+ kMips64Beq, // beq s,t,o [000100] s[25..21] t[20..16] o[15..0].
+ kMips64Beqz, // beqz s,o [000100] s[25..21] [00000] o[15..0].
+ kMips64Bgez, // bgez s,o [000001] s[25..21] [00001] o[15..0].
+ kMips64Bgtz, // bgtz s,o [000111] s[25..21] [00000] o[15..0].
+ kMips64Blez, // blez s,o [000110] s[25..21] [00000] o[15..0].
+ kMips64Bltz, // bltz s,o [000001] s[25..21] [00000] o[15..0].
+ kMips64Bnez, // bnez s,o [000101] s[25..21] [00000] o[15..0].
+ kMips64Bne, // bne s,t,o [000101] s[25..21] t[20..16] o[15..0].
+ kMips64Break, // break code [000000] code[25..6] [001101].
+ kMips64Daddiu, // daddiu t,s,imm16 [011001] s[25..21] t[20..16] imm16[15..0].
+ kMips64Daddu, // daddu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101101].
+ kMips64Dahi, // dahi s,imm16 [000001] s[25..21] [00110] imm16[15..0].
+ kMips64Dati, // dati s,imm16 [000001] s[25..21] [11110] imm16[15..0].
+ kMips64Daui, // daui t,s,imm16 [011101] s[25..21] t[20..16] imm16[15..0].
+ kMips64Ddiv, // ddiv d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011110].
+ kMips64Div, // div d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011010].
+ kMips64Dmod, // dmod d,s,t [000000] s[25..21] t[20..16] d[15..11] [00011011110].
+ kMips64Dmul, // dmul d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011100].
+ kMips64Dmfc1, // dmfc1 t,s [01000100001] t[20..16] s[15..11] [00000000000].
+ kMips64Dmtc1, // dmtc1 t,s [01000100101] t[20..16] s[15..11] [00000000000].
+ kMips64Drotr32, // drotr32 d,t,a [00000000001] t[20..16] d[15..11] a[10..6] [111110].
+ kMips64Dsll, // dsll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111000].
+ kMips64Dsll32, // dsll32 d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111100].
+ kMips64Dsrl, // dsrl d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111010].
+ kMips64Dsrl32, // dsrl32 d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111110].
+ kMips64Dsra, // dsra d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111011].
+ kMips64Dsra32, // dsra32 d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111111].
+ kMips64Dsllv, // dsllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010100].
+ kMips64Dsrlv, // dsrlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010110].
+ kMips64Dsrav, // dsrav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010111].
+ kMips64Dsubu, // dsubu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101111].
+ kMips64Ext, // ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000].
+ kMips64Faddd, // add.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000000].
+ kMips64Fadds, // add.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000000].
+ kMips64Fdivd, // div.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000011].
+ kMips64Fdivs, // div.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000011].
+ kMips64Fmuld, // mul.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000010].
+ kMips64Fmuls, // mul.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000010].
+ kMips64Fsubd, // sub.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000001].
+ kMips64Fsubs, // sub.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000001].
+ kMips64Fcvtsd, // cvt.s.d d,s [01000110001] [00000] s[15..11] d[10..6] [100000].
+ kMips64Fcvtsw, // cvt.s.w d,s [01000110100] [00000] s[15..11] d[10..6] [100000].
+ kMips64Fcvtds, // cvt.d.s d,s [01000110000] [00000] s[15..11] d[10..6] [100001].
+ kMips64Fcvtdw, // cvt.d.w d,s [01000110100] [00000] s[15..11] d[10..6] [100001].
+ kMips64Fcvtws, // cvt.w.s d,s [01000110000] [00000] s[15..11] d[10..6] [100100].
+ kMips64Fcvtwd, // cvt.w.d d,s [01000110001] [00000] s[15..11] d[10..6] [100100].
+ kMips64Fmovd, // mov.d d,s [01000110001] [00000] s[15..11] d[10..6] [000110].
+ kMips64Fmovs, // mov.s d,s [01000110000] [00000] s[15..11] d[10..6] [000110].
+ kMips64Fnegd, // neg.d d,s [01000110001] [00000] s[15..11] d[10..6] [000111].
+ kMips64Fnegs, // neg.s d,s [01000110000] [00000] s[15..11] d[10..6] [000111].
+ kMips64Fldc1, // ldc1 t,o(b) [110101] b[25..21] t[20..16] o[15..0].
+ kMips64Flwc1, // lwc1 t,o(b) [110001] b[25..21] t[20..16] o[15..0].
+ kMips64Fsdc1, // sdc1 t,o(b) [111101] b[25..21] t[20..16] o[15..0].
+ kMips64Fswc1, // swc1 t,o(b) [111001] b[25..21] t[20..16] o[15..0].
+ kMips64Jal, // jal t [000011] t[25..0].
+ kMips64Jalr, // jalr d,s [000000] s[25..21] [00000] d[15..11] hint[10..6] [001001].
+ kMips64Lahi, // lui t,imm16 [00111100000] t[20..16] imm16[15..0] load addr hi.
+ kMips64Lalo, // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] load addr lo.
+ kMips64Lb, // lb t,o(b) [100000] b[25..21] t[20..16] o[15..0].
+ kMips64Lbu, // lbu t,o(b) [100100] b[25..21] t[20..16] o[15..0].
+ kMips64Ld, // ld t,o(b) [110111] b[25..21] t[20..16] o[15..0].
+ kMips64Lh, // lh t,o(b) [100001] b[25..21] t[20..16] o[15..0].
+ kMips64Lhu, // lhu t,o(b) [100101] b[25..21] t[20..16] o[15..0].
+ kMips64Lui, // lui t,imm16 [00111100000] t[20..16] imm16[15..0].
+ kMips64Lw, // lw t,o(b) [100011] b[25..21] t[20..16] o[15..0].
+ kMips64Lwu, // lwu t,o(b) [100111] b[25..21] t[20..16] o[15..0].
+ kMips64Mfc1, // mfc1 t,s [01000100000] t[20..16] s[15..11] [00000000000].
+ kMips64Mtc1, // mtc1 t,s [01000100100] t[20..16] s[15..11] [00000000000].
+ kMips64Move, // move d,s [000000] s[25..21] [00000] d[15..11] [00000101101].
+ kMips64Mod, // mod d,s,t [000000] s[25..21] t[20..16] d[15..11] [00011011010].
+ kMips64Mul, // mul d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011000].
+ kMips64Nop, // nop [00000000000000000000000000000000].
+ kMips64Nor, // nor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100111].
+ kMips64Or, // or d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100101].
+ kMips64Ori, // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
+ kMips64Sb, // sb t,o(b) [101000] b[25..21] t[20..16] o[15..0].
+ kMips64Sd, // sd t,o(b) [111111] b[25..21] t[20..16] o[15..0].
+ kMips64Seb, // seb d,t [01111100000] t[20..16] d[15..11] [10000100000].
+ kMips64Seh, // seh d,t [01111100000] t[20..16] d[15..11] [11000100000].
+ kMips64Sh, // sh t,o(b) [101001] b[25..21] t[20..16] o[15..0].
+ kMips64Sll, // sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000].
+ kMips64Sllv, // sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100].
+ kMips64Slt, // slt d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101010].
+ kMips64Slti, // slti t,s,imm16 [001010] s[25..21] t[20..16] imm16[15..0].
+ kMips64Sltu, // sltu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101011].
+ kMips64Sra, // sra d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000011].
+ kMips64Srav, // srav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000111].
+ kMips64Srl, // srl d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000010].
+ kMips64Srlv, // srlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000110].
+ kMips64Subu, // subu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100011].
+ kMips64Sw, // sw t,o(b) [101011] b[25..21] t[20..16] o[15..0].
+ kMips64Sync, // sync kind [000000] [0000000000000000] s[10..6] [001111].
+ kMips64Xor, // xor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100110].
+ kMips64Xori, // xori t,s,imm16 [001110] s[25..21] t[20..16] imm16[15..0].
+ kMips64CurrPC, // jal to .+8 to materialize pc.
+ kMips64Delta, // Pseudo for ori t, s, <label>-<label>.
+ kMips64DeltaHi, // Pseudo for lui t, high16(<label>-<label>).
+ kMips64DeltaLo, // Pseudo for ori t, s, low16(<label>-<label>).
+ kMips64Undefined, // undefined [011001xxxxxxxxxxxxxxxx].
+ kMips64Last
+};
+std::ostream& operator<<(std::ostream& os, const Mips64OpCode& rhs);
+
+// Instruction assembly field_loc kind.
+enum Mips64EncodingKind {
+ kFmtUnused,
+ kFmtBitBlt, // Bit string using end/start.
+ kFmtDfp, // Double FP reg.
+ kFmtSfp, // Single FP reg.
+ kFmtBlt5_2, // Same 5-bit field to 2 locations.
+};
+std::ostream& operator<<(std::ostream& os, const Mips64EncodingKind& rhs);
+
+// Struct used to define the snippet positions for each MIPS64 opcode.
+struct Mips64EncodingMap {
+ uint32_t skeleton;
+ struct {
+ Mips64EncodingKind kind;
+ int end; // end for kFmtBitBlt, 1-bit slice end for FP regs.
+ int start; // start for kFmtBitBlt, 4-bit slice end for FP regs.
+ } field_loc[4];
+ Mips64OpCode opcode;
+ uint64_t flags;
+ const char *name;
+ const char* fmt;
+ int size; // Note: size is in bytes.
+};
+
+extern Mips64EncodingMap EncodingMap[kMips64Last];
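+// A sketch of what one EncodingMap entry looks like (illustrative only; the real
+// table is defined in assemble_mips64.cc):
+//   {0x24000000, {{kFmtBitBlt, 20, 16}, {kFmtBitBlt, 25, 21}, {kFmtBitBlt, 15, 0},
+//    {kFmtUnused, -1, -1}}, kMips64Addiu, IS_TERTIARY_OP | REG_DEF0_USE1,
+//    "addiu", "!0r,!1r,0x!2h(!2d)", 4},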
+
+#define IS_UIMM16(v) ((0 <= (v)) && ((v) <= 65535))
+#define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32766))
+#define IS_SIMM16_2WORD(v) ((-32764 <= (v)) && ((v) <= 32763)) // 2 offsets must fit.
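+// E.g., IS_SIMM16(-32768) and IS_UIMM16(65535) hold, so such values fit a single
+// 16-bit immediate field; 0x12340 satisfies neither and must be built with a
+// lui/ori pair first.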
+
+} // namespace art
+
+#endif // ART_COMPILER_DEX_QUICK_MIPS64_MIPS64_LIR_H_
diff --git a/compiler/dex/quick/mips64/target_mips64.cc b/compiler/dex/quick/mips64/target_mips64.cc
new file mode 100644
index 0000000..6ed9617
--- /dev/null
+++ b/compiler/dex/quick/mips64/target_mips64.cc
@@ -0,0 +1,653 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_mips64.h"
+
+#include <inttypes.h>
+
+#include <string>
+
+#include "arch/mips64/instruction_set_features_mips64.h"
+#include "backend_mips64.h"
+#include "base/logging.h"
+#include "dex/compiler_ir.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "driver/compiler_driver.h"
+#include "mips64_lir.h"
+
+namespace art {
+
+static constexpr RegStorage core_regs_arr32[] =
+ {rs_rZERO, rs_rAT, rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rA4, rs_rA5, rs_rA6,
+ rs_rA7, rs_rT0, rs_rT1, rs_rT2, rs_rT3, rs_rS0, rs_rS1, rs_rS2, rs_rS3, rs_rS4, rs_rS5,
+ rs_rS6, rs_rS7, rs_rT8, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rFP, rs_rRA};
+static constexpr RegStorage core_regs_arr64[] =
+ {rs_rZEROd, rs_rATd, rs_rV0d, rs_rV1d, rs_rA0d, rs_rA1d, rs_rA2d, rs_rA3d, rs_rA4d, rs_rA5d,
+ rs_rA6d, rs_rA7d, rs_rT0d, rs_rT1d, rs_rT2d, rs_rT3d, rs_rS0d, rs_rS1d, rs_rS2d, rs_rS3d,
+ rs_rS4d, rs_rS5d, rs_rS6d, rs_rS7d, rs_rT8d, rs_rT9d, rs_rK0d, rs_rK1d, rs_rGPd, rs_rSPd,
+ rs_rFPd, rs_rRAd};
+#if 0
+// TODO: f24-f31 must be saved before calls and restored after.
+static constexpr RegStorage sp_regs_arr[] =
+ {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+ rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+ rs_rF21, rs_rF22, rs_rF23, rs_rF24, rs_rF25, rs_rF26, rs_rF27, rs_rF28, rs_rF29, rs_rF30,
+ rs_rF31};
+static constexpr RegStorage dp_regs_arr[] =
+ {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+ rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+ rs_rD21, rs_rD22, rs_rD23, rs_rD24, rs_rD25, rs_rD26, rs_rD27, rs_rD28, rs_rD29, rs_rD30,
+ rs_rD31};
+#else
+static constexpr RegStorage sp_regs_arr[] =
+ {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+ rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+ rs_rF21, rs_rF22, rs_rF23};
+static constexpr RegStorage dp_regs_arr[] =
+ {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+ rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+ rs_rD21, rs_rD22, rs_rD23};
+#endif
+static constexpr RegStorage reserved_regs_arr32[] =
+ {rs_rZERO, rs_rAT, rs_rS0, rs_rS1, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rRA};
+static constexpr RegStorage reserved_regs_arr64[] =
+ {rs_rZEROd, rs_rATd, rs_rS0d, rs_rS1d, rs_rT9d, rs_rK0d, rs_rK1d, rs_rGPd, rs_rSPd, rs_rRAd};
+static constexpr RegStorage core_temps_arr32[] =
+ {rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rA4, rs_rA5, rs_rA6, rs_rA7, rs_rT0,
+ rs_rT1, rs_rT2, rs_rT3, rs_rT8};
+static constexpr RegStorage core_temps_arr64[] =
+ {rs_rV0d, rs_rV1d, rs_rA0d, rs_rA1d, rs_rA2d, rs_rA3d, rs_rA4d, rs_rA5d, rs_rA6d, rs_rA7d,
+ rs_rT0d, rs_rT1d, rs_rT2d, rs_rT3d, rs_rT8d};
+#if 0
+// TODO: f24-f31 must be saved before calls and restored after.
+static constexpr RegStorage sp_temps_arr[] =
+ {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+ rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+ rs_rF21, rs_rF22, rs_rF23, rs_rF24, rs_rF25, rs_rF26, rs_rF27, rs_rF28, rs_rF29, rs_rF30,
+ rs_rF31};
+static constexpr RegStorage dp_temps_arr[] =
+ {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+ rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+ rs_rD21, rs_rD22, rs_rD23, rs_rD24, rs_rD25, rs_rD26, rs_rD27, rs_rD28, rs_rD29, rs_rD30,
+ rs_rD31};
+#else
+static constexpr RegStorage sp_temps_arr[] =
+ {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+ rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+ rs_rF21, rs_rF22, rs_rF23};
+static constexpr RegStorage dp_temps_arr[] =
+ {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+ rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+ rs_rD21, rs_rD22, rs_rD23};
+#endif
+
+static constexpr ArrayRef<const RegStorage> empty_pool;
+static constexpr ArrayRef<const RegStorage> core_regs32(core_regs_arr32);
+static constexpr ArrayRef<const RegStorage> core_regs64(core_regs_arr64);
+static constexpr ArrayRef<const RegStorage> sp_regs(sp_regs_arr);
+static constexpr ArrayRef<const RegStorage> dp_regs(dp_regs_arr);
+static constexpr ArrayRef<const RegStorage> reserved_regs32(reserved_regs_arr32);
+static constexpr ArrayRef<const RegStorage> reserved_regs64(reserved_regs_arr64);
+static constexpr ArrayRef<const RegStorage> core_temps32(core_temps_arr32);
+static constexpr ArrayRef<const RegStorage> core_temps64(core_temps_arr64);
+static constexpr ArrayRef<const RegStorage> sp_temps(sp_temps_arr);
+static constexpr ArrayRef<const RegStorage> dp_temps(dp_temps_arr);
+
+RegLocation Mips64Mir2Lir::LocCReturn() {
+ return mips64_loc_c_return;
+}
+
+RegLocation Mips64Mir2Lir::LocCReturnRef() {
+ return mips64_loc_c_return_ref;
+}
+
+RegLocation Mips64Mir2Lir::LocCReturnWide() {
+ return mips64_loc_c_return_wide;
+}
+
+RegLocation Mips64Mir2Lir::LocCReturnFloat() {
+ return mips64_loc_c_return_float;
+}
+
+RegLocation Mips64Mir2Lir::LocCReturnDouble() {
+ return mips64_loc_c_return_double;
+}
+
+// Return a target-dependent special register.
+RegStorage Mips64Mir2Lir::TargetReg(SpecialTargetRegister reg) {
+ RegStorage res_reg;
+ switch (reg) {
+ case kSelf: res_reg = rs_rS1; break;
+ case kSuspend: res_reg = rs_rS0; break;
+ case kLr: res_reg = rs_rRA; break;
+ case kPc: res_reg = RegStorage::InvalidReg(); break;
+ case kSp: res_reg = rs_rSP; break;
+ case kArg0: res_reg = rs_rA0; break;
+ case kArg1: res_reg = rs_rA1; break;
+ case kArg2: res_reg = rs_rA2; break;
+ case kArg3: res_reg = rs_rA3; break;
+ case kArg4: res_reg = rs_rA4; break;
+ case kArg5: res_reg = rs_rA5; break;
+ case kArg6: res_reg = rs_rA6; break;
+ case kArg7: res_reg = rs_rA7; break;
+ case kFArg0: res_reg = rs_rF12; break;
+ case kFArg1: res_reg = rs_rF13; break;
+ case kFArg2: res_reg = rs_rF14; break;
+ case kFArg3: res_reg = rs_rF15; break;
+ case kFArg4: res_reg = rs_rF16; break;
+ case kFArg5: res_reg = rs_rF17; break;
+ case kFArg6: res_reg = rs_rF18; break;
+ case kFArg7: res_reg = rs_rF19; break;
+ case kRet0: res_reg = rs_rV0; break;
+ case kRet1: res_reg = rs_rV1; break;
+ case kInvokeTgt: res_reg = rs_rT9; break;
+ case kHiddenArg: res_reg = rs_rT0; break;
+ case kHiddenFpArg: res_reg = RegStorage::InvalidReg(); break;
+ case kCount: res_reg = RegStorage::InvalidReg(); break;
+ default: res_reg = RegStorage::InvalidReg();
+ }
+ return res_reg;
+}
+
+RegStorage Mips64Mir2Lir::InToRegStorageMips64Mapper::GetNextReg(ShortyArg arg) {
+ const SpecialTargetRegister coreArgMappingToPhysicalReg[] =
+ {kArg1, kArg2, kArg3, kArg4, kArg5, kArg6, kArg7};
+ const size_t coreArgMappingToPhysicalRegSize = arraysize(coreArgMappingToPhysicalReg);
+ const SpecialTargetRegister fpArgMappingToPhysicalReg[] =
+ {kFArg1, kFArg2, kFArg3, kFArg4, kFArg5, kFArg6, kFArg7};
+ const size_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
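+ // Note that a single cur_arg_reg_ counter indexes both tables, mirroring the
+ // MIPS64 N64 convention of positional argument registers; e.g., for shorty args
+ // (I, F, J) the picks would be a1, f14 and a3 (a0 is reserved for the Method*).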
+
+ RegStorage result = RegStorage::InvalidReg();
+ if (arg.IsFP()) {
+ if (cur_arg_reg_ < fpArgMappingToPhysicalRegSize) {
+ DCHECK(!arg.IsRef());
+ result = m2l_->TargetReg(fpArgMappingToPhysicalReg[cur_arg_reg_++],
+ arg.IsWide() ? kWide : kNotWide);
+ }
+ } else {
+ if (cur_arg_reg_ < coreArgMappingToPhysicalRegSize) {
+ DCHECK(!(arg.IsWide() && arg.IsRef()));
+ result = m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_arg_reg_++],
+ arg.IsRef() ? kRef : (arg.IsWide() ? kWide : kNotWide));
+ }
+ }
+ return result;
+}
+
+/*
+ * Decode the register id into its resource mask bit: core registers map to
+ * bits [0..31], floating point registers to bits [32..63].
+ */
+ResourceMask Mips64Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
+ return ResourceMask::Bit((reg.IsFloat() ? kMips64FPReg0 : 0) + reg.GetRegNum());
+}
+
+ResourceMask Mips64Mir2Lir::GetPCUseDefEncoding() const {
+ return ResourceMask::Bit(kMips64RegPC);
+}
+
+void Mips64Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags, ResourceMask* use_mask,
+ ResourceMask* def_mask) {
+ DCHECK(!lir->flags.use_def_invalid);
+
+ // Mips64-specific resource map setup here.
+ if (flags & REG_DEF_SP) {
+ def_mask->SetBit(kMips64RegSP);
+ }
+
+ if (flags & REG_USE_SP) {
+ use_mask->SetBit(kMips64RegSP);
+ }
+
+ if (flags & REG_DEF_LR) {
+ def_mask->SetBit(kMips64RegLR);
+ }
+}
+
+/* For dumping instructions */
+#define MIPS64_REG_COUNT 32
+static const char *mips64_reg_name[MIPS64_REG_COUNT] = {
+ "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+ "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"
+};
+
+/*
+ * Interpret a format string and build a human-readable instruction string.
+ * See the format key in assemble_mips64.cc.
+ */
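+// E.g. (illustrative), a fmt of "!0r,!1r,0x!2h(!2d)" with operands {4, 5, 16}
+// expands to "a0,a1,0x0010(16)".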
+std::string Mips64Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
+ std::string buf;
+ int i;
+ const char *fmt_end = &fmt[strlen(fmt)];
+ char tbuf[256];
+ char nc;
+ while (fmt < fmt_end) {
+ int operand;
+ if (*fmt == '!') {
+ fmt++;
+ DCHECK_LT(fmt, fmt_end);
+ nc = *fmt++;
+ if (nc == '!') {
+ strcpy(tbuf, "!");
+ } else {
+ DCHECK_LT(fmt, fmt_end);
+ DCHECK_LT(static_cast<unsigned>(nc-'0'), 4u);
+ operand = lir->operands[nc-'0'];
+ switch (*fmt++) {
+ case 'b':
+ strcpy(tbuf, "0000");
+ for (i = 3; i >= 0; i--) {
+ tbuf[i] += operand & 1;
+ operand >>= 1;
+ }
+ break;
+ case 's':
+ snprintf(tbuf, arraysize(tbuf), "$f%d", RegStorage::RegNum(operand));
+ break;
+ case 'S':
+ DCHECK_EQ(RegStorage::RegNum(operand) & 1, 0);
+ snprintf(tbuf, arraysize(tbuf), "$f%d", RegStorage::RegNum(operand));
+ break;
+ case 'h':
+ snprintf(tbuf, arraysize(tbuf), "%04x", operand);
+ break;
+ case 'M':
+ case 'd':
+ snprintf(tbuf, arraysize(tbuf), "%d", operand);
+ break;
+ case 'D':
+ snprintf(tbuf, arraysize(tbuf), "%d", operand+1);
+ break;
+ case 'E':
+ snprintf(tbuf, arraysize(tbuf), "%d", operand*4);
+ break;
+ case 'F':
+ snprintf(tbuf, arraysize(tbuf), "%d", operand*2);
+ break;
+ case 't':
+ snprintf(tbuf, arraysize(tbuf), "0x%08" PRIxPTR " (L%p)",
+ reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4 + (operand << 1),
+ lir->target);
+ break;
+ case 'T':
+ snprintf(tbuf, arraysize(tbuf), "0x%08x", operand << 2);
+ break;
+ case 'u': {
+ int offset_1 = lir->operands[0];
+ int offset_2 = NEXT_LIR(lir)->operands[0];
+ uintptr_t target =
+ (((reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4) & ~3) +
+ (offset_1 << 21 >> 9) + (offset_2 << 1)) & 0xfffffffc;
+ snprintf(tbuf, arraysize(tbuf), "%p", reinterpret_cast<void*>(target));
+ break;
+ }
+
+ /* Nothing to print for BLX_2 */
+ case 'v':
+ strcpy(tbuf, "see above");
+ break;
+ case 'r':
+ DCHECK(operand >= 0 && operand < MIPS64_REG_COUNT);
+ strcpy(tbuf, mips64_reg_name[operand]);
+ break;
+ case 'N':
+ // Placeholder for delay slot handling
+ strcpy(tbuf, "; nop");
+ break;
+ default:
+ strcpy(tbuf, "DecodeError");
+ break;
+ }
+ buf += tbuf;
+ }
+ } else {
+ buf += *fmt++;
+ }
+ }
+ return buf;
+}
+
+// FIXME: need to redo resource maps for MIPS64 - fix this at that time.
+void Mips64Mir2Lir::DumpResourceMask(LIR *mips64_lir, const ResourceMask& mask, const char *prefix) {
+ char buf[256];
+ buf[0] = 0;
+
+ if (mask.Equals(kEncodeAll)) {
+ strcpy(buf, "all");
+ } else {
+ char num[8];
+ int i;
+
+ for (i = 0; i < kMips64RegEnd; i++) {
+ if (mask.HasBit(i)) {
+ snprintf(num, arraysize(num), "%d ", i);
+ strcat(buf, num);
+ }
+ }
+
+ if (mask.HasBit(ResourceMask::kCCode)) {
+ strcat(buf, "cc ");
+ }
+ if (mask.HasBit(ResourceMask::kFPStatus)) {
+ strcat(buf, "fpcc ");
+ }
+ // Memory bits.
+ if (mips64_lir && (mask.HasBit(ResourceMask::kDalvikReg))) {
+ snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
+ DECODE_ALIAS_INFO_REG(mips64_lir->flags.alias_info),
+ DECODE_ALIAS_INFO_WIDE(mips64_lir->flags.alias_info) ? "(+1)" : "");
+ }
+ if (mask.HasBit(ResourceMask::kLiteral)) {
+ strcat(buf, "lit ");
+ }
+
+ if (mask.HasBit(ResourceMask::kHeapRef)) {
+ strcat(buf, "heap ");
+ }
+ if (mask.HasBit(ResourceMask::kMustNotAlias)) {
+ strcat(buf, "noalias ");
+ }
+ }
+ if (buf[0]) {
+ LOG(INFO) << prefix << ": " << buf;
+ }
+}
+
+/*
+ * TUNING: is this a true leaf? We can't just use METHOD_IS_LEAF to determine it,
+ * as some instructions might call out to C/assembly helper functions. Until that
+ * machinery is in place, always spill ra.
+ */
+
+void Mips64Mir2Lir::AdjustSpillMask() {
+ core_spill_mask_ |= (1 << rs_rRA.GetRegNum());
+ num_core_spills_++;
+}
+
+/* Clobber all regs that might be used by an external C call */
+void Mips64Mir2Lir::ClobberCallerSave() {
+ Clobber(rs_rZEROd);
+ Clobber(rs_rATd);
+ Clobber(rs_rV0d);
+ Clobber(rs_rV1d);
+ Clobber(rs_rA0d);
+ Clobber(rs_rA1d);
+ Clobber(rs_rA2d);
+ Clobber(rs_rA3d);
+ Clobber(rs_rA4d);
+ Clobber(rs_rA5d);
+ Clobber(rs_rA6d);
+ Clobber(rs_rA7d);
+ Clobber(rs_rT0d);
+ Clobber(rs_rT1d);
+ Clobber(rs_rT2d);
+ Clobber(rs_rT3d);
+ Clobber(rs_rT8d);
+ Clobber(rs_rT9d);
+ Clobber(rs_rK0d);
+ Clobber(rs_rK1d);
+ Clobber(rs_rGPd);
+ Clobber(rs_rFPd);
+ Clobber(rs_rRAd);
+
+ Clobber(rs_rF0);
+ Clobber(rs_rF1);
+ Clobber(rs_rF2);
+ Clobber(rs_rF3);
+ Clobber(rs_rF4);
+ Clobber(rs_rF5);
+ Clobber(rs_rF6);
+ Clobber(rs_rF7);
+ Clobber(rs_rF8);
+ Clobber(rs_rF9);
+ Clobber(rs_rF10);
+ Clobber(rs_rF11);
+ Clobber(rs_rF12);
+ Clobber(rs_rF13);
+ Clobber(rs_rF14);
+ Clobber(rs_rF15);
+ Clobber(rs_rD0);
+ Clobber(rs_rD1);
+ Clobber(rs_rD2);
+ Clobber(rs_rD3);
+ Clobber(rs_rD4);
+ Clobber(rs_rD5);
+ Clobber(rs_rD6);
+ Clobber(rs_rD7);
+}
+
+RegLocation Mips64Mir2Lir::GetReturnWideAlt() {
+ UNIMPLEMENTED(FATAL) << "No GetReturnWideAlt for MIPS64";
+ RegLocation res = LocCReturnWide();
+ return res;
+}
+
+RegLocation Mips64Mir2Lir::GetReturnAlt() {
+ UNIMPLEMENTED(FATAL) << "No GetReturnAlt for MIPS64";
+ RegLocation res = LocCReturn();
+ return res;
+}
+
+/* To be used when explicitly managing register use */
+void Mips64Mir2Lir::LockCallTemps() {
+ LockTemp(rs_rMIPS64_ARG0);
+ LockTemp(rs_rMIPS64_ARG1);
+ LockTemp(rs_rMIPS64_ARG2);
+ LockTemp(rs_rMIPS64_ARG3);
+ LockTemp(rs_rMIPS64_ARG4);
+ LockTemp(rs_rMIPS64_ARG5);
+ LockTemp(rs_rMIPS64_ARG6);
+ LockTemp(rs_rMIPS64_ARG7);
+}
+
+/* To be used when explicitly managing register use */
+void Mips64Mir2Lir::FreeCallTemps() {
+ FreeTemp(rs_rMIPS64_ARG0);
+ FreeTemp(rs_rMIPS64_ARG1);
+ FreeTemp(rs_rMIPS64_ARG2);
+ FreeTemp(rs_rMIPS64_ARG3);
+ FreeTemp(rs_rMIPS64_ARG4);
+ FreeTemp(rs_rMIPS64_ARG5);
+ FreeTemp(rs_rMIPS64_ARG6);
+ FreeTemp(rs_rMIPS64_ARG7);
+ FreeTemp(TargetReg(kHiddenArg));
+}
+
+bool Mips64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind ATTRIBUTE_UNUSED) {
+ if (cu_->compiler_driver->GetInstructionSetFeatures()->IsSmp()) {
+ NewLIR1(kMips64Sync, 0 /* Only stype currently supported */);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+void Mips64Mir2Lir::CompilerInitializeRegAlloc() {
+ reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs32, core_regs64, sp_regs,
+ dp_regs, reserved_regs32, reserved_regs64,
+ core_temps32, core_temps64, sp_temps,
+ dp_temps));
+
+ // Target-specific adjustments.
+
+ // Alias single precision floats to appropriate half of overlapping double.
+ for (RegisterInfo* info : reg_pool_->sp_regs_) {
+ int sp_reg_num = info->GetReg().GetRegNum();
+ int dp_reg_num = sp_reg_num;
+ RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
+ RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
+ // Double precision register's master storage should refer to itself.
+ DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
+ // Redirect single precision's master storage to master.
+ info->SetMaster(dp_reg_info);
+ // Singles should show a single 32-bit mask bit, at first referring to the low half.
+ DCHECK_EQ(info->StorageMask(), 0x1U);
+ }
+
+ // Alias 32-bit core registers to their corresponding 64-bit registers.
+ for (RegisterInfo* info : reg_pool_->core_regs_) {
+ int d_reg_num = info->GetReg().GetRegNum();
+ RegStorage d_reg = RegStorage::Solo64(d_reg_num);
+ RegisterInfo* d_reg_info = GetRegInfo(d_reg);
+ // The 64-bit register's master storage should refer to itself.
+ DCHECK_EQ(d_reg_info, d_reg_info->Master());
+ // Redirect the 32-bit register's master storage to the 64-bit register.
+ info->SetMaster(d_reg_info);
+ // The 32-bit view should show a single 32-bit mask bit, at first referring to the low half.
+ DCHECK_EQ(info->StorageMask(), 0x1U);
+ }
+
+ // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
+ // TODO: adjust when we roll to hard float calling convention.
+ reg_pool_->next_core_reg_ = 2;
+ reg_pool_->next_sp_reg_ = 2;
+ reg_pool_->next_dp_reg_ = 1;
+}
+
+/*
+ * In the Arm code it is typical to use the link register
+ * to hold the target address. However, for Mips64 we must
+ * ensure that all branch instructions can be restarted if
+ * there is a trap in the shadow. Allocate a temp register.
+ */
+RegStorage Mips64Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
+ // NOTE: native pointer.
+ LoadWordDisp(rs_rMIPS64_SELF, GetThreadOffset<8>(trampoline).Int32Value(), rs_rT9d);
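+ // t9 doubles as the conventional MIPS PIC call register (a callee expects its
+ // own address in t9), which makes it a natural home for the trampoline target.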
+ return rs_rT9d;
+}
+
+LIR* Mips64Mir2Lir::CheckSuspendUsingLoad() {
+ RegStorage tmp = AllocTemp();
+ // NOTE: native pointer.
+ LoadWordDisp(rs_rMIPS64_SELF, Thread::ThreadSuspendTriggerOffset<8>().Int32Value(), tmp);
+ LIR *inst = LoadWordDisp(tmp, 0, tmp);
+ FreeTemp(tmp);
+ return inst;
+}
+
+LIR* Mips64Mir2Lir::GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest) {
+ DCHECK(!r_dest.IsFloat()); // See RegClassForFieldLoadStore().
+ ClobberCallerSave();
+ LockCallTemps(); // Using fixed registers.
+ RegStorage reg_ptr = TargetReg(kArg0);
+ OpRegRegImm(kOpAdd, reg_ptr, r_base, displacement);
+ RegStorage r_tgt = LoadHelper(kQuickA64Load);
+ LIR *ret = OpReg(kOpBlx, r_tgt);
+ OpRegCopy(r_dest, TargetReg(kRet0));
+ return ret;
+}
+
+LIR* Mips64Mir2Lir::GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src) {
+ DCHECK(!r_src.IsFloat()); // See RegClassForFieldLoadStore().
+ DCHECK(!r_src.IsPair());
+ ClobberCallerSave();
+ LockCallTemps(); // Using fixed registers.
+ RegStorage temp_ptr = AllocTemp();
+ OpRegRegImm(kOpAdd, temp_ptr, r_base, displacement);
+ RegStorage temp_value = AllocTemp();
+ OpRegCopy(temp_value, r_src);
+ OpRegCopy(TargetReg(kArg0), temp_ptr);
+ OpRegCopy(TargetReg(kArg1), temp_value);
+ FreeTemp(temp_ptr);
+ FreeTemp(temp_value);
+ RegStorage r_tgt = LoadHelper(kQuickA64Store);
+ return OpReg(kOpBlx, r_tgt);
+}
+
+void Mips64Mir2Lir::SpillCoreRegs() {
+ if (num_core_spills_ == 0) {
+ return;
+ }
+ uint32_t mask = core_spill_mask_;
+ // Start saving from offset 0 so that ra ends up on the top of the frame.
+ int offset = 0;
+ OpRegImm(kOpSub, rs_rSPd, num_core_spills_ * 8);
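+ // E.g., with only ra spilled (the AdjustSpillMask default) this emits roughly
+ // "daddiu sp,sp,-8" followed by "sd ra,0(sp)"; UnSpillCoreRegs below mirrors it,
+ // restoring from the top end of frame_size_ and popping the whole frame.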
+ for (int reg = 0; mask; mask >>= 1, reg++) {
+ if (mask & 0x1) {
+ StoreWordDisp(rs_rMIPS64_SP, offset, RegStorage::Solo64(reg));
+ offset += 8;
+ }
+ }
+}
+
+void Mips64Mir2Lir::UnSpillCoreRegs() {
+ if (num_core_spills_ == 0) {
+ return;
+ }
+ uint32_t mask = core_spill_mask_;
+ int offset = frame_size_ - num_core_spills_ * 8;
+ for (int reg = 0; mask; mask >>= 1, reg++) {
+ if (mask & 0x1) {
+ LoadWordDisp(rs_rMIPS64_SP, offset, RegStorage::Solo64(reg));
+ offset += 8;
+ }
+ }
+ OpRegImm(kOpAdd, rs_rSPd, frame_size_);
+}
+
+bool Mips64Mir2Lir::IsUnconditionalBranch(LIR* lir) {
+ return (lir->opcode == kMips64B);
+}
+
+RegisterClass Mips64Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
+ if (UNLIKELY(is_volatile)) {
+ // On Mips64, atomic 64-bit load/store requires a core register.
+ // Smaller aligned load/store is atomic for both core and fp registers.
+ if (size == k64 || size == kDouble) {
+ return kCoreReg;
+ }
+ }
+ // TODO: Verify that both core and fp registers are suitable for smaller sizes.
+ return RegClassBySize(size);
+}
+
+Mips64Mir2Lir::Mips64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
+ : Mir2Lir(cu, mir_graph, arena), in_to_reg_storage_mips64_mapper_(this) {
+ for (int i = 0; i < kMips64Last; i++) {
+ DCHECK_EQ(Mips64Mir2Lir::EncodingMap[i].opcode, i)
+ << "Encoding order for " << Mips64Mir2Lir::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(Mips64Mir2Lir::EncodingMap[i].opcode);
+ }
+}
+
+Mir2Lir* Mips64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+ ArenaAllocator* const arena) {
+ return new Mips64Mir2Lir(cu, mir_graph, arena);
+}
+
+uint64_t Mips64Mir2Lir::GetTargetInstFlags(int opcode) {
+ DCHECK(!IsPseudoLirOp(opcode));
+ return Mips64Mir2Lir::EncodingMap[opcode].flags;
+}
+
+const char* Mips64Mir2Lir::GetTargetInstName(int opcode) {
+ DCHECK(!IsPseudoLirOp(opcode));
+ return Mips64Mir2Lir::EncodingMap[opcode].name;
+}
+
+const char* Mips64Mir2Lir::GetTargetInstFmt(int opcode) {
+ DCHECK(!IsPseudoLirOp(opcode));
+ return Mips64Mir2Lir::EncodingMap[opcode].fmt;
+}
+
+void Mips64Mir2Lir::GenBreakpoint(int code) {
+ NewLIR1(kMips64Break, code);
+}
+
+} // namespace art
diff --git a/compiler/dex/quick/mips64/utility_mips64.cc b/compiler/dex/quick/mips64/utility_mips64.cc
new file mode 100644
index 0000000..38e354c
--- /dev/null
+++ b/compiler/dex/quick/mips64/utility_mips64.cc
@@ -0,0 +1,875 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_mips64.h"
+
+#include "arch/mips64/instruction_set_features_mips64.h"
+#include "base/logging.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "dex/reg_storage_eq.h"
+#include "driver/compiler_driver.h"
+#include "mips64_lir.h"
+
+namespace art {
+
+/* This file contains codegen for the MIPS64 ISA. */
+
+LIR* Mips64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
+ int opcode;
+ // Must be both DOUBLE or both not DOUBLE.
+ DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());
+ if (r_dest.Is64Bit()) {
+ if (r_dest.IsDouble()) {
+ if (r_src.IsDouble()) {
+ opcode = kMips64Fmovd;
+ } else {
+ // Note the operands are swapped for the dmtc1 instr.
+ RegStorage t_opnd = r_src;
+ r_src = r_dest;
+ r_dest = t_opnd;
+ opcode = kMips64Dmtc1;
+ }
+ } else {
+ DCHECK(r_src.IsDouble());
+ opcode = kMips64Dmfc1;
+ }
+ } else {
+ if (r_dest.IsSingle()) {
+ if (r_src.IsSingle()) {
+ opcode = kMips64Fmovs;
+ } else {
+ // Note the operands are swapped for the mtc1 instr.
+ RegStorage t_opnd = r_src;
+ r_src = r_dest;
+ r_dest = t_opnd;
+ opcode = kMips64Mtc1;
+ }
+ } else {
+ DCHECK(r_src.IsSingle());
+ opcode = kMips64Mfc1;
+ }
+ }
+ LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
+ if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+ res->flags.is_nop = true;
+ }
+ return res;
+}
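// Standalone sketch of the operand swap above: mtc1/dmtc1 name the core
// register first, so the generic (dest, src) pair is exchanged before the
// instruction is emitted. Enum values and register ids are hypothetical.
#include <cassert>
#include <utility>

enum Op { kFmovd, kDmtc1, kDmfc1 };

Op PickCopyOp(bool dest_is_fp, bool src_is_fp, int* dest, int* src) {
  if (dest_is_fp && src_is_fp) return kFmovd;   // fp -> fp move
  if (!dest_is_fp && src_is_fp) return kDmfc1;  // fp -> core, natural order
  std::swap(*dest, *src);                       // core -> fp: swap for dmtc1
  return kDmtc1;
}

int main() {
  int dest = 4, src = 9;  // hypothetical register ids
  assert(PickCopyOp(true, false, &dest, &src) == kDmtc1);
  assert(dest == 9 && src == 4);  // operands were swapped
}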
+
+bool Mips64Mir2Lir::InexpensiveConstantInt(int32_t value) {
+ // For encodings, see LoadConstantNoClobber below.
+ return ((value == 0) || IsUint<16>(value) || IsInt<16>(value));
+}
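// The predicate above marks a constant "inexpensive" when one instruction
// can materialize it: zero via a move from $zero, an unsigned 16-bit value
// via ori, or a signed 16-bit value via addiu. A minimal standalone
// restatement (ART's IsUint/IsInt helpers spelled out by hand):
#include <cassert>
#include <cstdint>

bool CheapConstant(int32_t v) {
  bool is_uint16 = v >= 0 && v <= 0xFFFF;       // fits ori's immediate field
  bool is_int16 = v >= -0x8000 && v <= 0x7FFF;  // fits addiu's immediate field
  return v == 0 || is_uint16 || is_int16;
}

int main() {
  assert(CheapConstant(0));         // move rd, $zero
  assert(CheapConstant(0xFFFF));    // ori rd, $zero, 0xFFFF
  assert(CheapConstant(-1));        // addiu rd, $zero, -1
  assert(!CheapConstant(0x10000));  // needs lui + ori
}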
+
+bool Mips64Mir2Lir::InexpensiveConstantFloat(int32_t value) {
+ UNUSED(value);
+ return false; // TUNING
+}
+
+bool Mips64Mir2Lir::InexpensiveConstantLong(int64_t value) {
+ UNUSED(value);
+ return false; // TUNING
+}
+
+bool Mips64Mir2Lir::InexpensiveConstantDouble(int64_t value) {
+ UNUSED(value);
+ return false; // TUNING
+}
+
+/*
+ * Load an immediate using a shortcut if possible; otherwise
+ * grab from the per-translation literal pool. If the target is
+ * a high register, build the constant into a low register and copy.
+ *
+ * No additional register-clobbering operation is performed. Use this version when
+ * 1) r_dest is freshly returned from AllocTemp, or
+ * 2) the codegen is under fixed register usage.
+ */
+LIR* Mips64Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
+ LIR *res;
+
+ RegStorage r_dest_save = r_dest;
+ bool is_fp_reg = r_dest.IsFloat();
+ if (is_fp_reg) {
+ DCHECK(r_dest.IsSingle());
+ r_dest = AllocTemp();
+ }
+
+ // See if the value can be constructed cheaply.
+ if (value == 0) {
+ res = NewLIR2(kMips64Move, r_dest.GetReg(), rZERO);
+ } else if (IsUint<16>(value)) {
+ // Use OR with (unsigned) immediate to encode 16b unsigned int.
+ res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZERO, value);
+ } else if (IsInt<16>(value)) {
+ // Use ADD with (signed) immediate to encode 16b signed int.
+ res = NewLIR3(kMips64Addiu, r_dest.GetReg(), rZERO, value);
+ } else {
+ res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
+ if (value & 0xffff) {
+ NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
+ }
+ }
+
+ if (is_fp_reg) {
+ NewLIR2(kMips64Mtc1, r_dest.GetReg(), r_dest_save.GetReg());
+ FreeTemp(r_dest);
+ }
+
+ return res;
+}
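// Sketch of the two-instruction fallback at the end of LoadConstantNoClobber:
// lui places the upper 16 bits, ori merges the lower 16. Modeled in plain
// C++ on 32-bit values:
#include <cassert>
#include <cstdint>

uint32_t Lui(uint16_t imm) { return static_cast<uint32_t>(imm) << 16; }
uint32_t Ori(uint32_t reg, uint16_t imm) { return reg | imm; }

int main() {
  int32_t value = 0x12345678;
  uint32_t reg = Lui(static_cast<uint16_t>(value >> 16));  // 0x12340000
  if (value & 0xffff) {
    reg = Ori(reg, static_cast<uint16_t>(value));          // 0x12345678
  }
  assert(reg == static_cast<uint32_t>(value));
}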
+
+LIR* Mips64Mir2Lir::OpUnconditionalBranch(LIR* target) {
+ LIR* res = NewLIR1(kMips64B, 0 /* offset to be patched during assembly */);
+ res->target = target;
+ return res;
+}
+
+LIR* Mips64Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
+ Mips64OpCode opcode = kMips64Nop;
+ switch (op) {
+ case kOpBlx:
+ opcode = kMips64Jalr;
+ break;
+ case kOpBx:
+ return NewLIR2(kMips64Jalr, rZERO, r_dest_src.GetReg());
+ default:
+ LOG(FATAL) << "Bad case in OpReg";
+ }
+ return NewLIR2(opcode, rRAd, r_dest_src.GetReg());
+}
+
+LIR* Mips64Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
+ LIR *res;
+ bool neg = (value < 0);
+ int abs_value = (neg) ? -value : value;
+ bool short_form = (abs_value & 0xff) == abs_value;
+ bool is64bit = r_dest_src1.Is64Bit();
+ RegStorage r_scratch;
+ Mips64OpCode opcode = kMips64Nop;
+ switch (op) {
+ case kOpAdd:
+ return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
+ case kOpSub:
+ return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
+ default:
+ LOG(FATAL) << "Bad case in OpRegImm";
+ }
+ if (short_form) {
+ res = NewLIR2(opcode, r_dest_src1.GetReg(), abs_value);
+ } else {
+ if (is64bit) {
+ r_scratch = AllocTempWide();
+ res = LoadConstantWide(r_scratch, value);
+ } else {
+ r_scratch = AllocTemp();
+ res = LoadConstant(r_scratch, value);
+ }
+ if (op == kOpCmp) {
+ NewLIR2(opcode, r_dest_src1.GetReg(), r_scratch.GetReg());
+ } else {
+ NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_scratch.GetReg());
+ }
+ }
+ return res;
+}
+
+LIR* Mips64Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest,
+ RegStorage r_src1, RegStorage r_src2) {
+ Mips64OpCode opcode = kMips64Nop;
+ bool is64bit = r_dest.Is64Bit() || r_src1.Is64Bit() || r_src2.Is64Bit();
+
+ switch (op) {
+ case kOpAdd:
+ if (is64bit) {
+ opcode = kMips64Daddu;
+ } else {
+ opcode = kMips64Addu;
+ }
+ break;
+ case kOpSub:
+ if (is64bit) {
+ opcode = kMips64Dsubu;
+ } else {
+ opcode = kMips64Subu;
+ }
+ break;
+ case kOpAnd:
+ opcode = kMips64And;
+ break;
+ case kOpMul:
+ opcode = kMips64Mul;
+ break;
+ case kOpOr:
+ opcode = kMips64Or;
+ break;
+ case kOpXor:
+ opcode = kMips64Xor;
+ break;
+ case kOpLsl:
+ if (is64bit) {
+ opcode = kMips64Dsllv;
+ } else {
+ opcode = kMips64Sllv;
+ }
+ break;
+ case kOpLsr:
+ if (is64bit) {
+ opcode = kMips64Dsrlv;
+ } else {
+ opcode = kMips64Srlv;
+ }
+ break;
+ case kOpAsr:
+ if (is64bit) {
+ opcode = kMips64Dsrav;
+ } else {
+ opcode = kMips64Srav;
+ }
+ break;
+ case kOpAdc:
+ case kOpSbc:
+ LOG(FATAL) << "No carry bit on MIPS64";
+ break;
+ default:
+ LOG(FATAL) << "Bad case in OpRegRegReg";
+ break;
+ }
+ return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
+}
+
+LIR* Mips64Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
+ LIR *res;
+ Mips64OpCode opcode = kMips64Nop;
+ bool short_form = true;
+ bool is64bit = r_dest.Is64Bit() || r_src1.Is64Bit();
+
+ switch (op) {
+ case kOpAdd:
+ if (is64bit) {
+ if (IS_SIMM16(value)) {
+ opcode = kMips64Daddiu;
+ } else {
+ short_form = false;
+ opcode = kMips64Daddu;
+ }
+ } else {
+ if (IS_SIMM16(value)) {
+ opcode = kMips64Addiu;
+ } else {
+ short_form = false;
+ opcode = kMips64Addu;
+ }
+ }
+ break;
+ case kOpSub:
+ if (is64bit) {
+ if (IS_SIMM16(-value)) {
+ value = -value;
+ opcode = kMips64Daddiu;
+ } else {
+ short_form = false;
+ opcode = kMips64Dsubu;
+ }
+ } else {
+ if (IS_SIMM16(-value)) {
+ value = -value;
+ opcode = kMips64Addiu;
+ } else {
+ short_form = false;
+ opcode = kMips64Subu;
+ }
+ }
+ break;
+ case kOpLsl:
+ if (is64bit) {
+ DCHECK(value >= 0 && value <= 63);
+ if (value >= 0 && value <= 31) {
+ opcode = kMips64Dsll;
+ } else {
+ opcode = kMips64Dsll32;
+ value = value - 32;
+ }
+ } else {
+ DCHECK(value >= 0 && value <= 31);
+ opcode = kMips64Sll;
+ }
+ break;
+ case kOpLsr:
+ if (is64bit) {
+ DCHECK(value >= 0 && value <= 63);
+ if (value >= 0 && value <= 31) {
+ opcode = kMips64Dsrl;
+ } else {
+ opcode = kMips64Dsrl32;
+ value = value - 32;
+ }
+ } else {
+ DCHECK(value >= 0 && value <= 31);
+ opcode = kMips64Srl;
+ }
+ break;
+ case kOpAsr:
+ if (is64bit) {
+ DCHECK(value >= 0 && value <= 63);
+ if (value >= 0 && value <= 31) {
+ opcode = kMips64Dsra;
+ } else {
+ opcode = kMips64Dsra32;
+ value = value - 32;
+ }
+ } else {
+ DCHECK(value >= 0 && value <= 31);
+ opcode = kMips64Sra;
+ }
+ break;
+ case kOpAnd:
+ if (IS_UIMM16(value)) {
+ opcode = kMips64Andi;
+ } else {
+ short_form = false;
+ opcode = kMips64And;
+ }
+ break;
+ case kOpOr:
+ if (IS_UIMM16(value)) {
+ opcode = kMips64Ori;
+ } else {
+ short_form = false;
+ opcode = kMips64Or;
+ }
+ break;
+ case kOpXor:
+ if (IS_UIMM16(value)) {
+ opcode = kMips64Xori;
+ } else {
+ short_form = false;
+ opcode = kMips64Xor;
+ }
+ break;
+ case kOpMul:
+ short_form = false;
+ opcode = kMips64Mul;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in OpRegRegImm";
+ break;
+ }
+
+ if (short_form) {
+ res = NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), value);
+ } else {
+ if (r_dest != r_src1) {
+ res = LoadConstant(r_dest, value);
+ NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_dest.GetReg());
+ } else {
+ if (is64bit) {
+ RegStorage r_scratch = AllocTempWide();
+ res = LoadConstantWide(r_scratch, value);
+ NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
+ } else {
+ RegStorage r_scratch = AllocTemp();
+ res = LoadConstant(r_scratch, value);
+ NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
+ }
+ }
+ }
+ return res;
+}
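// Model of the 64-bit shift encoding split in OpRegRegImm: the MIPS64 shift
// immediate field is 5 bits, so amounts 32..63 select the "...32" opcode
// variant with (amount - 32) encoded. Opcode names here are stand-ins.
#include <cassert>

enum ShiftOp { kDsll, kDsll32 };

ShiftOp EncodeLsl64(int amount, int* encoded) {
  assert(amount >= 0 && amount <= 63);
  if (amount <= 31) {
    *encoded = amount;
    return kDsll;
  }
  *encoded = amount - 32;
  return kDsll32;
}

int main() {
  int enc;
  assert(EncodeLsl64(5, &enc) == kDsll && enc == 5);
  assert(EncodeLsl64(40, &enc) == kDsll32 && enc == 8);
}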
+
+LIR* Mips64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
+ Mips64OpCode opcode = kMips64Nop;
+ LIR *res;
+ switch (op) {
+ case kOpMov:
+ opcode = kMips64Move;
+ break;
+ case kOpMvn:
+ return NewLIR3(kMips64Nor, r_dest_src1.GetReg(), r_src2.GetReg(), rZEROd);
+ case kOpNeg:
+ if (r_dest_src1.Is64Bit()) {
+ return NewLIR3(kMips64Dsubu, r_dest_src1.GetReg(), rZEROd, r_src2.GetReg());
+ } else {
+ return NewLIR3(kMips64Subu, r_dest_src1.GetReg(), rZERO, r_src2.GetReg());
+ }
+ case kOpAdd:
+ case kOpAnd:
+ case kOpMul:
+ case kOpOr:
+ case kOpSub:
+ case kOpXor:
+ return OpRegRegReg(op, r_dest_src1, r_dest_src1, r_src2);
+ case kOp2Byte:
+ res = NewLIR2(kMips64Seb, r_dest_src1.GetReg(), r_src2.GetReg());
+ return res;
+ case kOp2Short:
+ res = NewLIR2(kMips64Seh, r_dest_src1.GetReg(), r_src2.GetReg());
+ return res;
+ case kOp2Char:
+ return NewLIR3(kMips64Andi, r_dest_src1.GetReg(), r_src2.GetReg(), 0xFFFF);
+ default:
+ LOG(FATAL) << "Bad case in OpRegReg";
+ UNREACHABLE();
+ }
+ return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
+}
+
+LIR* Mips64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
+ MoveType move_type) {
+ UNUSED(r_dest, r_base, offset, move_type);
+ UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset,
+ RegStorage r_src, MoveType move_type) {
+ UNUSED(r_base, offset, r_src, move_type);
+ UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc,
+ RegStorage r_dest, RegStorage r_src) {
+ UNUSED(op, cc, r_dest, r_src);
+ LOG(FATAL) << "Unexpected use of OpCondRegReg for MIPS64";
+ UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
+ LIR *res = nullptr;
+ DCHECK(r_dest.Is64Bit());
+ RegStorage r_dest_save = r_dest;
+ bool is_fp_reg = r_dest.IsFloat();
+ if (is_fp_reg) {
+ DCHECK(r_dest.IsDouble());
+ r_dest = AllocTemp();
+ }
+
+ int bit31 = (value & UINT64_C(0x80000000)) != 0;
+
+ // Loads with 1 instruction.
+ if (IsUint<16>(value)) {
+ res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, value);
+ } else if (IsInt<16>(value)) {
+ res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, value);
+ } else if ((value & 0xFFFF) == 0 && IsInt<16>(value >> 16)) {
+ res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
+ } else if (IsInt<32>(value)) {
+ // Loads with 2 instructions.
+ res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
+ NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
+ } else if ((value & 0xFFFF0000) == 0 && IsInt<16>(value >> 32)) {
+ res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, value);
+ NewLIR2(kMips64Dahi, r_dest.GetReg(), value >> 32);
+ } else if ((value & UINT64_C(0xFFFFFFFF0000)) == 0) {
+ res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, value);
+ NewLIR2(kMips64Dati, r_dest.GetReg(), value >> 48);
+ } else if ((value & 0xFFFF) == 0 && (value >> 32) >= (-32768 - bit31) &&
+ (value >> 32) <= (32767 - bit31)) {
+ res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
+ NewLIR2(kMips64Dahi, r_dest.GetReg(), (value >> 32) + bit31);
+ } else if ((value & 0xFFFF) == 0 && ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF)) {
+ res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
+ NewLIR2(kMips64Dati, r_dest.GetReg(), (value >> 48) + bit31);
+ } else {
+ int64_t tmp = value;
+ int shift_cnt = 0;
+ while ((tmp & 1) == 0) {
+ tmp >>= 1;
+ shift_cnt++;
+ }
+
+ if (IsUint<16>(tmp)) {
+ res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, tmp);
+ NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+ shift_cnt & 0x1F);
+ } else if (IsInt<16>(tmp)) {
+ res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
+ NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+ shift_cnt & 0x1F);
+ } else if (IsInt<32>(tmp)) {
+ // Loads with 3 instructions.
+ res = NewLIR2(kMips64Lui, r_dest.GetReg(), tmp >> 16);
+ NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), tmp);
+ NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+ shift_cnt & 0x1F);
+ } else {
+ tmp = value >> 16;
+ shift_cnt = 16;
+ while ((tmp & 1) == 0) {
+ tmp >>= 1;
+ shift_cnt++;
+ }
+
+ if (IsUint<16>(tmp)) {
+ res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, tmp);
+ NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+ shift_cnt & 0x1F);
+ NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
+ } else if (IsInt<16>(tmp)) {
+ res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
+ NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+ shift_cnt & 0x1F);
+ NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
+ } else {
+ // Loads with 3-4 instructions.
+ uint64_t tmp2 = value;
+ if (((tmp2 >> 16) & 0xFFFF) != 0 || (tmp2 & 0xFFFFFFFF) == 0) {
+ res = NewLIR2(kMips64Lui, r_dest.GetReg(), tmp2 >> 16);
+ }
+ if ((tmp2 & 0xFFFF) != 0) {
+ if (res != nullptr) {
+ NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), tmp2);
+ } else {
+ res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, tmp2);
+ }
+ }
+ if (bit31) {
+ tmp2 += UINT64_C(0x100000000);
+ }
+ if (((tmp2 >> 32) & 0xFFFF) != 0) {
+ NewLIR2(kMips64Dahi, r_dest.GetReg(), tmp2 >> 32);
+ }
+ if (tmp2 & UINT64_C(0x800000000000)) {
+ tmp2 += UINT64_C(0x1000000000000);
+ }
+ if ((tmp2 >> 48) != 0) {
+ NewLIR2(kMips64Dati, r_dest.GetReg(), tmp2 >> 48);
+ }
+ }
+ }
+ }
+
+ if (is_fp_reg) {
+ NewLIR2(kMips64Dmtc1, r_dest.GetReg(), r_dest_save.GetReg());
+ FreeTemp(r_dest);
+ }
+
+ return res;
+}
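// The fallback path of LoadConstantWide strips trailing zeros so a narrow
// remainder can be loaded and shifted back into place. A standalone check
// of that arithmetic (the chosen constant is arbitrary):
#include <cassert>
#include <cstdint>

int main() {
  int64_t value = INT64_C(0x5679) << 37;  // too wide for lui/ori directly
  int64_t tmp = value;
  int shift_cnt = 0;
  while ((tmp & 1) == 0) {  // strip trailing zeros
    tmp >>= 1;
    shift_cnt++;
  }
  assert(tmp == 0x5679 && shift_cnt == 37);
  // tmp now fits ori's 16-bit field: emit ori, then dsll32 with (37 - 32).
  assert((tmp << shift_cnt) == value);
}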
+
+/* Load value from base + scaled index. */
+LIR* Mips64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
+ int scale, OpSize size) {
+ LIR* first = nullptr;
+ LIR *res;
+ RegStorage t_reg;
+ Mips64OpCode opcode = kMips64Nop;
+ bool is64bit = r_dest.Is64Bit();
+ if (is64bit) {
+ t_reg = AllocTempWide();
+ } else {
+ t_reg = AllocTemp();
+ }
+
+ if (r_dest.IsFloat()) {
+ DCHECK(r_dest.IsSingle());
+ DCHECK((size == k32) || (size == kSingle) || (size == kReference));
+ size = kSingle;
+ } else if (is64bit) {
+ size = k64;
+ } else {
+ if (size == kSingle) {
+ size = k32;
+ }
+ }
+
+ if (!scale) {
+ if (is64bit) {
+ first = NewLIR3(kMips64Daddu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
+ } else {
+ first = NewLIR3(kMips64Addu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
+ }
+ } else {
+ first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
+ NewLIR3(kMips64Daddu, t_reg.GetReg(), r_base.GetReg(), t_reg.GetReg());
+ }
+
+ switch (size) {
+ case k64:
+ opcode = kMips64Ld;
+ break;
+ case kSingle:
+ opcode = kMips64Flwc1;
+ break;
+ case k32:
+ case kReference:
+ opcode = kMips64Lw;
+ break;
+ case kUnsignedHalf:
+ opcode = kMips64Lhu;
+ break;
+ case kSignedHalf:
+ opcode = kMips64Lh;
+ break;
+ case kUnsignedByte:
+ opcode = kMips64Lbu;
+ break;
+ case kSignedByte:
+ opcode = kMips64Lb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in LoadBaseIndexed";
+ }
+
+ res = NewLIR3(opcode, r_dest.GetReg(), 0, t_reg.GetReg());
+ FreeTemp(t_reg);
+ return (first) ? first : res;
+}
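// MIPS64 loads only address base + imm16, so LoadBaseIndexed forms
// base + (index << scale) in a temp and issues a zero-offset load. The
// address arithmetic, modeled standalone:
#include <cassert>
#include <cstdint>

uint64_t EffectiveAddress(uint64_t base, uint64_t index, int scale) {
  uint64_t t = (scale != 0) ? (index << scale) : index;  // dsll when scaled
  return base + t;                                       // daddu
}

int main() {
  assert(EffectiveAddress(0x1000, 3, 2) == 0x100C);  // 32-bit element 3
  assert(EffectiveAddress(0x1000, 3, 3) == 0x1018);  // 64-bit element 3
}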
+
+/* Store value at base + scaled index. */
+LIR* Mips64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
+ int scale, OpSize size) {
+ LIR* first = nullptr;
+ Mips64OpCode opcode = kMips64Nop;
+ RegStorage t_reg = AllocTemp();
+
+ if (r_src.IsFloat()) {
+ DCHECK(r_src.IsSingle());
+ DCHECK((size == k32) || (size == kSingle) || (size == kReference));
+ size = kSingle;
+ } else {
+ if (size == kSingle) {
+ size = k32;
+ }
+ }
+
+ if (!scale) {
+ first = NewLIR3(kMips64Daddu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
+ } else {
+ first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
+ NewLIR3(kMips64Daddu, t_reg.GetReg(), r_base.GetReg(), t_reg.GetReg());
+ }
+
+ switch (size) {
+ case kSingle:
+ opcode = kMips64Fswc1;
+ break;
+ case k32:
+ case kReference:
+ opcode = kMips64Sw;
+ break;
+ case kUnsignedHalf:
+ case kSignedHalf:
+ opcode = kMips64Sh;
+ break;
+ case kUnsignedByte:
+ case kSignedByte:
+ opcode = kMips64Sb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in StoreBaseIndexed";
+ }
+ NewLIR3(opcode, r_src.GetReg(), 0, t_reg.GetReg());
+ return first;
+}
+
+// FIXME: don't split r_dest into 2 containers.
+LIR* Mips64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) {
+/*
+ * Load value from base + displacement. IMPORTANT: this code must not
+ * allocate any new temps. If a new register is needed and base and dest
+ * are the same, spill some other register to rlp and then restore.
+ */
+ LIR *res;
+ LIR* load = nullptr;
+ Mips64OpCode opcode = kMips64Nop;
+ bool short_form = IS_SIMM16(displacement);
+
+ switch (size) {
+ case k64:
+ case kDouble:
+ r_dest = Check64BitReg(r_dest);
+ if (r_dest.IsFloat()) {
+ opcode = kMips64Fldc1;
+ } else {
+ opcode = kMips64Ld;
+ }
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case k32:
+ case kSingle:
+ case kReference:
+ opcode = kMips64Lw;
+ if (r_dest.IsFloat()) {
+ opcode = kMips64Flwc1;
+ DCHECK(r_dest.IsSingle());
+ }
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case kUnsignedHalf:
+ opcode = kMips64Lhu;
+ DCHECK_EQ((displacement & 0x1), 0);
+ break;
+ case kSignedHalf:
+ opcode = kMips64Lh;
+ DCHECK_EQ((displacement & 0x1), 0);
+ break;
+ case kUnsignedByte:
+ opcode = kMips64Lbu;
+ break;
+ case kSignedByte:
+ opcode = kMips64Lb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in LoadBaseIndexedBody";
+ }
+
+ if (short_form) {
+ load = res = NewLIR3(opcode, r_dest.GetReg(), displacement, r_base.GetReg());
+ } else {
+ RegStorage r_tmp = (r_base == r_dest) ? AllocTemp() : r_dest;
+ res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
+ load = NewLIR3(opcode, r_dest.GetReg(), 0, r_tmp.GetReg());
+ if (r_tmp != r_dest) {
+ FreeTemp(r_tmp);
+ }
+ }
+
+ if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+ DCHECK_EQ(r_base, rs_rMIPS64_SP);
+ AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
+ }
+ return res;
+}
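// The short_form test above is just the signed 16-bit range check on the
// load's immediate field; anything outside it needs an explicit address add
// first. Standalone restatement of IS_SIMM16:
#include <cassert>

bool IsSimm16(int disp) { return disp >= -32768 && disp <= 32767; }

int main() {
  assert(IsSimm16(32767));   // ld rd, 32767(base)
  assert(!IsSimm16(32768));  // daddiu tmp, base, ...; ld rd, 0(tmp)
  assert(IsSimm16(-32768));
}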
+
+LIR* Mips64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size, VolatileKind is_volatile) {
+ if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble) &&
+ (displacement & 0x7) != 0)) {
+ // TODO: use lld/scd instructions for Mips64.
+ // Do atomic 64-bit load.
+ return GenAtomic64Load(r_base, displacement, r_dest);
+ }
+
+ // TODO: base this on target.
+ if (size == kWord) {
+ size = k64;
+ }
+ LIR* load;
+ load = LoadBaseDispBody(r_base, displacement, r_dest, size);
+
+ if (UNLIKELY(is_volatile == kVolatile)) {
+ GenMemBarrier(kLoadAny);
+ }
+
+ return load;
+}
+
+// FIXME: don't split r_src into 2 containers.
+LIR* Mips64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size) {
+ LIR *res;
+ LIR* store = nullptr;
+ Mips64OpCode opcode = kMips64Nop;
+ bool short_form = IS_SIMM16(displacement);
+
+ switch (size) {
+ case k64:
+ case kDouble:
+ r_src = Check64BitReg(r_src);
+ if (r_src.IsFloat()) {
+ opcode = kMips64Fsdc1;
+ } else {
+ opcode = kMips64Sd;
+ }
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case k32:
+ case kSingle:
+ case kReference:
+ opcode = kMips64Sw;
+ if (r_src.IsFloat()) {
+ opcode = kMips64Fswc1;
+ DCHECK(r_src.IsSingle());
+ }
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case kUnsignedHalf:
+ case kSignedHalf:
+ opcode = kMips64Sh;
+ DCHECK_EQ((displacement & 0x1), 0);
+ break;
+ case kUnsignedByte:
+ case kSignedByte:
+ opcode = kMips64Sb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in StoreBaseDispBody";
+ }
+
+ if (short_form) {
+ store = res = NewLIR3(opcode, r_src.GetReg(), displacement, r_base.GetReg());
+ } else {
+ RegStorage r_scratch = AllocTemp();
+ res = OpRegRegImm(kOpAdd, r_scratch, r_base, displacement);
+ store = NewLIR3(opcode, r_src.GetReg(), 0, r_scratch.GetReg());
+ FreeTemp(r_scratch);
+ }
+
+ if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+ DCHECK_EQ(r_base, rs_rMIPS64_SP);
+ AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
+ }
+
+ return res;
+}
+
+LIR* Mips64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size, VolatileKind is_volatile) {
+ if (is_volatile == kVolatile) {
+ // Ensure that prior accesses become visible to other threads first.
+ GenMemBarrier(kAnyStore);
+ }
+
+ LIR* store;
+ if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble) &&
+ (displacement & 0x7) != 0)) {
+ // TODO: use lld/scd instructions for Mips64.
+ // Do atomic 64-bit store.
+ store = GenAtomic64Store(r_base, displacement, r_src);
+ } else {
+ // TODO: base this on target.
+ if (size == kWord) {
+ size = k64;
+ }
+ store = StoreBaseDispBody(r_base, displacement, r_src, size);
+ }
+
+ if (UNLIKELY(is_volatile == kVolatile)) {
+ // Preserve order with respect to any subsequent volatile loads.
+ // We need StoreLoad, but that generally requires the most expensive barrier.
+ GenMemBarrier(kAnyAny);
+ }
+
+ return store;
+}
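// The barrier pairing in StoreBaseDisp is the usual sequentially-consistent
// store protocol: kAnyStore publishes prior writes, and kAnyAny supplies the
// expensive StoreLoad ordering afterwards. A rough C++11 analogy (not the
// ART implementation):
#include <atomic>

std::atomic<long> shared_field;

void VolatileStore(long value) {
  std::atomic_thread_fence(std::memory_order_release);  // ~ kAnyStore
  shared_field.store(value, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);  // ~ kAnyAny
}

int main() { VolatileStore(42); }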
+
+LIR* Mips64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
+ UNUSED(op, r_base, disp);
+ LOG(FATAL) << "Unexpected use of OpMem for MIPS64";
+ UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
+ UNUSED(cc, target);
+ LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS64";
+ UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+ UNUSED(trampoline); // The address of the trampoline is already loaded into r_tgt.
+ return OpReg(op, r_tgt);
+}
+
+} // namespace art
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index fcf4716..02d74a0 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -45,6 +45,7 @@
#include "dex/quick/arm/backend_arm.h"
#include "dex/quick/arm64/backend_arm64.h"
#include "dex/quick/mips/backend_mips.h"
+#include "dex/quick/mips64/backend_mips64.h"
#include "dex/quick/x86/backend_x86.h"
namespace art {
@@ -87,7 +88,17 @@
(1 << kPromoteCompilerTemps) |
0,
// 7 = kMips64.
- ~0U
+ (1 << kLoadStoreElimination) |
+ (1 << kLoadHoisting) |
+ (1 << kSuppressLoads) |
+ (1 << kNullCheckElimination) |
+ (1 << kPromoteRegs) |
+ (1 << kTrackLiveTemps) |
+ (1 << kSafeOptimizations) |
+ (1 << kBBOpt) |
+ (1 << kMatch) |
+ (1 << kPromoteCompilerTemps) |
+ 0
};
static_assert(sizeof(kDisabledOptimizationsPerISA) == 8 * sizeof(uint32_t),
"kDisabledOpts unexpected");
@@ -119,7 +130,7 @@
// 6 = kMips.
nullptr,
// 7 = kMips64.
- ""
+ nullptr
};
static_assert(sizeof(kSupportedTypes) == 8 * sizeof(char*), "kSupportedTypes unexpected");
@@ -430,7 +441,7 @@
// 6 = kMips.
nullptr,
// 7 = kMips64.
- kAllOpcodes
+ nullptr
};
static_assert(sizeof(kUnsupportedOpcodes) == 8 * sizeof(int*), "kUnsupportedOpcodes unexpected");
@@ -451,7 +462,7 @@
// 6 = kMips.
0,
// 7 = kMips64.
- arraysize(kAllOpcodes),
+ 0
};
static_assert(sizeof(kUnsupportedOpcodesSize) == 8 * sizeof(size_t),
"kUnsupportedOpcodesSize unexpected");
@@ -617,19 +628,20 @@
DCHECK(driver->GetCompilerOptions().IsCompilationEnabled());
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Runtime* const runtime = Runtime::Current();
+ ClassLinker* const class_linker = runtime->GetClassLinker();
InstructionSet instruction_set = driver->GetInstructionSet();
if (instruction_set == kArm) {
instruction_set = kThumb2;
}
- CompilationUnit cu(driver->GetArenaPool(), instruction_set, driver, class_linker);
+ CompilationUnit cu(runtime->GetArenaPool(), instruction_set, driver, class_linker);
- // TODO: Mips64 is not yet implemented.
CHECK((cu.instruction_set == kThumb2) ||
(cu.instruction_set == kArm64) ||
(cu.instruction_set == kX86) ||
(cu.instruction_set == kX86_64) ||
- (cu.instruction_set == kMips));
+ (cu.instruction_set == kMips) ||
+ (cu.instruction_set == kMips64));
// TODO: set this from command line
constexpr bool compiler_flip_match = false;
@@ -798,6 +810,9 @@
case kMips:
mir_to_lir = MipsCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
break;
+ case kMips64:
+ mir_to_lir = Mips64CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+ break;
case kX86:
// Fall-through.
case kX86_64:
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 67fb804..682fa28 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -1355,7 +1355,7 @@
default: res = LocCReturn(); break;
}
Clobber(res.reg);
- if (cu_->instruction_set == kMips) {
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
MarkInUse(res.reg);
} else {
LockTemp(res.reg);
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 150bdac..a4df00e 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -49,11 +49,6 @@
DCHECK(method_verifier != NULL);
MethodReference ref = method_verifier->GetMethodReference();
bool compile = IsCandidateForCompilation(ref, method_verifier->GetAccessFlags());
- // TODO: Check also for virtual/interface invokes when DEX-to-DEX supports devirtualization.
- if (!compile && !method_verifier->HasCheckCasts()) {
- return true;
- }
-
const VerifiedMethod* verified_method = VerifiedMethod::Create(method_verifier, compile);
if (verified_method == nullptr) {
// Do not report an error to the verifier. We'll just punt this later.
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 42d66be..5b90ba9 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -55,8 +55,8 @@
}
// Only need dequicken info for JIT so far.
- if (Runtime::Current()->UseJit()) {
- verified_method->GenerateDequickenMap(method_verifier);
+ if (Runtime::Current()->UseJit() && !verified_method->GenerateDequickenMap(method_verifier)) {
+ return nullptr;
}
}
@@ -194,9 +194,9 @@
*log2_max_gc_pc = i;
}
-void VerifiedMethod::GenerateDequickenMap(verifier::MethodVerifier* method_verifier) {
+bool VerifiedMethod::GenerateDequickenMap(verifier::MethodVerifier* method_verifier) {
if (method_verifier->HasFailures()) {
- return;
+ return false;
}
const DexFile::CodeItem* code_item = method_verifier->CodeItem();
const uint16_t* insns = code_item->insns_;
@@ -209,8 +209,11 @@
uint32_t dex_pc = inst->GetDexPc(insns);
verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
mirror::ArtMethod* method =
- method_verifier->GetQuickInvokedMethod(inst, line, is_range_quick);
- CHECK(method != nullptr);
+ method_verifier->GetQuickInvokedMethod(inst, line, is_range_quick, true);
+ if (method == nullptr) {
+ // It can be null if the line wasn't verified since it was unreachable.
+ return false;
+ }
// The verifier must know what the type of the object was or else we would have gotten a
// failure. Put the dex method index in the dequicken map since we need this to get number of
// arguments in the compiler.
@@ -220,7 +223,10 @@
uint32_t dex_pc = inst->GetDexPc(insns);
verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
mirror::ArtField* field = method_verifier->GetQuickFieldAccess(inst, line);
- CHECK(field != nullptr);
+ if (field == nullptr) {
+ // It can be null if the line wasn't verified since it was unreachable.
+ return false;
+ }
// The verifier must know what the type of the field was or else we would have gotten a
// failure. Put the dex field index in the dequicken map since we need this for lowering
// in the compiler.
@@ -228,6 +234,7 @@
dequicken_map_.Put(dex_pc, DexFileReference(field->GetDexFile(), field->GetDexFieldIndex()));
}
}
+ return true;
}
void VerifiedMethod::GenerateDevirtMap(verifier::MethodVerifier* method_verifier) {
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index 748bdcb..954cbf4 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -93,8 +93,8 @@
void GenerateDevirtMap(verifier::MethodVerifier* method_verifier)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Generate dequickening map into dequicken_map_.
- void GenerateDequickenMap(verifier::MethodVerifier* method_verifier)
+ // Generate dequickening map into dequicken_map_. Returns false if there is an error.
+ bool GenerateDequickenMap(verifier::MethodVerifier* method_verifier)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Generate safe case set into safe_cast_set_.
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index df2b520..be6c41a 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1621,7 +1621,7 @@
static void CheckAndClearResolveException(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK(self->IsExceptionPending());
- mirror::Throwable* exception = self->GetException(nullptr);
+ mirror::Throwable* exception = self->GetException();
std::string temp;
const char* descriptor = exception->GetClass()->GetDescriptor(&temp);
const char* expected_exceptions[] = {
@@ -1766,7 +1766,7 @@
if (klass == nullptr) {
CHECK(soa.Self()->IsExceptionPending());
- mirror::Throwable* exception = soa.Self()->GetException(nullptr);
+ mirror::Throwable* exception = soa.Self()->GetException();
VLOG(compiler) << "Exception during type resolution: " << exception->Dump();
if (exception->GetClass()->DescriptorEquals("Ljava/lang/OutOfMemoryError;")) {
// There's little point continuing compilation if the heap is exhausted.
@@ -1860,6 +1860,12 @@
CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous())
<< PrettyDescriptor(klass.Get()) << ": state=" << klass->GetStatus();
+
+ // It is *very* problematic if there are verification errors in the boot classpath. For example,
+ // we rely on things working OK without verification when the decryption dialog is brought up.
+ // So abort in a debug build if we find this violated.
+ DCHECK(!manager->GetCompiler()->IsImage() || klass->IsVerified()) << "Boot classpath class " <<
+ PrettyClass(klass.Get()) << " failed to fully verify.";
}
soa.Self()->AssertNoPendingException();
}
@@ -1983,7 +1989,7 @@
if (!success) {
CHECK(soa.Self()->IsExceptionPending());
- mirror::Throwable* exception = soa.Self()->GetException(nullptr);
+ mirror::Throwable* exception = soa.Self()->GetException();
VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
<< exception->Dump();
std::ostream* file_log = manager->GetCompiler()->
@@ -2184,8 +2190,10 @@
InstructionSetHasGenericJniStub(instruction_set_)) {
// Leaving this empty will trigger the generic JNI version
} else {
- compiled_method = compiler_->JniCompile(access_flags, method_idx, dex_file);
- CHECK(compiled_method != nullptr);
+ if (instruction_set_ != kMips64) { // Use generic JNI for Mips64 (temporarily).
+ compiled_method = compiler_->JniCompile(access_flags, method_idx, dex_file);
+ CHECK(compiled_method != nullptr);
+ }
}
} else if ((access_flags & kAccAbstract) != 0) {
// Abstract methods don't have code.
@@ -2246,7 +2254,7 @@
if (self->IsExceptionPending()) {
ScopedObjectAccess soa(self);
LOG(FATAL) << "Unexpected exception compiling: " << PrettyMethod(method_idx, dex_file) << "\n"
- << self->GetException(nullptr)->Dump();
+ << self->GetException()->Dump();
}
}
@@ -2410,8 +2418,9 @@
std::string CompilerDriver::GetMemoryUsageString(bool extended) const {
std::ostringstream oss;
- const ArenaPool* arena_pool = GetArenaPool();
- gc::Heap* heap = Runtime::Current()->GetHeap();
+ Runtime* const runtime = Runtime::Current();
+ const ArenaPool* arena_pool = runtime->GetArenaPool();
+ gc::Heap* const heap = runtime->GetHeap();
oss << "arena alloc=" << PrettySize(arena_pool->GetBytesAllocated());
oss << " java alloc=" << PrettySize(heap->GetBytesAllocated());
#ifdef HAVE_MALLOC_H
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index f949667..28a8245 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -362,12 +362,6 @@
support_boot_image_fixup_ = support_boot_image_fixup;
}
- ArenaPool* GetArenaPool() {
- return &arena_pool_;
- }
- const ArenaPool* GetArenaPool() const {
- return &arena_pool_;
- }
SwapAllocator<void>& GetSwapSpaceAllocator() {
return *swap_space_allocator_.get();
}
@@ -606,9 +600,6 @@
void* compiler_context_;
- // Arena pool used by the compiler.
- ArenaPool arena_pool_;
-
bool support_boot_image_fixup_;
// DeDuplication data structures, these own the corresponding byte arrays.
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index b4732c8..c7f81ea 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -71,6 +71,17 @@
// Separate objects into multiple bins to optimize dirty memory use.
static constexpr bool kBinObjects = true;
+static void CheckNoDexObjectsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Class* klass = obj->GetClass();
+ CHECK_NE(PrettyClass(klass), "com.android.dex.Dex");
+}
+
+static void CheckNoDexObjects() {
+ ScopedObjectAccess soa(Thread::Current());
+ Runtime::Current()->GetHeap()->VisitObjects(CheckNoDexObjectsCallback, nullptr);
+}
+
bool ImageWriter::PrepareImageAddressSpace() {
target_ptr_size_ = InstructionSetPointerSize(compiler_driver_.GetInstructionSet());
{
@@ -83,6 +94,16 @@
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->CollectGarbage(false); // Remove garbage.
+ // Dex caches must not have their dex fields set in the image. These are memory buffers of mapped
+ // dex files.
+ //
+ // We may open them in the unstarted-runtime code for class metadata. Their fields should all be
+ // reset in PruneNonImageClasses and the objects reclaimed in the GC. Make sure that's actually
+ // true.
+ if (kIsDebugBuild) {
+ CheckNoDexObjects();
+ }
+
if (!AllocMemory()) {
return false;
}
@@ -644,6 +665,9 @@
dex_cache->SetResolvedField(i, NULL);
}
}
+ // Clean the dex field. It might have been populated during the initialization phase, but
+ // contains data only valid during a real run.
+ dex_cache->SetFieldObject<false>(mirror::DexCache::DexOffset(), nullptr);
}
}
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 04efa21..beb5755 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -18,6 +18,7 @@
#include "arch/instruction_set.h"
#include "arch/instruction_set_features.h"
+#include "base/timing_logger.h"
#include "compiler_callbacks.h"
#include "dex/pass_manager.h"
#include "dex/quick_compiler_callbacks.h"
@@ -103,7 +104,8 @@
}
bool JitCompiler::CompileMethod(Thread* self, mirror::ArtMethod* method) {
- uint64_t start_time = NanoTime();
+ TimingLogger logger("JIT compiler timing logger", true, VLOG_IS_ON(jit));
+ const uint64_t start_time = NanoTime();
StackHandleScope<2> hs(self);
self->AssertNoPendingException();
Runtime* runtime = Runtime::Current();
@@ -113,14 +115,18 @@
return true; // Already compiled
}
Handle<mirror::Class> h_class(hs.NewHandle(h_method->GetDeclaringClass()));
- if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
- VLOG(jit) << "JIT failed to initialize " << PrettyMethod(h_method.Get());
- return false;
+ {
+ TimingLogger::ScopedTiming t2("Initializing", &logger);
+ if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
+ VLOG(jit) << "JIT failed to initialize " << PrettyMethod(h_method.Get());
+ return false;
+ }
}
const DexFile* dex_file = h_class->GetDexCache()->GetDexFile();
MethodReference method_ref(dex_file, h_method->GetDexMethodIndex());
// Only verify if we don't already have verification results.
if (verification_results_->GetVerifiedMethod(method_ref) == nullptr) {
+ TimingLogger::ScopedTiming t2("Verifying", &logger);
std::string error;
if (verifier::MethodVerifier::VerifyMethod(h_method.Get(), true, &error) ==
verifier::MethodVerifier::kHardFailure) {
@@ -129,7 +135,16 @@
return false;
}
}
- CompiledMethod* compiled_method(compiler_driver_->CompileMethod(self, h_method.Get()));
+ CompiledMethod* compiled_method = nullptr;
+ {
+ TimingLogger::ScopedTiming t2("Compiling", &logger);
+ compiled_method = compiler_driver_->CompileMethod(self, h_method.Get());
+ }
+ {
+ TimingLogger::ScopedTiming t2("TrimMaps", &logger);
+ // Trim maps to reduce memory usage. TODO: measure how much this increases compile time.
+ runtime->GetArenaPool()->TrimMaps();
+ }
if (compiled_method == nullptr) {
return false;
}
@@ -137,7 +152,7 @@
// Don't add the method if we are supposed to be deoptimized.
bool result = false;
if (!runtime->GetInstrumentation()->AreAllMethodsDeoptimized()) {
- const void* code = Runtime::Current()->GetClassLinker()->GetOatMethodQuickCodeFor(
+ const void* code = runtime->GetClassLinker()->GetOatMethodQuickCodeFor(
h_method.Get());
if (code != nullptr) {
// Already have some compiled code, just use this instead of linking.
@@ -145,11 +160,13 @@
h_method->SetEntryPointFromQuickCompiledCode(code);
result = true;
} else {
+ TimingLogger::ScopedTiming t2("MakeExecutable", &logger);
result = MakeExecutable(compiled_method, h_method.Get());
}
}
// Remove the compiled method to save memory.
compiler_driver_->RemoveCompiledMethod(method_ref);
+ runtime->GetJit()->AddTimingLogger(logger);
return result;
}
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 95c2d40..d25acc7 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -20,6 +20,7 @@
#include "jni/quick/arm/calling_convention_arm.h"
#include "jni/quick/arm64/calling_convention_arm64.h"
#include "jni/quick/mips/calling_convention_mips.h"
+#include "jni/quick/mips64/calling_convention_mips64.h"
#include "jni/quick/x86/calling_convention_x86.h"
#include "jni/quick/x86_64/calling_convention_x86_64.h"
#include "utils.h"
@@ -38,6 +39,8 @@
return new arm64::Arm64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
case kMips:
return new mips::MipsManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+ case kMips64:
+ return new mips64::Mips64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
case kX86:
return new x86::X86ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
case kX86_64:
@@ -111,6 +114,8 @@
return new arm64::Arm64JniCallingConvention(is_static, is_synchronized, shorty);
case kMips:
return new mips::MipsJniCallingConvention(is_static, is_synchronized, shorty);
+ case kMips64:
+ return new mips64::Mips64JniCallingConvention(is_static, is_synchronized, shorty);
case kX86:
return new x86::X86JniCallingConvention(is_static, is_synchronized, shorty);
case kX86_64:
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index ba73828..2d9e03a 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -36,6 +36,7 @@
#include "utils/arm/managed_register_arm.h"
#include "utils/arm64/managed_register_arm64.h"
#include "utils/mips/managed_register_mips.h"
+#include "utils/mips64/managed_register_mips64.h"
#include "utils/x86/managed_register_x86.h"
#include "thread.h"
@@ -329,7 +330,8 @@
// 11. Save return value
FrameOffset return_save_location = main_jni_conv->ReturnValueSaveLocation();
if (main_jni_conv->SizeOfReturnValue() != 0 && !reference_return) {
- if (instruction_set == kMips && main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
+ if ((instruction_set == kMips || instruction_set == kMips64) &&
+ main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
return_save_location.Uint32Value() % 8 != 0) {
// Ensure doubles are 8-byte aligned for MIPS
return_save_location = FrameOffset(return_save_location.Uint32Value() + kMipsPointerSize);
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.cc b/compiler/jni/quick/mips64/calling_convention_mips64.cc
new file mode 100644
index 0000000..17325d6
--- /dev/null
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.cc
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention_mips64.h"
+
+#include "base/logging.h"
+#include "handle_scope-inl.h"
+#include "utils/mips64/managed_register_mips64.h"
+
+namespace art {
+namespace mips64 {
+
+static const GpuRegister kGpuArgumentRegisters[] = {
+ A0, A1, A2, A3, A4, A5, A6, A7
+};
+
+static const FpuRegister kFpuArgumentRegisters[] = {
+ F12, F13, F14, F15, F16, F17, F18, F19
+};
+
+// Calling convention
+ManagedRegister Mips64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+ return Mips64ManagedRegister::FromGpuRegister(T9);
+}
+
+ManagedRegister Mips64JniCallingConvention::InterproceduralScratchRegister() {
+ return Mips64ManagedRegister::FromGpuRegister(T9);
+}
+
+static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
+ if (shorty[0] == 'F' || shorty[0] == 'D') {
+ return Mips64ManagedRegister::FromFpuRegister(F0);
+ } else if (shorty[0] == 'V') {
+ return Mips64ManagedRegister::NoRegister();
+ } else {
+ return Mips64ManagedRegister::FromGpuRegister(V0);
+ }
+}
+
+ManagedRegister Mips64ManagedRuntimeCallingConvention::ReturnRegister() {
+ return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister Mips64JniCallingConvention::ReturnRegister() {
+ return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister Mips64JniCallingConvention::IntReturnRegister() {
+ return Mips64ManagedRegister::FromGpuRegister(V0);
+}
+
+// Managed runtime calling convention
+
+ManagedRegister Mips64ManagedRuntimeCallingConvention::MethodRegister() {
+ return Mips64ManagedRegister::FromGpuRegister(A0);
+}
+
+bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+ return false; // Everything moved to stack on entry.
+}
+
+bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+ return true;
+}
+
+ManagedRegister Mips64ManagedRuntimeCallingConvention::CurrentParamRegister() {
+ LOG(FATAL) << "Should not reach here";
+ return ManagedRegister::NoRegister();
+}
+
+FrameOffset Mips64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+ CHECK(IsCurrentParamOnStack());
+ FrameOffset result =
+ FrameOffset(displacement_.Int32Value() + // displacement
+ sizeof(StackReference<mirror::ArtMethod>) + // Method ref
+ (itr_slots_ * sizeof(uint32_t))); // offset into in args
+ return result;
+}
+
+const ManagedRegisterEntrySpills& Mips64ManagedRuntimeCallingConvention::EntrySpills() {
+ // We spill the argument registers on MIPS64 to free them up for scratch use;
+ // we then assume all arguments are on the stack.
+ if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
+ int reg_index = 1; // We start from A1; A0 holds ArtMethod*.
+
+ // We need to choose the correct register size since the managed
+ // stack uses 32-bit stack slots.
+ ResetIterator(FrameOffset(0));
+ while (HasNext()) {
+ if (reg_index < 8) {
+ if (IsCurrentParamAFloatOrDouble()) { // FP regs.
+ FpuRegister arg = kFpuArgumentRegisters[reg_index];
+ Mips64ManagedRegister reg = Mips64ManagedRegister::FromFpuRegister(arg);
+ entry_spills_.push_back(reg, IsCurrentParamADouble() ? 8 : 4);
+ } else { // GP regs.
+ GpuRegister arg = kGpuArgumentRegisters[reg_index];
+ Mips64ManagedRegister reg = Mips64ManagedRegister::FromGpuRegister(arg);
+ entry_spills_.push_back(reg,
+ (IsCurrentParamALong() && (!IsCurrentParamAReference())) ? 8 : 4);
+ }
+ // E.g. A1, A2, F15, A4, F17, F18, A7 (reg_index selects the slot in whichever bank matches).
+ reg_index++;
+ }
+
+ Next();
+ }
+ }
+ return entry_spills_;
+}
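// Worked model of the spill assignment above: reg_index walks the GP and FP
// argument banks in lockstep starting at 1 (A0 carries ArtMethod*), picking
// the bank by argument type. The example shorty chars are hypothetical input.
#include <cstdio>

int main() {
  const char* args = "IJFD";  // int, long, float, double (return char dropped)
  int reg_index = 1;
  for (const char* p = args; *p != '\0' && reg_index < 8; ++p, ++reg_index) {
    bool fp = (*p == 'F' || *p == 'D');
    int bytes = (*p == 'J' || *p == 'D') ? 8 : 4;
    if (fp) {
      std::printf("%c -> F%d, %d bytes\n", *p, 12 + reg_index, bytes);
    } else {
      std::printf("%c -> A%d, %d bytes\n", *p, reg_index, bytes);
    }
  }
  // I -> A1 (4 bytes), J -> A2 (8), F -> F15 (4), D -> F16 (8).
}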
+
+// JNI calling convention
+
+Mips64JniCallingConvention::Mips64JniCallingConvention(bool is_static, bool is_synchronized,
+ const char* shorty)
+ : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S0));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S1));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S2));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S3));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S4));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S5));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S6));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S7));
+
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(GP));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(SP));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S8));
+}
+
+uint32_t Mips64JniCallingConvention::CoreSpillMask() const {
+ // Compute the spill mask to agree with the callee saves initialized in the constructor.
+ uint32_t result = 0;
+ result = 1 << S0 | 1 << S1 | 1 << S2 | 1 << S3 | 1 << S4 | 1 << S5 | 1 << S6 |
+ 1 << S7 | 1 << GP | 1 << SP | 1 << S8;
+ return result;
+}
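// Numeric check of CoreSpillMask above, assuming the standard MIPS register
// numbering (S0..S7 = 16..23, GP = 28, SP = 29, S8 = 30):
#include <cassert>
#include <cstdint>

int main() {
  uint32_t mask = 0;
  for (int reg = 16; reg <= 23; ++reg) {
    mask |= 1u << reg;  // S0..S7
  }
  mask |= 1u << 28 | 1u << 29 | 1u << 30;  // GP, SP, S8
  assert(mask == 0x70FF0000u);
}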
+
+ManagedRegister Mips64JniCallingConvention::ReturnScratchRegister() const {
+ return Mips64ManagedRegister::FromGpuRegister(AT);
+}
+
+size_t Mips64JniCallingConvention::FrameSize() {
+ // Method* and callee-save area size, plus the local reference segment state.
+ size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) +
+ CalleeSaveRegisters().size() * kFramePointerSize + sizeof(uint32_t);
+ // References plus 2 words for the HandleScope header.
+ size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ // Plus the return value spill area size.
+ return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
+}
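// Rough walk-through of the FrameSize() arithmetic under stated assumptions:
// a 4-byte StackReference<ArtMethod>, the 11 callee saves registered in the
// constructor at 8 bytes each, a 4-byte segment state, 16-byte stack
// alignment, and example handle-scope and return-value sizes.
#include <cassert>
#include <cstddef>

size_t RoundUp16(size_t x) { return (x + 15) & ~static_cast<size_t>(15); }

int main() {
  size_t method_ref = 4;         // StackReference<mirror::ArtMethod>
  size_t callee_saves = 11 * 8;  // S0..S7, GP, SP, S8
  size_t seg_state = 4;          // local reference segment state
  size_t frame_data = method_ref + callee_saves + seg_state;
  assert(frame_data == 96);
  size_t handle_scope = 28;      // example: header + a couple of references
  size_t ret_spill = 8;          // e.g. a long return value
  assert(RoundUp16(frame_data + handle_scope + ret_spill) == 144);
}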
+
+size_t Mips64JniCallingConvention::OutArgSize() {
+ return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
+}
+
+bool Mips64JniCallingConvention::IsCurrentParamInRegister() {
+ return itr_args_ < 8;
+}
+
+bool Mips64JniCallingConvention::IsCurrentParamOnStack() {
+ return !IsCurrentParamInRegister();
+}
+
+ManagedRegister Mips64JniCallingConvention::CurrentParamRegister() {
+ CHECK(IsCurrentParamInRegister());
+ if (IsCurrentParamAFloatOrDouble()) {
+ return Mips64ManagedRegister::FromFpuRegister(kFpuArgumentRegisters[itr_args_]);
+ } else {
+ return Mips64ManagedRegister::FromGpuRegister(kGpuArgumentRegisters[itr_args_]);
+ }
+}
+
+FrameOffset Mips64JniCallingConvention::CurrentParamStackOffset() {
+ CHECK(IsCurrentParamOnStack());
+ size_t offset = displacement_.Int32Value() - OutArgSize() + ((itr_args_ - 8) * kFramePointerSize);
+ CHECK_LT(offset, OutArgSize());
+ return FrameOffset(offset);
+}
+
+size_t Mips64JniCallingConvention::NumberOfOutgoingStackArgs() {
+ // All arguments, including the extra JNI args.
+ size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni();
+
+ // Nothing goes on the stack unless there are more than 8 arguments.
+ return (all_args > 8) ? all_args - 8 : 0;
+}
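// The two stack-argument rules above, modeled together: the first 8 args
// (counting the implicit JNIEnv* and jclass/jobject) travel in registers;
// arg i >= 8 sits (i - 8) * 8 bytes into the out-args area.
#include <cassert>
#include <cstddef>

size_t OutgoingStackArgs(size_t all_args) {
  return (all_args > 8) ? all_args - 8 : 0;
}

size_t OffsetInOutArea(size_t i) { return (i - 8) * 8; }

int main() {
  assert(OutgoingStackArgs(10) == 2);  // args 8 and 9 spill to the stack
  assert(OffsetInOutArea(9) == 8);
  assert(OutgoingStackArgs(5) == 0);   // everything fits in registers
}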
+} // namespace mips64
+} // namespace art
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.h b/compiler/jni/quick/mips64/calling_convention_mips64.h
new file mode 100644
index 0000000..dc9273b
--- /dev/null
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
+#define ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
+
+#include "jni/quick/calling_convention.h"
+
+namespace art {
+namespace mips64 {
+
+constexpr size_t kFramePointerSize = 8;
+
+class Mips64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+ public:
+ Mips64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+ : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ ~Mips64ManagedRuntimeCallingConvention() OVERRIDE {}
+ // Calling convention
+ ManagedRegister ReturnRegister() OVERRIDE;
+ ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ // Managed runtime calling convention
+ ManagedRegister MethodRegister() OVERRIDE;
+ bool IsCurrentParamInRegister() OVERRIDE;
+ bool IsCurrentParamOnStack() OVERRIDE;
+ ManagedRegister CurrentParamRegister() OVERRIDE;
+ FrameOffset CurrentParamStackOffset() OVERRIDE;
+ const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+
+ private:
+ ManagedRegisterEntrySpills entry_spills_;
+
+ DISALLOW_COPY_AND_ASSIGN(Mips64ManagedRuntimeCallingConvention);
+};
+
+class Mips64JniCallingConvention FINAL : public JniCallingConvention {
+ public:
+ explicit Mips64JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+ ~Mips64JniCallingConvention() OVERRIDE {}
+ // Calling convention
+ ManagedRegister ReturnRegister() OVERRIDE;
+ ManagedRegister IntReturnRegister() OVERRIDE;
+ ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ // JNI calling convention
+ size_t FrameSize() OVERRIDE;
+ size_t OutArgSize() OVERRIDE;
+ const std::vector<ManagedRegister>& CalleeSaveRegisters() const OVERRIDE {
+ return callee_save_regs_;
+ }
+ ManagedRegister ReturnScratchRegister() const OVERRIDE;
+ uint32_t CoreSpillMask() const OVERRIDE;
+ uint32_t FpSpillMask() const OVERRIDE {
+ return 0; // Floats aren't spilled in the JNI down call.
+ }
+ bool IsCurrentParamInRegister() OVERRIDE;
+ bool IsCurrentParamOnStack() OVERRIDE;
+ ManagedRegister CurrentParamRegister() OVERRIDE;
+ FrameOffset CurrentParamStackOffset() OVERRIDE;
+
+ // Mips64 does not need to extend small return types.
+ bool RequiresSmallResultTypeExtension() const OVERRIDE {
+ return false;
+ }
+
+ protected:
+ size_t NumberOfOutgoingStackArgs() OVERRIDE;
+
+ private:
+ // TODO: these values aren't unique and can be shared amongst instances
+ std::vector<ManagedRegister> callee_save_regs_;
+
+ DISALLOW_COPY_AND_ASSIGN(Mips64JniCallingConvention);
+};
+
+} // namespace mips64
+} // namespace art
+
+#endif // ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index c32a992..b3bb438 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -930,7 +930,7 @@
LOG(ERROR) << "Unexpected failure to resolve a method: "
<< PrettyMethod(it.GetMemberIndex(), *dex_file_, true);
soa.Self()->AssertPendingException();
- mirror::Throwable* exc = soa.Self()->GetException(nullptr);
+ mirror::Throwable* exc = soa.Self()->GetException();
std::string dump = exc->Dump();
LOG(FATAL) << dump;
}
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index ed3f949..a6ab208 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -610,7 +610,7 @@
for (size_t i = 0; i < environment_size; ++i) {
HInstruction* current = environment->GetInstructionAt(i);
if (current == nullptr) {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kNone, 0);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
continue;
}
@@ -620,37 +620,43 @@
DCHECK_EQ(current, location.GetConstant());
if (current->IsLongConstant()) {
int64_t value = current->AsLongConstant()->GetValue();
- stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, Low32Bits(value));
- stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, High32Bits(value));
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant,
+ Low32Bits(value));
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant,
+ High32Bits(value));
++i;
DCHECK_LT(i, environment_size);
} else if (current->IsDoubleConstant()) {
int64_t value = bit_cast<double, int64_t>(current->AsDoubleConstant()->GetValue());
- stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, Low32Bits(value));
- stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, High32Bits(value));
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant,
+ Low32Bits(value));
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant,
+ High32Bits(value));
++i;
DCHECK_LT(i, environment_size);
} else if (current->IsIntConstant()) {
int32_t value = current->AsIntConstant()->GetValue();
- stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, value);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
} else if (current->IsNullConstant()) {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, 0);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, 0);
} else {
DCHECK(current->IsFloatConstant());
int32_t value = bit_cast<float, int32_t>(current->AsFloatConstant()->GetValue());
- stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, value);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
}
break;
}
case Location::kStackSlot: {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInStack, location.GetStackIndex());
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack,
+ location.GetStackIndex());
break;
}
case Location::kDoubleStackSlot: {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInStack, location.GetStackIndex());
- stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInStack,
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack,
+ location.GetStackIndex());
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack,
location.GetHighStackIndex(kVRegSize));
++i;
DCHECK_LT(i, environment_size);
@@ -659,9 +665,9 @@
case Location::kRegister : {
int id = location.reg();
- stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInRegister, id);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
if (current->GetType() == Primitive::kPrimLong) {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInRegister, id);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
++i;
DCHECK_LT(i, environment_size);
}
@@ -670,9 +676,9 @@
case Location::kFpuRegister : {
int id = location.reg();
- stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInFpuRegister, id);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
if (current->GetType() == Primitive::kPrimDouble) {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInFpuRegister, id);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
++i;
DCHECK_LT(i, environment_size);
}
@@ -680,21 +686,30 @@
}
case Location::kFpuRegisterPair : {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInFpuRegister, location.low());
- stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInFpuRegister, location.high());
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister,
+ location.low());
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister,
+ location.high());
++i;
DCHECK_LT(i, environment_size);
break;
}
case Location::kRegisterPair : {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInRegister, location.low());
- stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInRegister, location.high());
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister,
+ location.low());
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister,
+ location.high());
++i;
DCHECK_LT(i, environment_size);
break;
}
+ case Location::kInvalid: {
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
+ break;
+ }
+
default:
LOG(FATAL) << "Unexpected kind " << location.GetKind();
}
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 5146afa..b8f4572 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -153,17 +153,13 @@
virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) = 0;
// Restores the register from the stack. Returns the size taken on stack.
virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) = 0;
- virtual size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
- UNUSED(stack_index, reg_id);
- UNIMPLEMENTED(FATAL);
- UNREACHABLE();
- }
- virtual size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
- UNUSED(stack_index, reg_id);
- UNIMPLEMENTED(FATAL);
- UNREACHABLE();
- }
+
+ virtual size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) = 0;
+ virtual size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) = 0;
+
virtual bool NeedsTwoRegisters(Primitive::Type type) const = 0;
+ // Returns whether we should split long moves in parallel moves.
+ virtual bool ShouldSplitLongMoves() const { return false; }
bool IsCoreCalleeSaveRegister(int reg) const {
return (core_callee_save_mask_ & (1 << reg)) != 0;
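Making the floating-point save/restore hooks pure virtual forces every backend to provide real spill code instead of the old UNIMPLEMENTED fallback. A hypothetical x86 implementation, mirroring the style of the generator below (the method bodies here are an assumption, not quoted from the patch):

size_t CodeGeneratorX86::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ movsd(Address(ESP, stack_index), XmmRegister(reg_id));
  return 8;  // size taken on the stack by a double-precision spill
}

size_t CodeGeneratorX86::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ movsd(XmmRegister(reg_id), Address(ESP, stack_index));
  return 8;
}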
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 07d88de..a09ecb8 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -673,8 +673,19 @@
source.AsRegisterPairHigh<Register>());
} else if (source.IsFpuRegister()) {
__ movsd(Address(ESP, destination.GetStackIndex()), source.AsFpuRegister<XmmRegister>());
+ } else if (source.IsConstant()) {
+ HConstant* constant = source.GetConstant();
+ int64_t value;
+ if (constant->IsLongConstant()) {
+ value = constant->AsLongConstant()->GetValue();
+ } else {
+ DCHECK(constant->IsDoubleConstant());
+ value = bit_cast<double, int64_t>(constant->AsDoubleConstant()->GetValue());
+ }
+ __ movl(Address(ESP, destination.GetStackIndex()), Immediate(Low32Bits(value)));
+ __ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)), Immediate(High32Bits(value)));
} else {
- DCHECK(source.IsDoubleStackSlot());
+ DCHECK(source.IsDoubleStackSlot()) << source;
EmitParallelMoves(
Location::StackSlot(source.GetStackIndex()),
Location::StackSlot(destination.GetStackIndex()),
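The 64-bit immediate is written as two 32-bit halves. A self-contained sketch of the Low32Bits/High32Bits helpers used throughout this file, with assumed semantics matching how the halves land in the two stack words:

#include <cstdint>

static inline uint32_t Low32Bits(uint64_t value) { return static_cast<uint32_t>(value); }
static inline uint32_t High32Bits(uint64_t value) { return static_cast<uint32_t>(value >> 32); }

// For value = 0x123456789ABCDEF0:
//   Low32Bits(value)  == 0x9ABCDEF0  -> written at GetStackIndex()
//   High32Bits(value) == 0x12345678  -> written at GetHighStackIndex(kX86WordSize)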
@@ -1555,8 +1566,6 @@
// Processing a Dex `int-to-byte' instruction.
if (in.IsRegister()) {
__ movsxb(out.AsRegister<Register>(), in.AsRegister<ByteRegister>());
- } else if (in.IsStackSlot()) {
- __ movsxb(out.AsRegister<Register>(), Address(ESP, in.GetStackIndex()));
} else {
DCHECK(in.GetConstant()->IsIntConstant());
int32_t value = in.GetConstant()->AsIntConstant()->GetValue();
@@ -1760,6 +1769,8 @@
__ addsd(result, temp);
// result = double-to-float(result)
__ cvtsd2ss(result, result);
+ // Restore low.
+ __ addl(low, Immediate(0x80000000));
break;
}
@@ -1807,6 +1818,8 @@
__ addsd(result, constant);
// result = result + temp
__ addsd(result, temp);
+ // Restore low.
+ __ addl(low, Immediate(0x80000000));
break;
}
@@ -1892,10 +1905,15 @@
if (second.IsRegisterPair()) {
__ addl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
__ adcl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
- } else {
+ } else if (second.IsDoubleStackSlot()) {
__ addl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex()));
__ adcl(first.AsRegisterPairHigh<Register>(),
Address(ESP, second.GetHighStackIndex(kX86WordSize)));
+ } else {
+ DCHECK(second.IsConstant()) << second;
+ int64_t value = second.GetConstant()->AsLongConstant()->GetValue();
+ __ addl(first.AsRegisterPairLow<Register>(), Immediate(Low32Bits(value)));
+ __ adcl(first.AsRegisterPairHigh<Register>(), Immediate(High32Bits(value)));
}
break;
}
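The addl/adcl (and, below, subl/sbbl) pairs implement 64-bit arithmetic in 32-bit halves: combine the low words first, then propagate the carry (or borrow) into the high words. A plain C++ model of the addition case:

#include <cstdint>

// Emulates `addl low; adcl high`: add the low words, then add the high words
// plus the carry out of the low addition.
uint64_t AddLongViaHalves(uint64_t a, uint64_t b) {
  uint32_t lo = static_cast<uint32_t>(a) + static_cast<uint32_t>(b);
  uint32_t carry = (lo < static_cast<uint32_t>(a)) ? 1u : 0u;  // unsigned wrap-around
  uint32_t hi = static_cast<uint32_t>(a >> 32) + static_cast<uint32_t>(b >> 32) + carry;
  return (static_cast<uint64_t>(hi) << 32) | lo;
}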
@@ -1965,10 +1983,15 @@
if (second.IsRegisterPair()) {
__ subl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
__ sbbl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
- } else {
+ } else if (second.IsDoubleStackSlot()) {
__ subl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex()));
__ sbbl(first.AsRegisterPairHigh<Register>(),
Address(ESP, second.GetHighStackIndex(kX86WordSize)));
+ } else {
+ DCHECK(second.IsConstant()) << second;
+ int64_t value = second.GetConstant()->AsLongConstant()->GetValue();
+ __ subl(first.AsRegisterPairLow<Register>(), Immediate(Low32Bits(value)));
+ __ sbbl(first.AsRegisterPairHigh<Register>(), Immediate(High32Bits(value)));
}
break;
}
@@ -1999,12 +2022,6 @@
break;
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- // TODO: Currently this handles only stack operands:
- // - we don't have enough registers because we currently use Quick ABI.
- // - by the time we have a working register allocator we will probably change the ABI
- // and fix the above.
- // - we don't have a way yet to request operands on stack but the base line compiler
- // will leave the operands on the stack with Any().
locations->SetInAt(1, Location::Any());
locations->SetOut(Location::SameAsFirstInput());
// Needed for imul on 32bits with 64bits output.
@@ -2046,39 +2063,83 @@
}
case Primitive::kPrimLong: {
- DCHECK(second.IsDoubleStackSlot());
-
Register in1_hi = first.AsRegisterPairHigh<Register>();
Register in1_lo = first.AsRegisterPairLow<Register>();
- Address in2_hi(ESP, second.GetHighStackIndex(kX86WordSize));
- Address in2_lo(ESP, second.GetStackIndex());
Register eax = locations->GetTemp(0).AsRegister<Register>();
Register edx = locations->GetTemp(1).AsRegister<Register>();
DCHECK_EQ(EAX, eax);
DCHECK_EQ(EDX, edx);
- // input: in1 - 64 bits, in2 - 64 bits
+ // input: in1 - 64 bits, in2 - 64 bits.
// output: in1
// formula: in1.hi : in1.lo = (in1.lo * in2.hi + in1.hi * in2.lo)* 2^32 + in1.lo * in2.lo
// parts: in1.hi = in1.lo * in2.hi + in1.hi * in2.lo + (in1.lo * in2.lo)[63:32]
// parts: in1.lo = (in1.lo * in2.lo)[31:0]
+ if (second.IsConstant()) {
+ DCHECK(second.GetConstant()->IsLongConstant());
- __ movl(eax, in2_hi);
- // eax <- in1.lo * in2.hi
- __ imull(eax, in1_lo);
- // in1.hi <- in1.hi * in2.lo
- __ imull(in1_hi, in2_lo);
- // in1.hi <- in1.lo * in2.hi + in1.hi * in2.lo
- __ addl(in1_hi, eax);
- // move in1_lo to eax to prepare for double precision
- __ movl(eax, in1_lo);
- // edx:eax <- in1.lo * in2.lo
- __ mull(in2_lo);
- // in1.hi <- in2.hi * in1.lo + in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
- __ addl(in1_hi, edx);
- // in1.lo <- (in1.lo * in2.lo)[31:0];
- __ movl(in1_lo, eax);
+ int64_t value = second.GetConstant()->AsLongConstant()->GetValue();
+ int32_t low_value = Low32Bits(value);
+ int32_t high_value = High32Bits(value);
+ Immediate low(low_value);
+ Immediate high(high_value);
+
+ __ movl(eax, high);
+ // eax <- in1.lo * in2.hi
+ __ imull(eax, in1_lo);
+ // in1.hi <- in1.hi * in2.lo
+ __ imull(in1_hi, low);
+ // in1.hi <- in1.lo * in2.hi + in1.hi * in2.lo
+ __ addl(in1_hi, eax);
+ // move in2_lo to eax to prepare for double precision
+ __ movl(eax, low);
+ // edx:eax <- in1.lo * in2.lo
+ __ mull(in1_lo);
+ // in1.hi <- in2.hi * in1.lo + in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
+ __ addl(in1_hi, edx);
+ // in1.lo <- (in1.lo * in2.lo)[31:0];
+ __ movl(in1_lo, eax);
+ } else if (second.IsRegisterPair()) {
+ Register in2_hi = second.AsRegisterPairHigh<Register>();
+ Register in2_lo = second.AsRegisterPairLow<Register>();
+
+ __ movl(eax, in2_hi);
+ // eax <- in1.lo * in2.hi
+ __ imull(eax, in1_lo);
+ // in1.hi <- in1.hi * in2.lo
+ __ imull(in1_hi, in2_lo);
+ // in1.hi <- in1.lo * in2.hi + in1.hi * in2.lo
+ __ addl(in1_hi, eax);
+ // move in1_lo to eax to prepare for double precision
+ __ movl(eax, in1_lo);
+ // edx:eax <- in1.lo * in2.lo
+ __ mull(in2_lo);
+ // in1.hi <- in2.hi * in1.lo + in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
+ __ addl(in1_hi, edx);
+ // in1.lo <- (in1.lo * in2.lo)[31:0];
+ __ movl(in1_lo, eax);
+ } else {
+ DCHECK(second.IsDoubleStackSlot()) << second;
+ Address in2_hi(ESP, second.GetHighStackIndex(kX86WordSize));
+ Address in2_lo(ESP, second.GetStackIndex());
+
+ __ movl(eax, in2_hi);
+ // eax <- in1.lo * in2.hi
+ __ imull(eax, in1_lo);
+ // in1.hi <- in1.hi * in2.lo
+ __ imull(in1_hi, in2_lo);
+ // in1.hi <- in1.lo * in2.hi + in1.hi * in2.lo
+ __ addl(in1_hi, eax);
+ // move in1_lo to eax to prepare for double precision
+ __ movl(eax, in1_lo);
+ // edx:eax <- in1.lo * in2.lo
+ __ mull(in2_lo);
+ // in1.hi <- in2.hi * in1.lo + in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
+ __ addl(in1_hi, edx);
+ // in1.lo <- (in1.lo * in2.lo)[31:0];
+ __ movl(in1_lo, eax);
+ }
break;
}
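All three operand shapes above emit the same five-instruction sequence; only where `in2` comes from differs. The formula in the comments can be checked with a plain C++ model: only the low 32x32 product needs full 64-bit precision (`mull`), while the two cross products contribute to the high word only, so truncating `imull`s suffice.

#include <cstdint>

uint64_t MulLongViaHalves(uint64_t a, uint64_t b) {
  uint32_t a_lo = static_cast<uint32_t>(a), a_hi = static_cast<uint32_t>(a >> 32);
  uint32_t b_lo = static_cast<uint32_t>(b), b_hi = static_cast<uint32_t>(b >> 32);
  uint64_t low = static_cast<uint64_t>(a_lo) * b_lo;  // mull: full result in edx:eax
  uint32_t hi = a_lo * b_hi + a_hi * b_lo             // the two imulls
              + static_cast<uint32_t>(low >> 32);     // + edx
  // a_hi * b_hi only affects bits >= 64 and is dropped, as in the emitted code.
  return (static_cast<uint64_t>(hi) << 32) | static_cast<uint32_t>(low);
}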
@@ -2237,7 +2298,7 @@
}
void LocationsBuilderX86::VisitDiv(HDiv* div) {
- LocationSummary::CallKind call_kind = div->GetResultType() == Primitive::kPrimLong
+ LocationSummary::CallKind call_kind = (div->GetResultType() == Primitive::kPrimLong)
? LocationSummary::kCall
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
@@ -2306,8 +2367,10 @@
void LocationsBuilderX86::VisitRem(HRem* rem) {
Primitive::Type type = rem->GetResultType();
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(rem, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = (rem->GetResultType() == Primitive::kPrimLong)
+ ? LocationSummary::kCall
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
switch (type) {
case Primitive::kPrimInt: {
@@ -2646,7 +2709,6 @@
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- // TODO: we set any here but we don't handle constants
locations->SetInAt(1, Location::Any());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
@@ -2674,18 +2736,24 @@
case Primitive::kPrimLong: {
if (right.IsRegisterPair()) {
__ cmpl(left.AsRegisterPairHigh<Register>(), right.AsRegisterPairHigh<Register>());
- } else {
- DCHECK(right.IsDoubleStackSlot());
+ } else if (right.IsDoubleStackSlot()) {
__ cmpl(left.AsRegisterPairHigh<Register>(),
Address(ESP, right.GetHighStackIndex(kX86WordSize)));
+ } else {
+ DCHECK(right.IsConstant()) << right;
+ __ cmpl(left.AsRegisterPairHigh<Register>(),
+ Immediate(High32Bits(right.GetConstant()->AsLongConstant()->GetValue())));
}
__ j(kLess, &less); // Signed compare.
__ j(kGreater, &greater); // Signed compare.
if (right.IsRegisterPair()) {
__ cmpl(left.AsRegisterPairLow<Register>(), right.AsRegisterPairLow<Register>());
- } else {
- DCHECK(right.IsDoubleStackSlot());
+ } else if (right.IsDoubleStackSlot()) {
__ cmpl(left.AsRegisterPairLow<Register>(), Address(ESP, right.GetStackIndex()));
+ } else {
+ DCHECK(right.IsConstant()) << right;
+ __ cmpl(left.AsRegisterPairLow<Register>(),
+ Immediate(Low32Bits(right.GetConstant()->AsLongConstant()->GetValue())));
}
break;
}
@@ -2770,7 +2838,12 @@
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+
+ // The output overlaps for longs: we don't want the low move to overwrite
+ // the object's location.
+ locations->SetOut(Location::RequiresRegister(),
+ (instruction->GetType() == Primitive::kPrimLong) ? Location::kOutputOverlap
+ : Location::kNoOutputOverlap);
if (field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong)) {
// Long values can be loaded atomically into an XMM using movsd.
@@ -2827,6 +2900,7 @@
__ psrlq(temp, Immediate(32));
__ movd(out.AsRegisterPairHigh<Register>(), temp);
} else {
+ DCHECK_NE(base, out.AsRegisterPairLow<Register>());
__ movl(out.AsRegisterPairLow<Register>(), Address(base, offset));
codegen_->MaybeRecordImplicitNullCheck(instruction);
__ movl(out.AsRegisterPairHigh<Register>(), Address(base, kX86WordSize + offset));
@@ -3064,7 +3138,11 @@
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ // The output overlaps for longs: we don't want the low move to overwrite
+ // the array's location.
+ locations->SetOut(Location::RequiresRegister(),
+ (instruction->GetType() == Primitive::kPrimLong) ? Location::kOutputOverlap
+ : Location::kNoOutputOverlap);
}
void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
@@ -3138,6 +3216,7 @@
case Primitive::kPrimLong: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
Location out = locations->Out();
+ DCHECK_NE(obj, out.AsRegisterPairLow<Register>());
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
__ movl(out.AsRegisterPairLow<Register>(), Address(obj, offset));
@@ -3569,8 +3648,7 @@
DCHECK(destination.IsStackSlot()) << destination;
__ movl(Address(ESP, destination.GetStackIndex()), Immediate(value));
}
- } else {
- DCHECK(constant->IsFloatConstant());
+ } else if (constant->IsFloatConstant()) {
float value = constant->AsFloatConstant()->GetValue();
Immediate imm(bit_cast<float, int32_t>(value));
if (destination.IsFpuRegister()) {
@@ -3583,6 +3661,43 @@
DCHECK(destination.IsStackSlot()) << destination;
__ movl(Address(ESP, destination.GetStackIndex()), imm);
}
+ } else if (constant->IsLongConstant()) {
+ int64_t value = constant->AsLongConstant()->GetValue();
+ int32_t low_value = Low32Bits(value);
+ int32_t high_value = High32Bits(value);
+ Immediate low(low_value);
+ Immediate high(high_value);
+ if (destination.IsDoubleStackSlot()) {
+ __ movl(Address(ESP, destination.GetStackIndex()), low);
+ __ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)), high);
+ } else {
+ __ movl(destination.AsRegisterPairLow<Register>(), low);
+ __ movl(destination.AsRegisterPairHigh<Register>(), high);
+ }
+ } else {
+ DCHECK(constant->IsDoubleConstant());
+ double dbl_value = constant->AsDoubleConstant()->GetValue();
+ int64_t value = bit_cast<double, int64_t>(dbl_value);
+ int32_t low_value = Low32Bits(value);
+ int32_t high_value = High32Bits(value);
+ Immediate low(low_value);
+ Immediate high(high_value);
+ if (destination.IsFpuRegister()) {
+ XmmRegister dest = destination.AsFpuRegister<XmmRegister>();
+ if (value == 0) {
+ // Easy handling of +0.0 (bit pattern is all zeros); -0.0 falls through to the pushes below.
+ __ xorpd(dest, dest);
+ } else {
+ __ pushl(high);
+ __ pushl(low);
+ __ movsd(dest, Address(ESP, 0));
+ __ addl(ESP, Immediate(8));
+ }
+ } else {
+ DCHECK(destination.IsDoubleStackSlot()) << destination;
+ __ movl(Address(ESP, destination.GetStackIndex()), low);
+ __ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)), high);
+ }
}
} else {
LOG(FATAL) << "Unimplemented move: " << destination << " <- " << source;
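x86 has no immediate form for loading a double into an XMM register, hence the push/push/movsd/add-ESP dance: the two 32-bit halves of the bit pattern are pushed so that `[ESP]` holds the little-endian image of the double. A sketch of the bit-pattern split, with `std::memcpy` standing in for `bit_cast`:

#include <cstdint>
#include <cstring>

uint64_t DoubleBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // same effect as bit_cast<double, int64_t>
  return bits;
}
// pushl High32Bits(bits); pushl Low32Bits(bits);  ->  [ESP..ESP+7] is the
// 8-byte image of the double, which movsd then loads before ESP is restored.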
@@ -3650,6 +3765,33 @@
Exchange32(source.AsFpuRegister<XmmRegister>(), destination.GetStackIndex());
} else if (destination.IsFpuRegister() && source.IsStackSlot()) {
Exchange32(destination.AsFpuRegister<XmmRegister>(), source.GetStackIndex());
+ } else if (source.IsFpuRegister() && destination.IsDoubleStackSlot()) {
+ // Take advantage of the 16 bytes in the XMM register.
+ XmmRegister reg = source.AsFpuRegister<XmmRegister>();
+ Address stack(ESP, destination.GetStackIndex());
+ // Load the stack slot's double into the high quadword of the XMM register.
+ __ movhpd(reg, stack);
+
+ // Store the low double into the destination.
+ __ movsd(stack, reg);
+
+ // Move the high double to the low double.
+ __ psrldq(reg, Immediate(8));
+ } else if (destination.IsFpuRegister() && source.IsDoubleStackSlot()) {
+ // Take advantage of the 16 bytes in the XMM register.
+ XmmRegister reg = destination.AsFpuRegister<XmmRegister>();
+ Address stack(ESP, source.GetStackIndex());
+ // Load the stack slot's double into the high quadword of the XMM register.
+ __ movhpd(reg, stack);
+
+ // Store the low double into the destination.
+ __ movsd(stack, reg);
+
+ // Move the high double to the low double.
+ __ psrldq(reg, Immediate(8));
+ } else if (destination.IsDoubleStackSlot() && source.IsDoubleStackSlot()) {
+ Exchange(destination.GetStackIndex(), source.GetStackIndex());
+ Exchange(destination.GetHighStackIndex(kX86WordSize), source.GetHighStackIndex(kX86WordSize));
} else {
LOG(FATAL) << "Unimplemented: source: " << source << ", destination: " << destination;
}
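The XMM-based exchange avoids needing a second scratch register: the 128-bit register temporarily holds both values. Modeled on a two-lane view of the register:

#include <cstdint>

// xmm = {lane0, lane1}; lane0 initially holds the FPU value being swapped.
void SwapXmmWithStackSlot(uint64_t xmm[2], uint64_t* stack_slot) {
  xmm[1] = *stack_slot;  // movhpd reg, stack : stack double -> high lane
  *stack_slot = xmm[0];  // movsd  stack, reg : low lane (old FPU value) -> stack
  xmm[0] = xmm[1];       // psrldq reg, 8     : shift high lane down to low lane
}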
@@ -3951,7 +4093,7 @@
__ xorl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
__ xorl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
}
- } else {
+ } else if (second.IsDoubleStackSlot()) {
if (instruction->IsAnd()) {
__ andl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex()));
__ andl(first.AsRegisterPairHigh<Register>(),
@@ -3966,6 +4108,22 @@
__ xorl(first.AsRegisterPairHigh<Register>(),
Address(ESP, second.GetHighStackIndex(kX86WordSize)));
}
+ } else {
+ DCHECK(second.IsConstant()) << second;
+ int64_t value = second.GetConstant()->AsLongConstant()->GetValue();
+ Immediate low(Low32Bits(value));
+ Immediate high(High32Bits(value));
+ if (instruction->IsAnd()) {
+ __ andl(first.AsRegisterPairLow<Register>(), low);
+ __ andl(first.AsRegisterPairHigh<Register>(), high);
+ } else if (instruction->IsOr()) {
+ __ orl(first.AsRegisterPairLow<Register>(), low);
+ __ orl(first.AsRegisterPairHigh<Register>(), high);
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.AsRegisterPairLow<Register>(), low);
+ __ xorl(first.AsRegisterPairHigh<Register>(), high);
+ }
}
}
}
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index f5a9b7d..c5763de 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -245,6 +245,8 @@
return type == Primitive::kPrimLong;
}
+ bool ShouldSplitLongMoves() const OVERRIDE { return true; }
+
Label* GetFrameEntryLabel() { return &frame_entry_label_; }
private:
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index fca9933..ec0cc3e 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -18,7 +18,28 @@
namespace art {
+// This visitor tries to simplify operations that yield a constant. For example,
+// `input * 0` is replaced by the constant 0.
+class InstructionWithAbsorbingInputSimplifier : public HGraphVisitor {
+ public:
+ explicit InstructionWithAbsorbingInputSimplifier(HGraph* graph) : HGraphVisitor(graph) {}
+
+ private:
+ void VisitShift(HBinaryOperation* shift);
+
+ void VisitAnd(HAnd* instruction) OVERRIDE;
+ void VisitMul(HMul* instruction) OVERRIDE;
+ void VisitOr(HOr* instruction) OVERRIDE;
+ void VisitRem(HRem* instruction) OVERRIDE;
+ void VisitShl(HShl* instruction) OVERRIDE;
+ void VisitShr(HShr* instruction) OVERRIDE;
+ void VisitSub(HSub* instruction) OVERRIDE;
+ void VisitUShr(HUShr* instruction) OVERRIDE;
+ void VisitXor(HXor* instruction) OVERRIDE;
+};
+
void HConstantFolding::Run() {
+ InstructionWithAbsorbingInputSimplifier simplifier(graph_);
// Process basic blocks in reverse post-order in the dominator tree,
// so that an instruction turned into a constant, used as input of
// another instruction, may possibly be used to turn that second
@@ -38,6 +59,8 @@
inst->AsBinaryOperation()->TryStaticEvaluation();
if (constant != nullptr) {
inst->GetBlock()->ReplaceAndRemoveInstructionWith(inst, constant);
+ } else {
+ inst->Accept(&simplifier);
}
} else if (inst->IsUnaryOperation()) {
// Constant folding: replace `op(a)' with a constant at compile
@@ -47,9 +70,166 @@
if (constant != nullptr) {
inst->GetBlock()->ReplaceAndRemoveInstructionWith(inst, constant);
}
+ } else if (inst->IsDivZeroCheck()) {
+ // We can safely remove the check if the input is a non-zero constant.
+ HDivZeroCheck* check = inst->AsDivZeroCheck();
+ HInstruction* check_input = check->InputAt(0);
+ if (check_input->IsConstant() && !check_input->AsConstant()->IsZero()) {
+ check->ReplaceWith(check_input);
+ check->GetBlock()->RemoveInstruction(check);
+ }
}
}
}
}
+void InstructionWithAbsorbingInputSimplifier::VisitShift(HBinaryOperation* instruction) {
+ DCHECK(instruction->IsShl() || instruction->IsShr() || instruction->IsUShr());
+ HInstruction* left = instruction->GetLeft();
+ if (left->IsConstant() && left->AsConstant()->IsZero()) {
+ // Replace code looking like
+ // SHL dst, 0, shift_amount
+ // with
+ // CONSTANT 0
+ instruction->ReplaceWith(left);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+}
+
+void InstructionWithAbsorbingInputSimplifier::VisitAnd(HAnd* instruction) {
+ HConstant* input_cst = instruction->GetConstantRight();
+ if ((input_cst != nullptr) && input_cst->IsZero()) {
+ // Replace code looking like
+ // AND dst, src, 0
+ // with
+ // CONSTANT 0
+ instruction->ReplaceWith(input_cst);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+}
+
+void InstructionWithAbsorbingInputSimplifier::VisitMul(HMul* instruction) {
+ HConstant* input_cst = instruction->GetConstantRight();
+ Primitive::Type type = instruction->GetType();
+ if (Primitive::IsIntOrLongType(type) &&
+ (input_cst != nullptr) && input_cst->IsZero()) {
+ // Replace code looking like
+ // MUL dst, src, 0
+ // with
+ // CONSTANT 0
+ // Integral multiplication by zero always yields zero, but floating-point
+ // multiplication by zero does not always do so. For example, `Infinity * 0.0`
+ // yields NaN.
+ instruction->ReplaceWith(input_cst);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+}
+
+void InstructionWithAbsorbingInputSimplifier::VisitOr(HOr* instruction) {
+ HConstant* input_cst = instruction->GetConstantRight();
+
+ if (input_cst == nullptr) {
+ return;
+ }
+
+ if (Int64FromConstant(input_cst) == -1) {
+ // Replace code looking like
+ // OR dst, src, 0xFFF...FF
+ // with
+ // CONSTANT 0xFFF...FF
+ instruction->ReplaceWith(input_cst);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+}
+
+void InstructionWithAbsorbingInputSimplifier::VisitRem(HRem* instruction) {
+ Primitive::Type type = instruction->GetType();
+
+ if (!Primitive::IsIntegralType(type)) {
+ return;
+ }
+
+ HBasicBlock* block = instruction->GetBlock();
+
+ if (instruction->GetLeft()->IsConstant() &&
+ instruction->GetLeft()->AsConstant()->IsZero()) {
+ // Replace code looking like
+ // REM dst, 0, src
+ // with
+ // CONSTANT 0
+ instruction->ReplaceWith(instruction->GetLeft());
+ block->RemoveInstruction(instruction);
+ }
+
+ HConstant* cst_right = instruction->GetRight()->AsConstant();
+ if (((cst_right != nullptr) &&
+ (cst_right->IsOne() || cst_right->IsMinusOne())) ||
+ (instruction->GetLeft() == instruction->GetRight())) {
+ // Replace code looking like
+ // REM dst, src, 1
+ // or
+ // REM dst, src, -1
+ // or
+ // REM dst, src, src
+ // with
+ // CONSTANT 0
+ ArenaAllocator* allocator = GetGraph()->GetArena();
+ block->ReplaceAndRemoveInstructionWith(instruction,
+ HConstant::NewConstant(allocator, type, 0));
+ }
+}
+
+void InstructionWithAbsorbingInputSimplifier::VisitShl(HShl* instruction) {
+ VisitShift(instruction);
+}
+
+void InstructionWithAbsorbingInputSimplifier::VisitShr(HShr* instruction) {
+ VisitShift(instruction);
+}
+
+void InstructionWithAbsorbingInputSimplifier::VisitSub(HSub* instruction) {
+ Primitive::Type type = instruction->GetType();
+
+ if (!Primitive::IsIntegralType(type)) {
+ return;
+ }
+
+ HBasicBlock* block = instruction->GetBlock();
+ ArenaAllocator* allocator = GetGraph()->GetArena();
+
+ // We assume that GVN has run before, so we only perform a pointer
+ // comparison. If for some reason the values are equal but the pointers are
+ // different, we are still correct and only miss an optimisation
+ // opportunity.
+ if (instruction->GetLeft() == instruction->GetRight()) {
+ // Replace code looking like
+ // SUB dst, src, src
+ // with
+ // CONSTANT 0
+ // Note that we cannot optimise `x - x` to `0` for floating-point. It does
+ // not hold when `x` is NaN or an infinity.
+ block->ReplaceAndRemoveInstructionWith(instruction,
+ HConstant::NewConstant(allocator, type, 0));
+ }
+}
+
+void InstructionWithAbsorbingInputSimplifier::VisitUShr(HUShr* instruction) {
+ VisitShift(instruction);
+}
+
+void InstructionWithAbsorbingInputSimplifier::VisitXor(HXor* instruction) {
+ if (instruction->GetLeft() == instruction->GetRight()) {
+ // Replace code looking like
+ // XOR dst, src, src
+ // with
+ // CONSTANT 0
+ Primitive::Type type = instruction->GetType();
+ HBasicBlock* block = instruction->GetBlock();
+ ArenaAllocator* allocator = GetGraph()->GetArena();
+
+ block->ReplaceAndRemoveInstructionWith(instruction,
+ HConstant::NewConstant(allocator, type, 0));
+ }
+}
+
} // namespace art
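The identities this simplifier relies on, checked on concrete integral values (the floating-point cases are deliberately excluded above):

#include <cassert>
#include <cstdint>

int main() {
  int32_t x = 12345;
  assert(x * 0 == 0);      // MUL with absorbing 0 (integral only)
  assert((x & 0) == 0);    // AND with absorbing 0
  assert((x | -1) == -1);  // OR with absorbing all-ones
  assert((0 << 7) == 0);   // shifts of a zero left input
  assert(x - x == 0);      // SUB of equal operands (integral only)
  assert((x ^ x) == 0);    // XOR of equal operands
  assert(x % 1 == 0);      // REM by 1; likewise for -1 and x % x
  return 0;
}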
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index a7f1f74..76b9f4f 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -362,6 +362,12 @@
Primitive::PrettyDescriptor(phi->GetType())));
}
}
+ if (phi->GetType() != HPhi::ToPhiType(phi->GetType())) {
+ AddError(StringPrintf("Phi %d in block %d does not have an expected phi type: %s",
+ phi->GetId(),
+ phi->GetBlock()->GetBlockId(),
+ Primitive::PrettyDescriptor(phi->GetType())));
+ }
}
void SSAChecker::VisitIf(HIf* instruction) {
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index b34957a..e22f7cc 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -124,8 +124,8 @@
resolved_method->GetAccessFlags(),
nullptr);
- HGraph* callee_graph =
- new (graph_->GetArena()) HGraph(graph_->GetArena(), graph_->GetCurrentInstructionId());
+ HGraph* callee_graph = new (graph_->GetArena()) HGraph(
+ graph_->GetArena(), graph_->IsDebuggable(), graph_->GetCurrentInstructionId());
OptimizingCompilerStats inline_stats;
HGraphBuilder builder(callee_graph,
@@ -155,15 +155,11 @@
}
// Run simple optimizations on the graph.
- SsaRedundantPhiElimination redundant_phi(callee_graph);
- SsaDeadPhiElimination dead_phi(callee_graph);
HDeadCodeElimination dce(callee_graph);
HConstantFolding fold(callee_graph);
InstructionSimplifier simplify(callee_graph, stats_);
HOptimization* optimizations[] = {
- &redundant_phi,
- &dead_phi,
&dce,
&fold,
&simplify,
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index fd99070..2ef19b9 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -27,6 +27,8 @@
: HGraphVisitor(graph), stats_(stats) {}
private:
+ void VisitShift(HBinaryOperation* shift);
+
void VisitSuspendCheck(HSuspendCheck* check) OVERRIDE;
void VisitEqual(HEqual* equal) OVERRIDE;
void VisitArraySet(HArraySet* equal) OVERRIDE;
@@ -34,6 +36,16 @@
void VisitNullCheck(HNullCheck* instruction) OVERRIDE;
void VisitArrayLength(HArrayLength* instruction) OVERRIDE;
void VisitCheckCast(HCheckCast* instruction) OVERRIDE;
+ void VisitAdd(HAdd* instruction) OVERRIDE;
+ void VisitAnd(HAnd* instruction) OVERRIDE;
+ void VisitDiv(HDiv* instruction) OVERRIDE;
+ void VisitMul(HMul* instruction) OVERRIDE;
+ void VisitOr(HOr* instruction) OVERRIDE;
+ void VisitShl(HShl* instruction) OVERRIDE;
+ void VisitShr(HShr* instruction) OVERRIDE;
+ void VisitSub(HSub* instruction) OVERRIDE;
+ void VisitUShr(HUShr* instruction) OVERRIDE;
+ void VisitXor(HXor* instruction) OVERRIDE;
OptimizingCompilerStats* stats_;
};
@@ -43,6 +55,29 @@
visitor.VisitInsertionOrder();
}
+namespace {
+
+bool AreAllBitsSet(HConstant* constant) {
+ return Int64FromConstant(constant) == -1;
+}
+
+} // namespace
+
+void InstructionSimplifierVisitor::VisitShift(HBinaryOperation* instruction) {
+ DCHECK(instruction->IsShl() || instruction->IsShr() || instruction->IsUShr());
+ HConstant* input_cst = instruction->GetConstantRight();
+ HInstruction* input_other = instruction->GetLeastConstantLeft();
+
+ if ((input_cst != nullptr) && input_cst->IsZero()) {
+ // Replace code looking like
+ // SHL dst, src, 0
+ // with
+ // src
+ instruction->ReplaceWith(input_other);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+}
+
void InstructionSimplifierVisitor::VisitNullCheck(HNullCheck* null_check) {
HInstruction* obj = null_check->InputAt(0);
if (!obj->CanBeNull()) {
@@ -137,4 +172,234 @@
}
}
+void InstructionSimplifierVisitor::VisitAdd(HAdd* instruction) {
+ HConstant* input_cst = instruction->GetConstantRight();
+ HInstruction* input_other = instruction->GetLeastConstantLeft();
+ if ((input_cst != nullptr) && input_cst->IsZero()) {
+ // Replace code looking like
+ // ADD dst, src, 0
+ // with
+ // src
+ instruction->ReplaceWith(input_other);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+}
+
+void InstructionSimplifierVisitor::VisitAnd(HAnd* instruction) {
+ HConstant* input_cst = instruction->GetConstantRight();
+ HInstruction* input_other = instruction->GetLeastConstantLeft();
+
+ if ((input_cst != nullptr) && AreAllBitsSet(input_cst)) {
+ // Replace code looking like
+ // AND dst, src, 0xFFF...FF
+ // with
+ // src
+ instruction->ReplaceWith(input_other);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ return;
+ }
+
+ // We assume that GVN has run before, so we only perform a pointer comparison.
+ // If for some reason the values are equal but the pointers are different, we
+ // are still correct and only miss an optimisation opportunity.
+ if (instruction->GetLeft() == instruction->GetRight()) {
+ // Replace code looking like
+ // AND dst, src, src
+ // with
+ // src
+ instruction->ReplaceWith(instruction->GetLeft());
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+}
+
+void InstructionSimplifierVisitor::VisitDiv(HDiv* instruction) {
+ HConstant* input_cst = instruction->GetConstantRight();
+ HInstruction* input_other = instruction->GetLeastConstantLeft();
+ Primitive::Type type = instruction->GetType();
+
+ if ((input_cst != nullptr) && input_cst->IsOne()) {
+ // Replace code looking like
+ // DIV dst, src, 1
+ // with
+ // src
+ instruction->ReplaceWith(input_other);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ return;
+ }
+
+ if ((input_cst != nullptr) && input_cst->IsMinusOne() &&
+ (Primitive::IsFloatingPointType(type) || Primitive::IsIntOrLongType(type))) {
+ // Replace code looking like
+ // DIV dst, src, -1
+ // with
+ // NEG dst, src
+ instruction->GetBlock()->ReplaceAndRemoveInstructionWith(
+ instruction, (new (GetGraph()->GetArena()) HNeg(type, input_other)));
+ }
+}
+
+void InstructionSimplifierVisitor::VisitMul(HMul* instruction) {
+ HConstant* input_cst = instruction->GetConstantRight();
+ HInstruction* input_other = instruction->GetLeastConstantLeft();
+ Primitive::Type type = instruction->GetType();
+ HBasicBlock* block = instruction->GetBlock();
+ ArenaAllocator* allocator = GetGraph()->GetArena();
+
+ if (input_cst == nullptr) {
+ return;
+ }
+
+ if (input_cst->IsOne()) {
+ // Replace code looking like
+ // MUL dst, src, 1
+ // with
+ // src
+ instruction->ReplaceWith(input_other);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ return;
+ }
+
+ if (input_cst->IsMinusOne() &&
+ (Primitive::IsFloatingPointType(type) || Primitive::IsIntOrLongType(type))) {
+ // Replace code looking like
+ // MUL dst, src, -1
+ // with
+ // NEG dst, src
+ HNeg* neg = new (allocator) HNeg(type, input_other);
+ block->ReplaceAndRemoveInstructionWith(instruction, neg);
+ return;
+ }
+
+ if (Primitive::IsFloatingPointType(type) &&
+ ((input_cst->IsFloatConstant() && input_cst->AsFloatConstant()->GetValue() == 2.0f) ||
+ (input_cst->IsDoubleConstant() && input_cst->AsDoubleConstant()->GetValue() == 2.0))) {
+ // Replace code looking like
+ // FP_MUL dst, src, 2.0
+ // with
+ // FP_ADD dst, src, src
+ // The 'int' and 'long' cases are handled below.
+ block->ReplaceAndRemoveInstructionWith(instruction,
+ new (allocator) HAdd(type, input_other, input_other));
+ return;
+ }
+
+ if (Primitive::IsIntOrLongType(type)) {
+ int64_t factor = Int64FromConstant(input_cst);
+ // We expect the `0` case to have been handled in the constant folding pass.
+ DCHECK_NE(factor, 0);
+ if (IsPowerOfTwo(factor)) {
+ // Replace code looking like
+ // MUL dst, src, pow_of_2
+ // with
+ // SHL dst, src, log2(pow_of_2)
+ HIntConstant* shift = new (allocator) HIntConstant(WhichPowerOf2(factor));
+ block->InsertInstructionBefore(shift, instruction);
+ HShl* shl = new (allocator) HShl(type, input_other, shift);
+ block->ReplaceAndRemoveInstructionWith(instruction, shl);
+ }
+ }
+}
+
+void InstructionSimplifierVisitor::VisitOr(HOr* instruction) {
+ HConstant* input_cst = instruction->GetConstantRight();
+ HInstruction* input_other = instruction->GetLeastConstantLeft();
+
+ if ((input_cst != nullptr) && input_cst->IsZero()) {
+ // Replace code looking like
+ // OR dst, src, 0
+ // with
+ // src
+ instruction->ReplaceWith(input_other);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ return;
+ }
+
+ // We assume that GVN has run before, so we only perform a pointer comparison.
+ // If for some reason the values are equal but the pointers are different, we
+ // are still correct and only miss an optimisation opportunity.
+ if (instruction->GetLeft() == instruction->GetRight()) {
+ // Replace code looking like
+ // OR dst, src, src
+ // with
+ // src
+ instruction->ReplaceWith(instruction->GetLeft());
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+}
+
+void InstructionSimplifierVisitor::VisitShl(HShl* instruction) {
+ VisitShift(instruction);
+}
+
+void InstructionSimplifierVisitor::VisitShr(HShr* instruction) {
+ VisitShift(instruction);
+}
+
+void InstructionSimplifierVisitor::VisitSub(HSub* instruction) {
+ HConstant* input_cst = instruction->GetConstantRight();
+ HInstruction* input_other = instruction->GetLeastConstantLeft();
+
+ if ((input_cst != nullptr) && input_cst->IsZero()) {
+ // Replace code looking like
+ // SUB dst, src, 0
+ // with
+ // src
+ instruction->ReplaceWith(input_other);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ return;
+ }
+
+ Primitive::Type type = instruction->GetType();
+ if (!Primitive::IsIntegralType(type)) {
+ return;
+ }
+
+ HBasicBlock* block = instruction->GetBlock();
+ ArenaAllocator* allocator = GetGraph()->GetArena();
+
+ if (instruction->GetLeft()->IsConstant()) {
+ int64_t left = Int64FromConstant(instruction->GetLeft()->AsConstant());
+ if (left == 0) {
+ // Replace code looking like
+ // SUB dst, 0, src
+ // with
+ // NEG dst, src
+ // Note that we cannot optimise `0.0 - x` to `-x` for floating-point. When
+ // `x` is `0.0`, the former expression yields `0.0`, while the latter
+ // yields `-0.0`.
+ HNeg* neg = new (allocator) HNeg(type, instruction->GetRight());
+ block->ReplaceAndRemoveInstructionWith(instruction, neg);
+ }
+ }
+}
+
+void InstructionSimplifierVisitor::VisitUShr(HUShr* instruction) {
+ VisitShift(instruction);
+}
+
+void InstructionSimplifierVisitor::VisitXor(HXor* instruction) {
+ HConstant* input_cst = instruction->GetConstantRight();
+ HInstruction* input_other = instruction->GetLeastConstantLeft();
+
+ if ((input_cst != nullptr) && input_cst->IsZero()) {
+ // Replace code looking like
+ // XOR dst, src, 0
+ // with
+ // src
+ instruction->ReplaceWith(input_other);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ return;
+ }
+
+ if ((input_cst != nullptr) && AreAllBitsSet(input_cst)) {
+ // Replace code looking like
+ // XOR dst, src, 0xFFF...FF
+ // with
+ // NOT dst, src
+ HNot* bitwise_not = new (GetGraph()->GetArena()) HNot(instruction->GetType(), input_other);
+ instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, bitwise_not);
+ return;
+ }
+}
+
} // namespace art
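Likewise, the identity and strength-reduction rules added here, checked on concrete values. `WhichPowerOf2` is assumed to return log2 of its power-of-two argument, which is how the MUL-to-SHL rewrite uses it:

#include <cassert>
#include <cstdint>

int main() {
  int32_t x = 7;
  assert(x + 0 == x && (x | 0) == x && (x ^ 0) == x);  // zero is the identity
  assert((x & -1) == x && x / 1 == x);                 // all-ones / unit identity
  assert(x * 8 == x << 3);                             // MUL by 2^3 -> SHL by 3
  assert(x / -1 == -x && x * -1 == -x);                // DIV/MUL by -1 -> NEG
  return 0;
}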
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index 907eff1..0b0cfde 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -388,44 +388,44 @@
// Make sure we create a preheader of a loop when a header originally has two
// incoming blocks and one back edge.
// Bitsets are made of:
- // (constant0, constant4, constant5, phi in block 8, phi in block 4)
+ // (constant0, constant4, constant5, phi in block 8)
const char* expected =
"Block 0\n"
- " live in: (00000)\n"
- " live out: (11100)\n"
- " kill: (11100)\n"
+ " live in: (0000)\n"
+ " live out: (1110)\n"
+ " kill: (1110)\n"
"Block 1\n"
- " live in: (11100)\n"
- " live out: (01100)\n"
- " kill: (00000)\n"
+ " live in: (1110)\n"
+ " live out: (0110)\n"
+ " kill: (0000)\n"
"Block 2\n"
- " live in: (01000)\n"
- " live out: (00000)\n"
- " kill: (00000)\n"
+ " live in: (0100)\n"
+ " live out: (0000)\n"
+ " kill: (0000)\n"
"Block 3\n"
- " live in: (00100)\n"
- " live out: (00000)\n"
- " kill: (00000)\n"
+ " live in: (0010)\n"
+ " live out: (0000)\n"
+ " kill: (0000)\n"
"Block 4\n" // loop header
- " live in: (00000)\n"
- " live out: (00001)\n"
- " kill: (00001)\n"
+ " live in: (0001)\n"
+ " live out: (0001)\n"
+ " kill: (0000)\n"
"Block 5\n" // back edge
- " live in: (00001)\n"
- " live out: (00000)\n"
- " kill: (00000)\n"
+ " live in: (0001)\n"
+ " live out: (0001)\n"
+ " kill: (0000)\n"
"Block 6\n" // return block
- " live in: (00001)\n"
- " live out: (00000)\n"
- " kill: (00000)\n"
+ " live in: (0001)\n"
+ " live out: (0000)\n"
+ " kill: (0000)\n"
"Block 7\n" // exit block
- " live in: (00000)\n"
- " live out: (00000)\n"
- " kill: (00000)\n"
+ " live in: (0000)\n"
+ " live out: (0000)\n"
+ " kill: (0000)\n"
"Block 8\n" // synthesized pre header
- " live in: (00000)\n"
- " live out: (00000)\n"
- " kill: (00010)\n";
+ " live in: (0000)\n"
+ " live out: (0001)\n"
+ " kill: (0001)\n";
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
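Reading the bitsets: bit i corresponds to the i-th value in the tuple listed in the comment above.

// Example from block 0: "live out: (1110)" means constant0, constant4 and
// constant5 are live across the block's exit, while the phi in block 8 is
// not live there; "kill: (1110)" means those three values are defined in
// block 0. The loop-header phi now lives out of blocks 4 and 5 because the
// pre-header (block 8), not the header, defines it.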
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 198cc15..566c0da 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -211,15 +211,25 @@
}
Location ToLow() const {
- return IsRegisterPair()
- ? Location::RegisterLocation(low())
- : Location::FpuRegisterLocation(low());
+ if (IsRegisterPair()) {
+ return Location::RegisterLocation(low());
+ } else if (IsFpuRegisterPair()) {
+ return Location::FpuRegisterLocation(low());
+ } else {
+ DCHECK(IsDoubleStackSlot());
+ return Location::StackSlot(GetStackIndex());
+ }
}
Location ToHigh() const {
- return IsRegisterPair()
- ? Location::RegisterLocation(high())
- : Location::FpuRegisterLocation(high());
+ if (IsRegisterPair()) {
+ return Location::RegisterLocation(high());
+ } else if (IsFpuRegisterPair()) {
+ return Location::FpuRegisterLocation(high());
+ } else {
+ DCHECK(IsDoubleStackSlot());
+ return Location::StackSlot(GetHighStackIndex(4));
+ }
}
static uintptr_t EncodeStackIndex(intptr_t stack_index) {
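With the new double-stack-slot case, a split long move can take both halves from the stack. Hypothetical values, assuming kX86WordSize == 4, which is why ToHigh() passes a literal 4 to GetHighStackIndex:

Location loc = Location::DoubleStackSlot(16);
Location lo = loc.ToLow();   // StackSlot(16): the low 32 bits
Location hi = loc.ToHigh();  // StackSlot(20): the high 32 bits, one word further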
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index e51bbc3..a90ebce 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -673,10 +673,43 @@
return nullptr;
}
+HConstant* HBinaryOperation::GetConstantRight() const {
+ if (GetRight()->IsConstant()) {
+ return GetRight()->AsConstant();
+ } else if (IsCommutative() && GetLeft()->IsConstant()) {
+ return GetLeft()->AsConstant();
+ } else {
+ return nullptr;
+ }
+}
+
+// If `GetConstantRight()` returns one of the inputs, this returns the other
+// one. Otherwise it returns nullptr.
+HInstruction* HBinaryOperation::GetLeastConstantLeft() const {
+ HInstruction* most_constant_right = GetConstantRight();
+ if (most_constant_right == nullptr) {
+ return nullptr;
+ } else if (most_constant_right == GetLeft()) {
+ return GetRight();
+ } else {
+ return GetLeft();
+ }
+}
+
bool HCondition::IsBeforeWhenDisregardMoves(HIf* if_) const {
return this == if_->GetPreviousDisregardingMoves();
}
+HConstant* HConstant::NewConstant(ArenaAllocator* allocator, Primitive::Type type, int64_t val) {
+ if (type == Primitive::kPrimInt) {
+ DCHECK(IsInt<32>(val));
+ return new (allocator) HIntConstant(val);
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimLong);
+ return new (allocator) HLongConstant(val);
+ }
+}
+
bool HInstruction::Equals(HInstruction* other) const {
if (!InstructionTypeEquals(other)) return false;
DCHECK_EQ(GetKind(), other->GetKind());
@@ -907,7 +940,8 @@
} else {
if (!returns_void) {
// There will be multiple returns.
- return_value = new (allocator) HPhi(allocator, kNoRegNumber, 0, invoke->GetType());
+ return_value = new (allocator) HPhi(
+ allocator, kNoRegNumber, 0, HPhi::ToPhiType(invoke->GetType()));
to->AddPhi(return_value->AsPhi());
}
for (size_t i = 0, e = to->GetPredecessors().Size(); i < e; ++i) {
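How the two accessors normalize operands for the simplifiers (sketch):

//   x + 2 : GetConstantRight() == 2, GetLeastConstantLeft() == x
//   2 + x : same result, because HAdd is commutative
//   2 - x : GetConstantRight() == nullptr -- HSub is not commutative, so the
//           constant cannot legally be treated as the right-hand input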
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 8b56166..a35fa1d 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -103,7 +103,7 @@
// Control-flow graph of a method. Contains a list of basic blocks.
class HGraph : public ArenaObject<kArenaAllocMisc> {
public:
- HGraph(ArenaAllocator* arena, int start_instruction_id = 0)
+ HGraph(ArenaAllocator* arena, bool debuggable = false, int start_instruction_id = 0)
: arena_(arena),
blocks_(arena, kDefaultNumberOfBlocks),
reverse_post_order_(arena, kDefaultNumberOfBlocks),
@@ -114,6 +114,7 @@
number_of_in_vregs_(0),
temporaries_vreg_slots_(0),
has_array_accesses_(false),
+ debuggable_(debuggable),
current_instruction_id_(start_instruction_id) {}
ArenaAllocator* GetArena() const { return arena_; }
@@ -132,8 +133,13 @@
// recognition. Returns whether it was successful in doing all these steps.
bool TryBuildingSsa() {
BuildDominatorTree();
+ // The SSA builder requires loops to all be natural. Specifically, the dead phi
+ // elimination phase checks the consistency of the graph when doing a post-order
+ // visit for eliminating dead phis: a dead phi can only have loop header phi
+ // users remaining when being visited.
+ if (!AnalyzeNaturalLoops()) return false;
TransformToSsa();
- return AnalyzeNaturalLoops();
+ return true;
}
void BuildDominatorTree();
@@ -208,6 +214,8 @@
has_array_accesses_ = value;
}
+ bool IsDebuggable() const { return debuggable_; }
+
HNullConstant* GetNullConstant();
private:
@@ -248,6 +256,11 @@
// Has array accesses. We can totally skip BCE if it's false.
bool has_array_accesses_;
+ // Indicates whether the graph should be compiled in a way that
+ // ensures full debuggability. If false, we can apply more
+ // aggressive optimizations that may limit the level of debugging.
+ const bool debuggable_;
+
// The current id to assign to a newly added instruction. See HInstruction.id_.
int32_t current_instruction_id_;
@@ -1096,6 +1109,7 @@
bool HasUses() const { return !uses_.IsEmpty() || !env_uses_.IsEmpty(); }
bool HasEnvironmentUses() const { return !env_uses_.IsEmpty(); }
+ bool HasNonEnvironmentUses() const { return !uses_.IsEmpty(); }
// Does this instruction strictly dominate `other_instruction`?
// Returns false if this instruction and `other_instruction` are the same.
@@ -1561,6 +1575,14 @@
virtual int32_t Evaluate(int32_t x, int32_t y) const = 0;
virtual int64_t Evaluate(int64_t x, int64_t y) const = 0;
+ // Returns an input that can legally be used as the right input and is
+ // constant, or nullptr.
+ HConstant* GetConstantRight() const;
+
+ // If `GetConstantRight()` returns one of the inputs, this returns the other
+ // one. Otherwise it returns nullptr.
+ HInstruction* GetLeastConstantLeft() const;
+
DECLARE_INSTRUCTION(BinaryOperation);
private:
@@ -1832,6 +1854,12 @@
bool CanBeMoved() const OVERRIDE { return true; }
+ virtual bool IsMinusOne() const { return false; }
+ virtual bool IsZero() const { return false; }
+ virtual bool IsOne() const { return false; }
+
+ static HConstant* NewConstant(ArenaAllocator* allocator, Primitive::Type type, int64_t val);
+
DECLARE_INSTRUCTION(Constant);
private:
@@ -1851,6 +1879,16 @@
size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
+ bool IsMinusOne() const OVERRIDE {
+ return bit_cast<uint32_t>(AsFloatConstant()->GetValue()) == bit_cast<uint32_t>((-1.0f));
+ }
+ bool IsZero() const OVERRIDE {
+ return AsFloatConstant()->GetValue() == 0.0f;
+ }
+ bool IsOne() const OVERRIDE {
+ return bit_cast<uint32_t>(AsFloatConstant()->GetValue()) == bit_cast<uint32_t>(1.0f);
+ }
+
DECLARE_INSTRUCTION(FloatConstant);
private:
@@ -1872,6 +1910,16 @@
size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
+ bool IsMinusOne() const OVERRIDE {
+ return bit_cast<uint64_t>(AsDoubleConstant()->GetValue()) == bit_cast<uint64_t>((-1.0));
+ }
+ bool IsZero() const OVERRIDE {
+ return AsDoubleConstant()->GetValue() == 0.0;
+ }
+ bool IsOne() const OVERRIDE {
+ return bit_cast<uint64_t>(AsDoubleConstant()->GetValue()) == bit_cast<uint64_t>(1.0);
+ }
+
DECLARE_INSTRUCTION(DoubleConstant);
private:
@@ -1917,6 +1965,10 @@
// method is a workaround until we fix the above.
bool ActAsNullConstant() const OVERRIDE { return value_ == 0; }
+ bool IsMinusOne() const OVERRIDE { return GetValue() == -1; }
+ bool IsZero() const OVERRIDE { return GetValue() == 0; }
+ bool IsOne() const OVERRIDE { return GetValue() == 1; }
+
DECLARE_INSTRUCTION(IntConstant);
private:
@@ -1937,6 +1989,10 @@
size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
+ bool IsMinusOne() const OVERRIDE { return GetValue() == -1; }
+ bool IsZero() const OVERRIDE { return GetValue() == 0; }
+ bool IsOne() const OVERRIDE { return GetValue() == 1; }
+
DECLARE_INSTRUCTION(LongConstant);
private:
@@ -2498,6 +2554,19 @@
inputs_.SetSize(number_of_inputs);
}
+ // Returns a type equivalent to the given `type` that an `HPhi` can hold.
+ static Primitive::Type ToPhiType(Primitive::Type type) {
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar:
+ return Primitive::kPrimInt;
+ default:
+ return type;
+ }
+ }
+
size_t InputCount() const OVERRIDE { return inputs_.Size(); }
void AddInput(HInstruction* input);
@@ -3289,8 +3358,19 @@
if (kIsDebugBuild) {
if (instruction != nullptr) {
for (size_t i = 0, e = moves_.Size(); i < e; ++i) {
- DCHECK_NE(moves_.Get(i).GetInstruction(), instruction)
- << "Doing parallel moves for the same instruction.";
+ if (moves_.Get(i).GetInstruction() == instruction) {
+ // Special case the situation where the move is for the spill slot
+ // of the instruction.
+ if ((GetPrevious() == instruction)
+ || ((GetPrevious() == nullptr)
+ && instruction->IsPhi()
+ && instruction->GetBlock() == GetBlock())) {
+ DCHECK_NE(destination.GetKind(), moves_.Get(i).GetDestination().GetKind())
+ << "Doing parallel moves for the same instruction.";
+ } else {
+ DCHECK(false) << "Doing parallel moves for the same instruction.";
+ }
+ }
}
}
for (size_t i = 0, e = moves_.Size(); i < e; ++i) {
@@ -3441,6 +3521,12 @@
DISALLOW_COPY_AND_ASSIGN(HBlocksInLoopIterator);
};
+inline int64_t Int64FromConstant(HConstant* constant) {
+ DCHECK(constant->IsIntConstant() || constant->IsLongConstant());
+ return constant->IsIntConstant() ? constant->AsIntConstant()->GetValue()
+ : constant->AsLongConstant()->GetValue();
+}
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_NODES_H_
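Why the phi-type normalization matters (sketch): dex vregs are untyped 32-bit slots, so boolean/byte/short/char values flow into phis as ints.

//   ToPhiType(kPrimBoolean) == ToPhiType(kPrimByte)
//     == ToPhiType(kPrimShort) == ToPhiType(kPrimChar) == kPrimInt
//   ToPhiType(t) == t for int, long, float, double and references.
// The graph checker hunk above enforces exactly this: a well-formed phi's
// type must be a fixed point of ToPhiType.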
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index eb98424..475d98c 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -298,8 +298,6 @@
const DexCompilationUnit& dex_compilation_unit,
PassInfoPrinter* pass_info_printer,
StackHandleScopeCollection* handles) {
- SsaRedundantPhiElimination redundant_phi(graph);
- SsaDeadPhiElimination dead_phi(graph);
HDeadCodeElimination dce(graph);
HConstantFolding fold1(graph);
InstructionSimplifier simplify1(graph, stats);
@@ -317,8 +315,6 @@
IntrinsicsRecognizer intrinsics(graph, dex_compilation_unit.GetDexFile(), driver);
HOptimization* optimizations[] = {
- &redundant_phi,
- &dead_phi,
&intrinsics,
&dce,
&fold1,
@@ -461,7 +457,8 @@
ArenaPool pool;
ArenaAllocator arena(&pool);
- HGraph* graph = new (&arena) HGraph(&arena);
+ HGraph* graph = new (&arena) HGraph(
+ &arena, compiler_driver->GetCompilerOptions().GetDebuggable());
// For testing purposes, we put a special marker on method names that should be compiled
// with this compiler. This makes sure we're not regressing.
@@ -523,7 +520,7 @@
dex_file,
dex_compilation_unit,
&pass_info_printer);
- } else if (shouldOptimize && RegisterAllocator::Supports(instruction_set)) {
+ } else if (shouldOptimize && can_allocate_registers) {
LOG(FATAL) << "Could not allocate registers in optimizing compiler";
UNREACHABLE();
} else {
diff --git a/compiler/optimizing/primitive_type_propagation.cc b/compiler/optimizing/primitive_type_propagation.cc
index fe23fcf..c20c8a1 100644
--- a/compiler/optimizing/primitive_type_propagation.cc
+++ b/compiler/optimizing/primitive_type_propagation.cc
@@ -33,7 +33,7 @@
// to merge with a void type, we should use the existing one.
return new_type == Primitive::kPrimVoid
? existing
- : new_type;
+ : HPhi::ToPhiType(new_type);
}
}
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 748ab22..cecc210 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -16,6 +16,7 @@
#include "register_allocator.h"
+#include <iostream>
#include <sstream>
#include "base/bit_vector-inl.h"
@@ -32,6 +33,9 @@
// allocate SRegister.
static int GetHighForLowRegister(int reg) { return reg + 1; }
static bool IsLowRegister(int reg) { return (reg & 1) == 0; }
+static bool IsLowOfUnalignedPairInterval(LiveInterval* low) {
+ return GetHighForLowRegister(low->GetRegister()) != low->GetHighInterval()->GetRegister();
+}
RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator,
CodeGenerator* codegen,
@@ -70,28 +74,13 @@
reserved_out_slots_ = 1 + codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
}
-bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph,
+bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED,
InstructionSet instruction_set) {
- if (!Supports(instruction_set)) {
- return false;
- }
- if (instruction_set == kArm64
+ return instruction_set == kArm64
|| instruction_set == kX86_64
|| instruction_set == kArm
- || instruction_set == kThumb2) {
- return true;
- }
- for (size_t i = 0, e = graph.GetBlocks().Size(); i < e; ++i) {
- for (HInstructionIterator it(graph.GetBlocks().Get(i)->GetInstructions());
- !it.Done();
- it.Advance()) {
- HInstruction* current = it.Current();
- if (instruction_set == kX86 && current->GetType() == Primitive::kPrimLong) {
- return false;
- }
- }
- }
- return true;
+ || instruction_set == kX86
+ || instruction_set == kThumb2;
}
static bool ShouldProcess(bool processing_core_registers, LiveInterval* interval) {
@@ -771,8 +760,15 @@
return false;
}
- if (current->IsLowInterval() && free_until[GetHighForLowRegister(reg)] == 0) {
- return false;
+ if (current->IsLowInterval()) {
+ // If the high register of this interval is not available, we need to spill.
+ int high_reg = current->GetHighInterval()->GetRegister();
+ if (high_reg == kNoRegister) {
+ high_reg = GetHighForLowRegister(reg);
+ }
+ if (free_until[high_reg] == 0) {
+ return false;
+ }
}
current->SetRegister(reg);
@@ -831,16 +827,18 @@
return reg;
}
-bool RegisterAllocator::TrySplitNonPairIntervalAt(size_t position,
- size_t first_register_use,
- size_t* next_use) {
+bool RegisterAllocator::TrySplitNonPairOrUnalignedPairIntervalAt(size_t position,
+ size_t first_register_use,
+ size_t* next_use) {
for (size_t i = 0, e = active_.Size(); i < e; ++i) {
LiveInterval* active = active_.Get(i);
DCHECK(active->HasRegister());
+ if (active->IsFixed()) continue;
+ if (active->IsHighInterval()) continue;
+ if (first_register_use > next_use[active->GetRegister()]) continue;
+
// Split the first interval found.
- if (first_register_use <= next_use[active->GetRegister()]
- && !active->IsLowInterval()
- && !active->IsHighInterval()) {
+ if (!active->IsLowInterval() || IsLowOfUnalignedPairInterval(active)) {
LiveInterval* split = Split(active, position);
active_.DeleteAt(i);
if (split != active) {
@@ -921,7 +919,7 @@
// When allocating the low part, we made sure the high register was available.
DCHECK_LT(first_register_use, next_use[reg]);
} else if (current->IsLowInterval()) {
- reg = FindAvailableRegisterPair(next_use, current->GetStart());
+ reg = FindAvailableRegisterPair(next_use, first_register_use);
// We should spill if both registers are not available.
should_spill = (first_register_use >= next_use[reg])
|| (first_register_use >= next_use[GetHighForLowRegister(reg)]);
@@ -934,14 +932,17 @@
DCHECK_NE(reg, kNoRegister);
if (should_spill) {
DCHECK(!current->IsHighInterval());
- bool is_allocation_at_use_site = (current->GetStart() == (first_register_use - 1));
+ bool is_allocation_at_use_site = (current->GetStart() >= (first_register_use - 1));
if (current->IsLowInterval()
&& is_allocation_at_use_site
- && TrySplitNonPairIntervalAt(current->GetStart(), first_register_use, next_use)) {
+ && TrySplitNonPairOrUnalignedPairIntervalAt(current->GetStart(),
+ first_register_use,
+ next_use)) {
// If we're allocating a register for `current` because the instruction at
// that position requires it, but we think we should spill, then there are
- // non-pair intervals blocking the allocation. We split the first
- // interval found, and put ourselves first in the `unhandled_` list.
+ // non-pair intervals or unaligned pair intervals blocking the allocation.
+ // We split the first interval found, and put ourselves first in the
+ // `unhandled_` list.
LiveInterval* existing = unhandled_->Peek();
DCHECK(existing->IsHighInterval());
DCHECK_EQ(existing->GetLowInterval(), current);
@@ -951,10 +952,15 @@
// register, we split this interval just before its first register use.
AllocateSpillSlotFor(current);
LiveInterval* split = Split(current, first_register_use - 1);
- DCHECK_NE(current, split) << "There is not enough registers available for "
- << split->GetParent()->GetDefinedBy()->DebugName() << " "
- << split->GetParent()->GetDefinedBy()->GetId()
- << " at " << first_register_use - 1;
+ if (current == split) {
+ DumpInterval(std::cerr, current);
+ DumpAllIntervals(std::cerr);
+ // This situation has the potential to loop infinitely, so we make it a non-debug CHECK.
+ CHECK(false) << "There are not enough registers available for "
+ << split->GetParent()->GetDefinedBy()->DebugName() << " "
+ << split->GetParent()->GetDefinedBy()->GetId()
+ << " at " << first_register_use - 1;
+ }
AddSorted(unhandled_, split);
}
return false;
@@ -1203,7 +1209,24 @@
|| destination.IsDoubleStackSlot();
}
-void RegisterAllocator::AddInputMoveFor(HInstruction* user,
+void RegisterAllocator::AddMove(HParallelMove* move,
+ Location source,
+ Location destination,
+ HInstruction* instruction,
+ Primitive::Type type) const {
+ if (type == Primitive::kPrimLong
+ && codegen_->ShouldSplitLongMoves()
+ // The parallel move resolver knows how to deal with long constants.
+ && !source.IsConstant()) {
+ move->AddMove(source.ToLow(), destination.ToLow(), instruction);
+ move->AddMove(source.ToHigh(), destination.ToHigh(), nullptr);
+ } else {
+ move->AddMove(source, destination, instruction);
+ }
+}
+
+void RegisterAllocator::AddInputMoveFor(HInstruction* input,
+ HInstruction* user,
Location source,
Location destination) const {
if (source.Equals(destination)) return;
@@ -1222,7 +1245,7 @@
move = previous->AsParallelMove();
}
DCHECK_EQ(move->GetLifetimePosition(), user->GetLifetimePosition());
- move->AddMove(source, destination, nullptr);
+ AddMove(move, source, destination, nullptr, input->GetType());
}
static bool IsInstructionStart(size_t position) {
@@ -1251,8 +1274,16 @@
at = liveness_.GetInstructionFromPosition((position + 1) / 2);
// Note that parallel moves may have already been inserted, so we explicitly
// ask for the first instruction of the block: `GetInstructionFromPosition` does
- // not contain the moves.
+ // not contain the `HParallelMove` instructions.
at = at->GetBlock()->GetFirstInstruction();
+
+ if (at->GetLifetimePosition() < position) {
+ // We may insert moves for split siblings and phi spills at the beginning of the block.
+ // Since this is a different lifetime position, we need to go to the next instruction.
+ DCHECK(at->IsParallelMove());
+ at = at->GetNext();
+ }
+
if (at->GetLifetimePosition() != position) {
DCHECK_GT(at->GetLifetimePosition(), position);
move = new (allocator_) HParallelMove(allocator_);
@@ -1294,7 +1325,7 @@
}
}
DCHECK_EQ(move->GetLifetimePosition(), position);
- move->AddMove(source, destination, instruction);
+ AddMove(move, source, destination, instruction, instruction->GetType());
}
void RegisterAllocator::InsertParallelMoveAtExitOf(HBasicBlock* block,
@@ -1324,7 +1355,7 @@
} else {
move = previous->AsParallelMove();
}
- move->AddMove(source, destination, instruction);
+ AddMove(move, source, destination, instruction, instruction->GetType());
}
void RegisterAllocator::InsertParallelMoveAtEntryOf(HBasicBlock* block,
@@ -1336,14 +1367,15 @@
HInstruction* first = block->GetFirstInstruction();
HParallelMove* move = first->AsParallelMove();
+ size_t position = block->GetLifetimeStart();
// This is a parallel move for connecting blocks. We need to differentiate
// it with moves for connecting siblings in a same block, and input moves.
- if (move == nullptr || move->GetLifetimePosition() != block->GetLifetimeStart()) {
+ if (move == nullptr || move->GetLifetimePosition() != position) {
move = new (allocator_) HParallelMove(allocator_);
- move->SetLifetimePosition(block->GetLifetimeStart());
+ move->SetLifetimePosition(position);
block->InsertInstructionBefore(move, first);
}
- move->AddMove(source, destination, instruction);
+ AddMove(move, source, destination, instruction, instruction->GetType());
}
void RegisterAllocator::InsertMoveAfter(HInstruction* instruction,
@@ -1367,7 +1399,7 @@
move->SetLifetimePosition(position);
instruction->GetBlock()->InsertInstructionBefore(move, instruction->GetNext());
}
- move->AddMove(source, destination, instruction);
+ AddMove(move, source, destination, instruction, instruction->GetType());
}
void RegisterAllocator::ConnectSiblings(LiveInterval* interval) {
@@ -1402,7 +1434,7 @@
if (expected_location.IsUnallocated()) {
locations->SetInAt(use->GetInputIndex(), source);
} else if (!expected_location.IsConstant()) {
- AddInputMoveFor(use->GetUser(), source, expected_location);
+ AddInputMoveFor(interval->GetDefinedBy(), use->GetUser(), source, expected_location);
}
} else {
DCHECK(use->GetUser()->IsInvoke());
@@ -1657,7 +1689,7 @@
Location source = input->GetLiveInterval()->GetLocationAt(
predecessor->GetLifetimeEnd() - 1);
Location destination = phi->GetLiveInterval()->ToLocation();
- InsertParallelMoveAtExitOf(predecessor, nullptr, source, destination);
+ InsertParallelMoveAtExitOf(predecessor, phi, source, destination);
}
}
}
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index 579f069..fcc6112 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -66,13 +66,6 @@
bool log_fatal_on_failure);
static bool CanAllocateRegistersFor(const HGraph& graph, InstructionSet instruction_set);
- static bool Supports(InstructionSet instruction_set) {
- return instruction_set == kArm
- || instruction_set == kArm64
- || instruction_set == kThumb2
- || instruction_set == kX86
- || instruction_set == kX86_64;
- }
size_t GetNumberOfSpillSlots() const {
return int_spill_slots_.Size()
@@ -121,12 +114,21 @@
Location source,
Location destination) const;
void InsertMoveAfter(HInstruction* instruction, Location source, Location destination) const;
- void AddInputMoveFor(HInstruction* user, Location source, Location destination) const;
+ void AddInputMoveFor(HInstruction* input,
+ HInstruction* user,
+ Location source,
+ Location destination) const;
void InsertParallelMoveAt(size_t position,
HInstruction* instruction,
Location source,
Location destination) const;
+ void AddMove(HParallelMove* move,
+ Location source,
+ Location destination,
+ HInstruction* instruction,
+ Primitive::Type type) const;
+
// Helper methods.
void AllocateRegistersInternal();
void ProcessInstruction(HInstruction* instruction);
@@ -136,9 +138,11 @@
int FindAvailableRegisterPair(size_t* next_use, size_t starting_at) const;
int FindAvailableRegister(size_t* next_use) const;
- // Try splitting an active non-pair interval at the given `position`.
+ // Try splitting an active non-pair or unaligned pair interval at the given `position`.
// Returns whether it was successful at finding such an interval.
- bool TrySplitNonPairIntervalAt(size_t position, size_t first_register_use, size_t* next_use);
+ bool TrySplitNonPairOrUnalignedPairIntervalAt(size_t position,
+ size_t first_register_use,
+ size_t* next_use);
ArenaAllocator* const allocator_;
CodeGenerator* const codegen_;
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 3dc7505..ba11e90 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -22,6 +22,158 @@
namespace art {
+/**
+ * A debuggable application may require reviving phis, to ensure their
+ * associated DEX register is available to a debugger. This class implements
+ * the logic for statement (c) of the SsaBuilder (see ssa_builder.h). It
+ * also makes sure that phis with incompatible input types are not revived
+ * (statement (b) of the SsaBuilder).
+ *
+ * This phase must be run after detecting dead phis through the
+ * DeadPhiElimination phase, and before deleting the dead phis.
+ */
+class DeadPhiHandling : public ValueObject {
+ public:
+ explicit DeadPhiHandling(HGraph* graph)
+ : graph_(graph), worklist_(graph->GetArena(), kDefaultWorklistSize) {}
+
+ void Run();
+
+ private:
+ void VisitBasicBlock(HBasicBlock* block);
+ void ProcessWorklist();
+ void AddToWorklist(HPhi* phi);
+ void AddDependentInstructionsToWorklist(HPhi* phi);
+ bool UpdateType(HPhi* phi);
+
+ HGraph* const graph_;
+ GrowableArray<HPhi*> worklist_;
+
+ static constexpr size_t kDefaultWorklistSize = 8;
+
+ DISALLOW_COPY_AND_ASSIGN(DeadPhiHandling);
+};
+
+bool DeadPhiHandling::UpdateType(HPhi* phi) {
+ Primitive::Type existing = phi->GetType();
+ DCHECK(phi->IsLive());
+
+ bool conflict = false;
+ Primitive::Type new_type = existing;
+ for (size_t i = 0, e = phi->InputCount(); i < e; ++i) {
+ HInstruction* input = phi->InputAt(i);
+ if (input->IsPhi() && input->AsPhi()->IsDead()) {
+ // We are doing a reverse post order visit of the graph, reviving
+ // phis that have environment uses and updating their types. If an
+ // input is a phi, and it is dead (because its input types are
+ // conflicting), this phi must be marked dead as well.
+ conflict = true;
+ break;
+ }
+ Primitive::Type input_type = HPhi::ToPhiType(input->GetType());
+
+ // The only acceptable transitions are:
+ // - From void to typed: first time we update the type of this phi.
+ // - From int to reference (or reference to int): the phi has to change
+ // to reference type. If the integer input cannot be converted to a
+ // reference input, the phi will remain dead.
+ if (new_type == Primitive::kPrimVoid) {
+ new_type = input_type;
+ } else if (new_type == Primitive::kPrimNot && input_type == Primitive::kPrimInt) {
+ HInstruction* equivalent = SsaBuilder::GetReferenceTypeEquivalent(input);
+ if (equivalent == nullptr) {
+ conflict = true;
+ break;
+ } else {
+ phi->ReplaceInput(equivalent, i);
+ if (equivalent->IsPhi()) {
+ DCHECK_EQ(equivalent->GetType(), Primitive::kPrimNot);
+ // We created a new phi, but that phi has the same inputs as the old phi. We
+ // add it to the worklist to ensure its inputs can also be converted to reference.
+ // If not, it will remain dead, and the algorithm will make the current phi dead
+ // as well.
+ equivalent->AsPhi()->SetLive();
+ AddToWorklist(equivalent->AsPhi());
+ }
+ }
+ } else if (new_type == Primitive::kPrimInt && input_type == Primitive::kPrimNot) {
+ new_type = Primitive::kPrimNot;
+ // Start over, we may request reference equivalents for the inputs of the phi.
+ i = -1;
+ } else if (new_type != input_type) {
+ conflict = true;
+ break;
+ }
+ }
+
+ if (conflict) {
+ phi->SetType(Primitive::kPrimVoid);
+ phi->SetDead();
+ return true;
+ } else {
+ DCHECK(phi->IsLive());
+ phi->SetType(new_type);
+ return existing != new_type;
+ }
+}
+
+void DeadPhiHandling::VisitBasicBlock(HBasicBlock* block) {
+ for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+ HPhi* phi = it.Current()->AsPhi();
+ if (phi->IsDead() && phi->HasEnvironmentUses()) {
+ phi->SetLive();
+ if (block->IsLoopHeader()) {
+ // Give a type to the loop phi, to guarantee convergence of the algorithm.
+ phi->SetType(phi->InputAt(0)->GetType());
+ AddToWorklist(phi);
+ } else {
+ // Because we are doing a reverse post order visit, all inputs of
+ // this phi have been visited and therefore had their (initial) type set.
+ UpdateType(phi);
+ }
+ }
+ }
+}
+
+void DeadPhiHandling::ProcessWorklist() {
+ while (!worklist_.IsEmpty()) {
+ HPhi* instruction = worklist_.Pop();
+ // Note that the same equivalent phi can be added multiple times in the work list, if
+ // used by multiple phis. The first call to `UpdateType` will know whether the phi is
+ // dead or live.
+ if (instruction->IsLive() && UpdateType(instruction)) {
+ AddDependentInstructionsToWorklist(instruction);
+ }
+ }
+}
+
+void DeadPhiHandling::AddToWorklist(HPhi* instruction) {
+ DCHECK(instruction->IsLive());
+ worklist_.Add(instruction);
+}
+
+void DeadPhiHandling::AddDependentInstructionsToWorklist(HPhi* instruction) {
+ for (HUseIterator<HInstruction*> it(instruction->GetUses()); !it.Done(); it.Advance()) {
+ HPhi* phi = it.Current()->GetUser()->AsPhi();
+ if (phi != nullptr && !phi->IsDead()) {
+ AddToWorklist(phi);
+ }
+ }
+}
+
+void DeadPhiHandling::Run() {
+ for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
+ VisitBasicBlock(it.Current());
+ }
+ ProcessWorklist();
+}
+
+static bool IsPhiEquivalentOf(HInstruction* instruction, HPhi* phi) {
+ return instruction != nullptr
+ && instruction->IsPhi()
+ && instruction->AsPhi()->GetRegNumber() == phi->GetRegNumber();
+}
+
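
DeadPhiHandling above is a worklist fixed-point computation: revive a phi, recompute its type from its inputs, and requeue its phi users whenever the type changes, until nothing changes. A standalone sketch of the pattern (the node model and three-point type lattice are simplifications, not ART's HPhi):

    #include <vector>

    enum class Type { kVoid, kInt, kRef, kConflict };

    struct Node {
      Type type = Type::kVoid;
      std::vector<int> inputs;  // indices of input nodes
      std::vector<int> users;   // indices of phi users
    };

    // Merge two types; kVoid is the identity, disagreement is a conflict.
    static Type Merge(Type a, Type b) {
      if (a == Type::kVoid) return b;
      if (b == Type::kVoid || a == b) return a;
      return Type::kConflict;
    }

    // Fixed-point worklist: whenever a node's type changes, its users are
    // revisited, mirroring DeadPhiHandling::ProcessWorklist.
    static void Propagate(std::vector<Node>& nodes, std::vector<int> worklist) {
      while (!worklist.empty()) {
        int n = worklist.back();
        worklist.pop_back();
        Type merged = Type::kVoid;
        for (int in : nodes[n].inputs) merged = Merge(merged, nodes[in].type);
        if (merged != nodes[n].type) {
          nodes[n].type = merged;
          for (int user : nodes[n].users) worklist.push_back(user);
        }
      }
    }

    int main() {
      std::vector<Node> nodes(3);
      nodes[0].type = Type::kInt;             // non-phi input
      nodes[1] = {Type::kVoid, {0, 2}, {2}};  // loop phi
      nodes[2] = {Type::kVoid, {1}, {1}};     // phi user feeding back
      Propagate(nodes, {1});                  // both phis stabilize at kInt
      return nodes[2].type == Type::kInt ? 0 : 1;
    }
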
void SsaBuilder::BuildSsa() {
// 1) Visit in reverse post order. We need to have all predecessors of a block visited
// (with the exception of loops) in order to create the right environment for that
@@ -47,11 +199,9 @@
// our code generator will complain if the inputs of a phi do not have the same
// type. The marking allows the type propagation to know which phis it needs
// to handle. We mark but do not eliminate: the elimination will be done in
- // step 5).
- {
- SsaDeadPhiElimination dead_phis(GetGraph());
- dead_phis.MarkDeadPhis();
- }
+ // step 9).
+ SsaDeadPhiElimination dead_phis_for_type_propagation(GetGraph());
+ dead_phis_for_type_propagation.MarkDeadPhis();
// 4) Propagate types of phis. At this point, phis are typed void in the general
// case, or float/double/reference when we created an equivalent phi. So we
@@ -59,17 +209,62 @@
PrimitiveTypePropagation type_propagation(GetGraph());
type_propagation.Run();
- // 5) Step 4) changes inputs of phis which may lead to dead phis again. We re-run
- // the algorithm and this time elimimates them.
- // TODO: Make this work with debug info and reference liveness. We currently
- // eagerly remove phis used in environments.
- {
- SsaDeadPhiElimination dead_phis(GetGraph());
- dead_phis.Run();
+ // 5) Mark dead phis again. Step 4) may have introduced new phis.
+ SsaDeadPhiElimination dead_phis(GetGraph());
+ dead_phis.MarkDeadPhis();
+
+ // 6) Now that the graph is correctly typed, we can get rid of redundant phis.
+ // Note that we cannot do this phase before type propagation, otherwise
+ // we could get rid of phi equivalents, whose presence is a requirement for the
+ // type propagation phase. Note that this is to satisfy statement (a) of the
+ // SsaBuilder (see ssa_builder.h).
+ SsaRedundantPhiElimination redundant_phi(GetGraph());
+ redundant_phi.Run();
+
+ // 7) Make sure environments use the right phi "equivalent": a phi marked dead
+ // can have a phi equivalent that is not dead. We must therefore update
+ // all environment uses of the dead phi to use its equivalent. Note that there
+ // can be multiple phis for the same Dex register that are live (for example
+ // when merging constants), in which case it is OK for the environments
+ // to just reference one.
+ for (HReversePostOrderIterator it(*GetGraph()); !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
+ for (HInstructionIterator it_phis(block->GetPhis()); !it_phis.Done(); it_phis.Advance()) {
+ HPhi* phi = it_phis.Current()->AsPhi();
+ // If the phi is not dead, or has no environment uses, there is nothing to do.
+ if (!phi->IsDead() || !phi->HasEnvironmentUses()) continue;
+ HInstruction* next = phi->GetNext();
+ if (!IsPhiEquivalentOf(next, phi)) continue;
+ if (next->AsPhi()->IsDead()) {
+ // If the phi equivalent is dead, check if there is another one.
+ next = next->GetNext();
+ if (!IsPhiEquivalentOf(next, phi)) continue;
+ // There can be at most two phi equivalents.
+ DCHECK(!IsPhiEquivalentOf(next->GetNext(), phi));
+ if (next->AsPhi()->IsDead()) continue;
+ }
+ // We found a live phi equivalent. Update the environment uses of `phi` with it.
+ phi->ReplaceWith(next);
+ }
}
- // 6) Clear locals.
- // TODO: Move this to a dead code eliminator phase.
+ // 8) Deal with phis to guarantee liveness of phis in case of a debuggable
+ // application. This is for satisfying statement (c) of the SsaBuilder
+ // (see ssa_builder.h).
+ if (GetGraph()->IsDebuggable()) {
+ DeadPhiHandling dead_phi_handler(GetGraph());
+ dead_phi_handler.Run();
+ }
+
+ // 9) Now that the right phis are used for the environments, and we
+ // have potentially revived dead phis in case of a debuggable application,
+ // we can eliminate phis we do not need. Regardless of the debuggable status,
+ // this phase is necessary for statement (b) of the SsaBuilder (see ssa_builder.h),
+ // as well as for the code generation, which does not deal with phis of conflicting
+ // input types.
+ dead_phis.EliminateDeadPhis();
+
+ // 10) Clear locals.
for (HInstructionIterator it(GetGraph()->GetEntryBlock()->GetInstructions());
!it.Done();
it.Advance()) {
@@ -257,12 +452,12 @@
}
HInstruction* SsaBuilder::GetReferenceTypeEquivalent(HInstruction* value) {
- if (value->IsIntConstant()) {
- DCHECK_EQ(value->AsIntConstant()->GetValue(), 0);
+ if (value->IsIntConstant() && value->AsIntConstant()->GetValue() == 0) {
return value->GetBlock()->GetGraph()->GetNullConstant();
- } else {
- DCHECK(value->IsPhi());
+ } else if (value->IsPhi()) {
return GetFloatDoubleOrReferenceEquivalentOfPhi(value->AsPhi(), Primitive::kPrimNot);
+ } else {
+ return nullptr;
}
}
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index f50da46..24dc449 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -24,6 +24,28 @@
static constexpr int kDefaultNumberOfLoops = 2;
+/**
+ * Transforms a graph into SSA form. The liveness guarantees of
+ * this transformation are listed below. A DEX register
+ * being killed means its value at a given position in the code
+ * will not be available to its environment uses. A merge in the
+ * following text is materialized as a `HPhi`.
+ *
+ * (a) Dex registers that do not require merging (that is, they do not
+ * have different values at a join block) are available to all their
+ * environment uses. Note that it does not imply the instruction will
+ * have a physical location after register allocation. See the
+ * SsaLivenessAnalysis phase.
+ *
+ * (b) Dex registers that require merging, and the merging gives
+ * incompatible types, will be killed for environment uses of that merge.
+ *
+ * (c) When the `debuggable` flag is passed to the compiler, Dex registers
+ * that require merging and have a proper type after the merge are
+ * available to all their environment uses. If the `debuggable` flag
+ * is not set, values of Dex registers only used by environments
+ * are killed.
+ */
class SsaBuilder : public HGraphVisitor {
public:
explicit SsaBuilder(HGraph* graph)
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index d009390..c0d6f42 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -230,11 +230,12 @@
}
if (current->HasEnvironment()) {
- // All instructions in the environment must be live.
+ // Handle environment uses. See statements (b) and (c) of the
+ // SsaLivenessAnalysis.
HEnvironment* environment = current->GetEnvironment();
for (size_t i = 0, e = environment->Size(); i < e; ++i) {
HInstruction* instruction = environment->GetInstructionAt(i);
- if (instruction != nullptr) {
+ if (ShouldBeLiveForEnvironment(instruction)) {
DCHECK(instruction->HasSsaIndex());
live_in->SetBit(instruction->GetSsaIndex());
instruction->GetLiveInterval()->AddUse(current, i, true);
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 9ff2f20..b57029d 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -302,7 +302,7 @@
first_range_->start_ = from;
} else {
// Instruction without uses.
- DCHECK(!defined_by_->HasUses());
+ DCHECK(!defined_by_->HasNonEnvironmentUses());
DCHECK(from == defined_by_->GetLifetimePosition());
first_range_ = last_range_ = new (allocator_) LiveRange(from, from + 2, nullptr);
}
@@ -373,13 +373,17 @@
if (location.IsUnallocated()) {
if ((location.GetPolicy() == Location::kRequiresRegister)
|| (location.GetPolicy() == Location::kSameAsFirstInput
- && locations->InAt(0).GetPolicy() == Location::kRequiresRegister)) {
+ && (locations->InAt(0).IsRegister()
+ || locations->InAt(0).IsRegisterPair()
+ || locations->InAt(0).GetPolicy() == Location::kRequiresRegister))) {
return position;
} else if ((location.GetPolicy() == Location::kRequiresFpuRegister)
|| (location.GetPolicy() == Location::kSameAsFirstInput
&& locations->InAt(0).GetPolicy() == Location::kRequiresFpuRegister)) {
return position;
}
+ } else if (location.IsRegister() || location.IsRegisterPair()) {
+ return position;
}
}
@@ -794,6 +798,22 @@
DISALLOW_COPY_AND_ASSIGN(LiveInterval);
};
+/**
+ * Analysis that computes the liveness of instructions:
+ *
+ * (a) Non-environment uses of an instruction always make
+ * the instruction live.
+ * (b) Environment uses of an instruction whose type is
+ * object (that is, non-primitive), make the instruction live.
+ * This is because objects with finalizers that delete native
+ * objects must be kept alive.
+ * (c) When the graph has the debuggable property, environment uses
+ * of an instruction that has a primitive type make the instruction live.
+ * If the graph does not have the debuggable property, the environment
+ * use has no effect, and may get a 'none' value after register allocation.
+ *
+ * (b) and (c) are implemented through SsaLivenessAnalysis::ShouldBeLiveForEnvironment.
+ */
class SsaLivenessAnalysis : public ValueObject {
public:
SsaLivenessAnalysis(const HGraph& graph, CodeGenerator* codegen)
@@ -878,6 +898,12 @@
// Update the live_out set of the block and returns whether it has changed.
bool UpdateLiveOut(const HBasicBlock& block);
+ static bool ShouldBeLiveForEnvironment(HInstruction* instruction) {
+ if (instruction == nullptr) return false;
+ if (instruction->GetBlock()->GetGraph()->IsDebuggable()) return true;
+ return instruction->GetType() == Primitive::kPrimNot;
+ }
+
const HGraph& graph_;
CodeGenerator* const codegen_;
GrowableArray<HBasicBlock*> linear_order_;
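
The ShouldBeLiveForEnvironment predicate encodes liveness rules (b) and (c) from the class comment above: reference-typed values are always kept live for their environment uses, and in debuggable mode every environment value is. A standalone restatement with hypothetical types:

    #include <cassert>

    enum class Type { kPrimNot, kPrimInt, kPrimLong, kPrimFloat };

    // Mirrors SsaLivenessAnalysis::ShouldBeLiveForEnvironment:
    //  - null entries are never live;
    //  - in a debuggable graph, everything an environment references is live;
    //  - otherwise only object references are, so finalizers stay reachable.
    bool ShouldBeLiveForEnvironment(const Type* instruction_type,
                                    bool is_debuggable) {
      if (instruction_type == nullptr) return false;
      if (is_debuggable) return true;
      return *instruction_type == Type::kPrimNot;
    }

    int main() {
      Type ref = Type::kPrimNot, i = Type::kPrimInt;
      assert(ShouldBeLiveForEnvironment(&ref, /*is_debuggable=*/false));
      assert(!ShouldBeLiveForEnvironment(&i, /*is_debuggable=*/false));
      assert(ShouldBeLiveForEnvironment(&i, /*is_debuggable=*/true));
      return 0;
    }
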
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index a05b38c..00c241b 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -332,8 +332,8 @@
const char* expected =
"BasicBlock 0, succ: 1\n"
" 0: IntConstant 0 [4, 4]\n"
- " 1: IntConstant 4 [14]\n"
- " 2: IntConstant 5 [14]\n"
+ " 1: IntConstant 4 [13]\n"
+ " 2: IntConstant 5 [13]\n"
" 3: Goto\n"
"BasicBlock 1, pred: 0, succ: 3, 2\n"
" 4: Equal(0, 0) [5]\n"
@@ -343,18 +343,17 @@
"BasicBlock 3, pred: 1, succ: 8\n"
" 7: Goto\n"
"BasicBlock 4, pred: 8, 5, succ: 6, 5\n"
- " 8: Phi(14, 8) [8, 12, 9, 9]\n"
- " 9: Equal(8, 8) [10]\n"
- " 10: If(9)\n"
+ " 8: Equal(13, 13) [9]\n"
+ " 9: If(8)\n"
"BasicBlock 5, pred: 4, succ: 4\n"
- " 11: Goto\n"
+ " 10: Goto\n"
"BasicBlock 6, pred: 4, succ: 7\n"
- " 12: Return(8)\n"
+ " 11: Return(13)\n"
"BasicBlock 7, pred: 6\n"
- " 13: Exit\n"
+ " 12: Exit\n"
"BasicBlock 8, pred: 2, 3, succ: 4\n"
- " 14: Phi(1, 2) [8]\n"
- " 15: Goto\n";
+ " 13: Phi(1, 2) [8, 8, 11]\n"
+ " 14: Goto\n";
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 5283d5d..76ddbf3 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -56,11 +56,6 @@
size_t inline_infos_start_index;
};
- struct DexRegisterEntry {
- DexRegisterMap::LocationKind kind;
- int32_t value;
- };
-
struct InlineInfoEntry {
uint32_t method_index;
};
@@ -90,11 +85,11 @@
}
}
- void AddDexRegisterEntry(DexRegisterMap::LocationKind kind, int32_t value) {
- DexRegisterEntry entry;
- entry.kind = kind;
- entry.value = value;
- dex_register_maps_.Add(entry);
+ void AddDexRegisterEntry(DexRegisterLocation::Kind kind, int32_t value) {
+ // Ensure we only use non-compressed location kinds at this stage.
+ DCHECK(DexRegisterLocation::IsShortLocationKind(kind))
+ << DexRegisterLocation::PrettyDescriptor(kind);
+ dex_register_maps_.Add(DexRegisterLocation(kind, value));
}
void AddInlineInfoEntry(uint32_t method_index) {
@@ -106,7 +101,7 @@
size_t ComputeNeededSize() const {
return CodeInfo::kFixedSize
+ ComputeStackMapSize()
- + ComputeDexRegisterMapSize()
+ + ComputeDexRegisterMapsSize()
+ ComputeInlineInfoSize();
}
@@ -114,25 +109,43 @@
return stack_maps_.Size() * StackMap::ComputeAlignedStackMapSize(stack_mask_max_);
}
- size_t ComputeDexRegisterMapSize() const {
- // We currently encode all dex register information per stack map.
- return stack_maps_.Size() * DexRegisterMap::kFixedSize
- // For each dex register entry.
- + (dex_register_maps_.Size() * DexRegisterMap::SingleEntrySize());
+ // Compute the size of the Dex register map of `entry`.
+ size_t ComputeDexRegisterMapSize(const StackMapEntry& entry) const {
+ size_t size = DexRegisterMap::kFixedSize;
+ for (size_t j = 0; j < entry.num_dex_registers; ++j) {
+ DexRegisterLocation dex_register_location =
+ dex_register_maps_.Get(entry.dex_register_maps_start_index + j);
+ size += DexRegisterMap::EntrySize(dex_register_location);
+ }
+ return size;
}
+ // Compute the size of all the Dex register maps.
+ size_t ComputeDexRegisterMapsSize() const {
+ size_t size = stack_maps_.Size() * DexRegisterMap::kFixedSize;
+ // The size of each register location depends on the type of
+ // the entry.
+ for (size_t i = 0, e = dex_register_maps_.Size(); i < e; ++i) {
+ DexRegisterLocation entry = dex_register_maps_.Get(i);
+ size += DexRegisterMap::EntrySize(entry);
+ }
+ // On ARM, the Dex register maps must be 4-byte aligned.
+ return RoundUp(size, kWordAlignment);
+ }
+
+ // Compute the size of all the inline information pieces.
size_t ComputeInlineInfoSize() const {
return inline_infos_.Size() * InlineInfo::SingleEntrySize()
// For encoding the depth.
+ (number_of_stack_maps_with_inline_info_ * InlineInfo::kFixedSize);
}
- size_t ComputeInlineInfoStart() const {
- return ComputeDexRegisterMapStart() + ComputeDexRegisterMapSize();
+ size_t ComputeDexRegisterMapsStart() const {
+ return CodeInfo::kFixedSize + ComputeStackMapSize();
}
- size_t ComputeDexRegisterMapStart() const {
- return CodeInfo::kFixedSize + ComputeStackMapSize();
+ size_t ComputeInlineInfoStart() const {
+ return ComputeDexRegisterMapsStart() + ComputeDexRegisterMapsSize();
}
void FillIn(MemoryRegion region) {
@@ -143,8 +156,8 @@
uint8_t* memory_start = region.start();
MemoryRegion dex_register_maps_region = region.Subregion(
- ComputeDexRegisterMapStart(),
- ComputeDexRegisterMapSize());
+ ComputeDexRegisterMapsStart(),
+ ComputeDexRegisterMapsSize());
MemoryRegion inline_infos_region = region.Subregion(
ComputeInlineInfoStart(),
@@ -167,20 +180,25 @@
}
if (entry.num_dex_registers != 0) {
- // Set the register map.
- MemoryRegion register_region = dex_register_maps_region.Subregion(
- next_dex_register_map_offset,
- DexRegisterMap::kFixedSize
- + entry.num_dex_registers * DexRegisterMap::SingleEntrySize());
+ // Set the Dex register map.
+ MemoryRegion register_region =
+ dex_register_maps_region.Subregion(
+ next_dex_register_map_offset,
+ ComputeDexRegisterMapSize(entry));
next_dex_register_map_offset += register_region.size();
DexRegisterMap dex_register_map(register_region);
stack_map.SetDexRegisterMapOffset(register_region.start() - memory_start);
+ // Offset in `dex_register_map` where to store the next register entry.
+ size_t offset = DexRegisterMap::kFixedSize;
for (size_t j = 0; j < entry.num_dex_registers; ++j) {
- DexRegisterEntry register_entry =
- dex_register_maps_.Get(j + entry.dex_register_maps_start_index);
- dex_register_map.SetRegisterInfo(j, register_entry.kind, register_entry.value);
+ DexRegisterLocation dex_register_location =
+ dex_register_maps_.Get(entry.dex_register_maps_start_index + j);
+ dex_register_map.SetRegisterInfo(offset, dex_register_location);
+ offset += DexRegisterMap::EntrySize(dex_register_location);
}
+ // Ensure we reached the end of the Dex registers region.
+ DCHECK_EQ(offset, register_region.size());
} else {
stack_map.SetDexRegisterMapOffset(StackMap::kNoDexRegisterMap);
}
@@ -208,7 +226,7 @@
private:
GrowableArray<StackMapEntry> stack_maps_;
- GrowableArray<DexRegisterEntry> dex_register_maps_;
+ GrowableArray<DexRegisterLocation> dex_register_maps_;
GrowableArray<InlineInfoEntry> inline_infos_;
int stack_mask_max_;
size_t number_of_stack_maps_with_inline_info_;
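
With per-entry sizes, laying out the Dex register maps becomes a running-offset computation followed by word alignment, as in ComputeDexRegisterMapsSize and FillIn above. A standalone sketch of that arithmetic; the two-tier 1-byte/5-byte encoding below is an assumption standing in for the real DexRegisterMap::EntrySize, and kFixedSize is omitted:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct DexRegisterLocation { int kind; int32_t value; };

    // Hypothetical two-tier encoding: small values pack into one byte,
    // anything else needs a kind byte plus a 4-byte value.
    size_t EntrySize(const DexRegisterLocation& loc) {
      return (loc.value >= 0 && loc.value < 32) ? 1u : 5u;
    }

    size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

    int main() {
      std::vector<DexRegisterLocation> map = {{/*kInStack*/ 0, 0},
                                              {/*kConstant*/ 4, -2}};
      size_t size = 0;
      for (const auto& loc : map) {
        std::printf("entry at offset %zu, %zu byte(s)\n", size, EntrySize(loc));
        size += EntrySize(loc);  // running offset, as in FillIn
      }
      // Like ComputeDexRegisterMapsSize, the total is word-aligned for ARM.
      std::printf("total (4-byte aligned): %zu\n", RoundUp(size, 4));
      return 0;
    }
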
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 5b02510..3a5f806 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -22,7 +22,7 @@
namespace art {
-bool SameBits(MemoryRegion region, const BitVector& bit_vector) {
+static bool SameBits(MemoryRegion region, const BitVector& bit_vector) {
for (size_t i = 0; i < region.size_in_bits(); ++i) {
if (region.LoadBit(i) != bit_vector.IsBitSet(i)) {
return false;
@@ -31,9 +31,9 @@
return true;
}
-size_t ComputeDexRegisterMapSize(size_t number_of_dex_registers) {
- return DexRegisterMap::kFixedSize
- + number_of_dex_registers * DexRegisterMap::SingleEntrySize();
+static size_t ComputeDexRegisterMapSize(const DexRegisterMap& dex_registers,
+ size_t number_of_dex_registers) {
+ return dex_registers.FindLocationOffset(number_of_dex_registers);
}
TEST(StackMapTest, Test1) {
@@ -44,8 +44,8 @@
ArenaBitVector sp_mask(&arena, 0, false);
size_t number_of_dex_registers = 2;
stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
- stream.AddDexRegisterEntry(DexRegisterMap::kInStack, 0);
- stream.AddDexRegisterEntry(DexRegisterMap::kConstant, -2);
+ stream.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, 0);
+ stream.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, -2);
size_t size = stream.ComputeNeededSize();
void* memory = arena.Alloc(size, kArenaAllocMisc);
@@ -67,14 +67,17 @@
ASSERT_TRUE(SameBits(stack_mask, sp_mask));
ASSERT_TRUE(stack_map.HasDexRegisterMap());
- DexRegisterMap dex_registers =
- code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- ASSERT_EQ(16u, dex_registers.Size());
- ASSERT_EQ(16u, ComputeDexRegisterMapSize(number_of_dex_registers));
- ASSERT_EQ(DexRegisterMap::kInStack, dex_registers.GetLocationKind(0));
- ASSERT_EQ(DexRegisterMap::kConstant, dex_registers.GetLocationKind(1));
- ASSERT_EQ(0, dex_registers.GetValue(0));
- ASSERT_EQ(-2, dex_registers.GetValue(1));
+ DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ ASSERT_EQ(6u, dex_registers.Size());
+ ASSERT_EQ(6u, ComputeDexRegisterMapSize(dex_registers, number_of_dex_registers));
+ DexRegisterLocation location0 = dex_registers.GetLocationKindAndValue(0);
+ DexRegisterLocation location1 = dex_registers.GetLocationKindAndValue(1);
+ ASSERT_EQ(DexRegisterLocation::Kind::kInStack, location0.GetKind());
+ ASSERT_EQ(DexRegisterLocation::Kind::kConstant, location1.GetKind());
+ ASSERT_EQ(DexRegisterLocation::Kind::kInStack, location0.GetInternalKind());
+ ASSERT_EQ(DexRegisterLocation::Kind::kConstantLargeValue, location1.GetInternalKind());
+ ASSERT_EQ(0, location0.GetValue());
+ ASSERT_EQ(-2, location1.GetValue());
ASSERT_FALSE(stack_map.HasInlineInfo());
}
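
The updated test distinguishes the surface kind from the internal (storage) kind: a constant such as -2 that does not fit the compressed form is stored as kConstantLargeValue but still reads back as kConstant. A sketch of that canonicalization; the fits-in-short predicate below is an assumption:

    #include <cassert>
    #include <cstdint>

    enum class Kind { kInStack, kInRegister, kInFpuRegister, kConstant,
                      kConstantLargeValue };

    // Canonicalize an internal (storage) kind to the kind client code sees,
    // mirroring DexRegisterLocation::GetKind vs. GetInternalKind.
    Kind SurfaceKind(Kind internal) {
      return internal == Kind::kConstantLargeValue ? Kind::kConstant : internal;
    }

    // Hypothetical predicate: only small non-negative constants use the
    // compressed (short) form; -2 therefore becomes kConstantLargeValue.
    Kind InternalKindForConstant(int32_t value) {
      return (value >= 0 && value < 32) ? Kind::kConstant
                                        : Kind::kConstantLargeValue;
    }

    int main() {
      Kind internal = InternalKindForConstant(-2);
      assert(internal == Kind::kConstantLargeValue);
      assert(SurfaceKind(internal) == Kind::kConstant);
      return 0;
    }
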
@@ -89,8 +92,8 @@
sp_mask1.SetBit(4);
size_t number_of_dex_registers = 2;
stream.AddStackMapEntry(0, 64, 0x3, &sp_mask1, number_of_dex_registers, 2);
- stream.AddDexRegisterEntry(DexRegisterMap::kInStack, 0);
- stream.AddDexRegisterEntry(DexRegisterMap::kConstant, -2);
+ stream.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, 0);
+ stream.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, -2);
stream.AddInlineInfoEntry(42);
stream.AddInlineInfoEntry(82);
@@ -98,8 +101,8 @@
sp_mask2.SetBit(3);
sp_mask1.SetBit(8);
stream.AddStackMapEntry(1, 128, 0xFF, &sp_mask2, number_of_dex_registers, 0);
- stream.AddDexRegisterEntry(DexRegisterMap::kInRegister, 18);
- stream.AddDexRegisterEntry(DexRegisterMap::kInFpuRegister, 3);
+ stream.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, 18);
+ stream.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, 3);
size_t size = stream.ComputeNeededSize();
void* memory = arena.Alloc(size, kArenaAllocMisc);
@@ -111,54 +114,66 @@
ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
// First stack map.
- StackMap stack_map = code_info.GetStackMapAt(0);
- ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
- ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
- ASSERT_EQ(0u, stack_map.GetDexPc());
- ASSERT_EQ(64u, stack_map.GetNativePcOffset());
- ASSERT_EQ(0x3u, stack_map.GetRegisterMask());
+ {
+ StackMap stack_map = code_info.GetStackMapAt(0);
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
+ ASSERT_EQ(0u, stack_map.GetDexPc());
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset());
+ ASSERT_EQ(0x3u, stack_map.GetRegisterMask());
- MemoryRegion stack_mask = stack_map.GetStackMask();
- ASSERT_TRUE(SameBits(stack_mask, sp_mask1));
+ MemoryRegion stack_mask = stack_map.GetStackMask();
+ ASSERT_TRUE(SameBits(stack_mask, sp_mask1));
- ASSERT_TRUE(stack_map.HasDexRegisterMap());
- DexRegisterMap dex_registers =
- code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- ASSERT_EQ(16u, dex_registers.Size());
- ASSERT_EQ(16u, ComputeDexRegisterMapSize(number_of_dex_registers));
- ASSERT_EQ(DexRegisterMap::kInStack, dex_registers.GetLocationKind(0));
- ASSERT_EQ(DexRegisterMap::kConstant, dex_registers.GetLocationKind(1));
- ASSERT_EQ(0, dex_registers.GetValue(0));
- ASSERT_EQ(-2, dex_registers.GetValue(1));
+ ASSERT_TRUE(stack_map.HasDexRegisterMap());
+ DexRegisterMap dex_registers =
+ code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ ASSERT_EQ(6u, dex_registers.Size());
+ ASSERT_EQ(6u, ComputeDexRegisterMapSize(dex_registers, number_of_dex_registers));
+ DexRegisterLocation location0 = dex_registers.GetLocationKindAndValue(0);
+ DexRegisterLocation location1 = dex_registers.GetLocationKindAndValue(1);
+ ASSERT_EQ(DexRegisterLocation::Kind::kInStack, location0.GetKind());
+ ASSERT_EQ(DexRegisterLocation::Kind::kConstant, location1.GetKind());
+ ASSERT_EQ(DexRegisterLocation::Kind::kInStack, location0.GetInternalKind());
+ ASSERT_EQ(DexRegisterLocation::Kind::kConstantLargeValue, location1.GetInternalKind());
+ ASSERT_EQ(0, location0.GetValue());
+ ASSERT_EQ(-2, location1.GetValue());
- ASSERT_TRUE(stack_map.HasInlineInfo());
- InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
- ASSERT_EQ(2u, inline_info.GetDepth());
- ASSERT_EQ(42u, inline_info.GetMethodReferenceIndexAtDepth(0));
- ASSERT_EQ(82u, inline_info.GetMethodReferenceIndexAtDepth(1));
+ ASSERT_TRUE(stack_map.HasInlineInfo());
+ InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+ ASSERT_EQ(2u, inline_info.GetDepth());
+ ASSERT_EQ(42u, inline_info.GetMethodReferenceIndexAtDepth(0));
+ ASSERT_EQ(82u, inline_info.GetMethodReferenceIndexAtDepth(1));
+ }
// Second stack map.
- stack_map = code_info.GetStackMapAt(1);
- ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1u)));
- ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(128u)));
- ASSERT_EQ(1u, stack_map.GetDexPc());
- ASSERT_EQ(128u, stack_map.GetNativePcOffset());
- ASSERT_EQ(0xFFu, stack_map.GetRegisterMask());
+ {
+ StackMap stack_map = code_info.GetStackMapAt(1);
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1u)));
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(128u)));
+ ASSERT_EQ(1u, stack_map.GetDexPc());
+ ASSERT_EQ(128u, stack_map.GetNativePcOffset());
+ ASSERT_EQ(0xFFu, stack_map.GetRegisterMask());
- stack_mask = stack_map.GetStackMask();
- ASSERT_TRUE(SameBits(stack_mask, sp_mask2));
+ MemoryRegion stack_mask = stack_map.GetStackMask();
+ ASSERT_TRUE(SameBits(stack_mask, sp_mask2));
- ASSERT_TRUE(stack_map.HasDexRegisterMap());
- dex_registers =
- code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- ASSERT_EQ(16u, dex_registers.Size());
- ASSERT_EQ(16u, ComputeDexRegisterMapSize(number_of_dex_registers));
- ASSERT_EQ(DexRegisterMap::kInRegister, dex_registers.GetLocationKind(0));
- ASSERT_EQ(DexRegisterMap::kInFpuRegister, dex_registers.GetLocationKind(1));
- ASSERT_EQ(18, dex_registers.GetValue(0));
- ASSERT_EQ(3, dex_registers.GetValue(1));
+ ASSERT_TRUE(stack_map.HasDexRegisterMap());
+ DexRegisterMap dex_registers =
+ code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ ASSERT_EQ(2u, dex_registers.Size());
+ ASSERT_EQ(2u, ComputeDexRegisterMapSize(dex_registers, number_of_dex_registers));
+ DexRegisterLocation location0 = dex_registers.GetLocationKindAndValue(0);
+ DexRegisterLocation location1 = dex_registers.GetLocationKindAndValue(1);
+ ASSERT_EQ(DexRegisterLocation::Kind::kInRegister, location0.GetKind());
+ ASSERT_EQ(DexRegisterLocation::Kind::kInFpuRegister, location1.GetKind());
+ ASSERT_EQ(DexRegisterLocation::Kind::kInRegister, location0.GetInternalKind());
+ ASSERT_EQ(DexRegisterLocation::Kind::kInFpuRegister, location1.GetInternalKind());
+ ASSERT_EQ(18, location0.GetValue());
+ ASSERT_EQ(3, location1.GetValue());
- ASSERT_FALSE(stack_map.HasInlineInfo());
+ ASSERT_FALSE(stack_map.HasInlineInfo());
+ }
}
} // namespace art
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 8f4208b..90170ce 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -451,6 +451,36 @@
}
+void X86Assembler::movhpd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x16);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movhpd(const Address& dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x17);
+ EmitOperand(src, dst);
+}
+
+
+void X86Assembler::psrldq(XmmRegister reg, const Immediate& shift_count) {
+ DCHECK(shift_count.is_uint8());
+
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x73);
+ EmitXmmRegisterOperand(3, reg);
+ EmitUint8(shift_count.value());
+}
+
+
void X86Assembler::psrlq(XmmRegister reg, const Immediate& shift_count) {
DCHECK(shift_count.is_uint8());
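
The new instructions follow the standard SSE2 encodings: movhpd is 66 0F 16 /r (load) and 66 0F 17 /r (store), and psrldq is 66 0F 73 /3 ib, with the /3 opcode extension carried in the ModRM reg field, which is what EmitXmmRegisterOperand(3, reg) produces. A standalone sketch of the psrldq byte sequence:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Builds the ModRM byte for a register-direct XMM operand, with the
    // opcode extension (here /3 for psrldq) in the reg field.
    uint8_t XmmModRM(uint8_t opcode_ext, uint8_t xmm) {
      return static_cast<uint8_t>(0xC0 | (opcode_ext << 3) | xmm);
    }

    // psrldq xmm, imm8: 66 0F 73 /3 ib (shifts the whole 128-bit register
    // right by `shift` bytes).
    std::vector<uint8_t> EncodePsrldq(uint8_t xmm, uint8_t shift) {
      return {0x66, 0x0F, 0x73, XmmModRM(3, xmm), shift};
    }

    int main() {
      for (uint8_t b : EncodePsrldq(/*xmm=*/1, /*shift=*/8))
        std::printf("%02X ", b);
      std::printf("\n");  // 66 0F 73 D9 08  ==  psrldq xmm1, 8
      return 0;
    }
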
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 2dde907..4d20db0 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -277,6 +277,11 @@
void psrlq(XmmRegister reg, const Immediate& shift_count);
void punpckldq(XmmRegister dst, XmmRegister src);
+ void movhpd(XmmRegister dst, const Address& src);
+ void movhpd(const Address& dst, XmmRegister src);
+
+ void psrldq(XmmRegister reg, const Immediate& shift_count);
+
void addsd(XmmRegister dst, XmmRegister src);
void addsd(XmmRegister dst, const Address& src);
void subsd(XmmRegister dst, XmmRegister src);
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 8572f4d..bb80a70 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -56,6 +56,7 @@
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "image_writer.h"
+#include "interpreter/unstarted_runtime.h"
#include "leb128.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -873,14 +874,11 @@
// For R6, only interpreter mode is working.
// TODO: fix compiler for Mips32r6.
compiler_filter_string = "interpret-only";
- } else if (instruction_set_ == kMips64) {
- // For Mips64, can only compile in interpreter mode.
- // TODO: fix compiler for Mips64.
- compiler_filter_string = "interpret-only";
} else {
compiler_filter_string = "speed";
}
}
+
CHECK(compiler_filter_string != nullptr);
CompilerOptions::CompilerFilter compiler_filter = CompilerOptions::kDefaultCompilerFilter;
if (strcmp(compiler_filter_string, "verify-none") == 0) {
@@ -1544,8 +1542,14 @@
}
}
runtime->GetClassLinker()->FixupDexCaches(runtime->GetResolutionMethod());
+
+ // Initialize maps for the unstarted runtime. This needs to happen here, as running class
+ // initializers (clinits) requires these maps to be set up.
+ interpreter::UnstartedRuntimeInitialize();
+
runtime->GetClassLinker()->RunRootClinits();
runtime_ = runtime;
+
return true;
}
@@ -1647,9 +1651,10 @@
void LogCompletionTime() {
// Note: when creation of a runtime fails, e.g., when trying to compile an app but when there
// is no image, there won't be a Runtime::Current().
+ // Note: driver creation can fail when loading an invalid dex file.
LOG(INFO) << "dex2oat took " << PrettyDuration(NanoTime() - start_ns_)
<< " (threads: " << thread_count_ << ") "
- << ((Runtime::Current() != nullptr) ?
+ << ((Runtime::Current() != nullptr && driver_.get() != nullptr) ?
driver_->GetMemoryUsageString(kIsDebugBuild || VLOG_IS_ON(compiler)) :
"");
}
diff --git a/disassembler/disassembler_mips64.cc b/disassembler/disassembler_mips64.cc
index 2d3239f..7b289d0 100644
--- a/disassembler/disassembler_mips64.cc
+++ b/disassembler/disassembler_mips64.cc
@@ -43,7 +43,7 @@
static const uint32_t kITypeMask = (0x3f << kOpcodeShift);
static const uint32_t kJTypeMask = (0x3f << kOpcodeShift);
static const uint32_t kRTypeMask = ((0x3f << kOpcodeShift) | (0x3f));
-static const uint32_t kSpecial2Mask = (0x3f << kOpcodeShift);
+static const uint32_t kSpecial0Mask = (0x3f << kOpcodeShift);
static const uint32_t kFpMask = kRTypeMask;
static const Mips64Instruction gMips64Instructions[] = {
@@ -58,24 +58,15 @@
{ kRTypeMask, 4, "sllv", "DTS", },
{ kRTypeMask, 6, "srlv", "DTS", },
{ kRTypeMask, 7, "srav", "DTS", },
- { kRTypeMask, 8, "jr", "S", },
- // rd = 31 is implicit.
- { kRTypeMask | (0x1f << 11), 9 | (31 << 11), "jalr", "S", },
+ { kRTypeMask | (0x1f << 11), 9 | (31 << 11), "jalr", "S", }, // rd = 31 is implicit.
+ { kRTypeMask | (0x1f << 11), 9, "jr", "S", }, // rd = 0 is implicit.
{ kRTypeMask, 9, "jalr", "DS", }, // General case.
- { kRTypeMask | (0x1f << 6), 10, "movz", "DST", },
- { kRTypeMask | (0x1f << 6), 11, "movn", "DST", },
{ kRTypeMask, 12, "syscall", "", }, // TODO: code
{ kRTypeMask, 13, "break", "", }, // TODO: code
{ kRTypeMask, 15, "sync", "", }, // TODO: type
- { kRTypeMask, 16, "mfhi", "D", },
- { kRTypeMask, 17, "mthi", "S", },
- { kRTypeMask, 18, "mflo", "D", },
- { kRTypeMask, 19, "mtlo", "S", },
- { kRTypeMask, 24, "mult", "ST", },
- { kRTypeMask, 25, "multu", "ST", },
- { kRTypeMask, 26, "div", "ST", },
- { kRTypeMask, 27, "divu", "ST", },
- { kRTypeMask, 32, "add", "DST", },
+ { kRTypeMask, 20, "dsllv", "DTS", },
+ { kRTypeMask, 22, "dsrlv", "DTS", },
+ { kRTypeMask, 23, "dsrav", "DTS", },
{ kRTypeMask, 33, "addu", "DST", },
{ kRTypeMask, 34, "sub", "DST", },
{ kRTypeMask, 35, "subu", "DST", },
@@ -85,27 +76,37 @@
{ kRTypeMask, 39, "nor", "DST", },
{ kRTypeMask, 42, "slt", "DST", },
{ kRTypeMask, 43, "sltu", "DST", },
- { kRTypeMask, 44, "dadd", "DST", },
{ kRTypeMask, 45, "daddu", "DST", },
{ kRTypeMask, 46, "dsub", "DST", },
{ kRTypeMask, 47, "dsubu", "DST", },
- // 0, 48, tge
- // 0, 49, tgeu
- // 0, 50, tlt
- // 0, 51, tltu
- // 0, 52, teq
- // 0, 54, tne
+ // TODO: seleqz, selnez
+ { kRTypeMask, 56, "dsll", "DTA", },
+ { kRTypeMask, 58, "dsrl", "DTA", },
+ { kRTypeMask, 59, "dsra", "DTA", },
+ { kRTypeMask, 60, "dsll32", "DTA", },
+ { kRTypeMask | (0x1f << 21), 62 | (1 << 21), "drotr32", "DTA", },
+ { kRTypeMask, 62, "dsrl32", "DTA", },
+ { kRTypeMask, 63, "dsra32", "DTA", },
- // SPECIAL2
- { kSpecial2Mask | 0x7ff, (28 << kOpcodeShift) | 2, "mul", "DST" },
- { kSpecial2Mask | 0x7ff, (28 << kOpcodeShift) | 32, "clz", "DS" },
- { kSpecial2Mask | 0x7ff, (28 << kOpcodeShift) | 36, "dclz", "DS" },
- { kSpecial2Mask | 0xffff, (28 << kOpcodeShift) | 0, "madd", "ST" },
- { kSpecial2Mask | 0xffff, (28 << kOpcodeShift) | 1, "maddu", "ST" },
- { kSpecial2Mask | 0xffff, (28 << kOpcodeShift) | 2, "mul", "DST" },
- { kSpecial2Mask | 0xffff, (28 << kOpcodeShift) | 4, "msub", "ST" },
- { kSpecial2Mask | 0xffff, (28 << kOpcodeShift) | 5, "msubu", "ST" },
- { kSpecial2Mask | 0x3f, (28 << kOpcodeShift) | 0x3f, "sdbbp", "" },
+ // SPECIAL0
+ { kSpecial0Mask | 0x7ff, (2 << 6) | 24, "mul", "DST" },
+ { kSpecial0Mask | 0x7ff, (3 << 6) | 24, "muh", "DST" },
+ { kSpecial0Mask | 0x7ff, (2 << 6) | 25, "mulu", "DST" },
+ { kSpecial0Mask | 0x7ff, (3 << 6) | 25, "muhu", "DST" },
+ { kSpecial0Mask | 0x7ff, (2 << 6) | 26, "div", "DST" },
+ { kSpecial0Mask | 0x7ff, (3 << 6) | 26, "mod", "DST" },
+ { kSpecial0Mask | 0x7ff, (2 << 6) | 27, "divu", "DST" },
+ { kSpecial0Mask | 0x7ff, (3 << 6) | 27, "modu", "DST" },
+ { kSpecial0Mask | 0x7ff, (2 << 6) | 28, "dmul", "DST" },
+ { kSpecial0Mask | 0x7ff, (3 << 6) | 28, "dmuh", "DST" },
+ { kSpecial0Mask | 0x7ff, (2 << 6) | 29, "dmulu", "DST" },
+ { kSpecial0Mask | 0x7ff, (3 << 6) | 29, "dmuhu", "DST" },
+ { kSpecial0Mask | 0x7ff, (2 << 6) | 30, "ddiv", "DST" },
+ { kSpecial0Mask | 0x7ff, (3 << 6) | 30, "dmod", "DST" },
+ { kSpecial0Mask | 0x7ff, (2 << 6) | 31, "ddivu", "DST" },
+ { kSpecial0Mask | 0x7ff, (3 << 6) | 31, "dmodu", "DST" },
+ // TODO: [d]clz, [d]clo
+ // TODO: sdbbp
// J-type instructions.
{ kJTypeMask, 2 << kOpcodeShift, "j", "L" },
@@ -116,33 +117,31 @@
{ kITypeMask, 5 << kOpcodeShift, "bne", "STB" },
{ kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (1 << 16), "bgez", "SB" },
{ kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (0 << 16), "bltz", "SB" },
- { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (2 << 16), "bltzl", "SB" },
- { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (16 << 16), "bltzal", "SB" },
- { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (18 << 16), "bltzall", "SB" },
{ kITypeMask | (0x1f << 16), 6 << kOpcodeShift | (0 << 16), "blez", "SB" },
{ kITypeMask | (0x1f << 16), 7 << kOpcodeShift | (0 << 16), "bgtz", "SB" },
+ { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (6 << 16), "dahi", "Si", },
+ { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (30 << 16), "dati", "Si", },
{ 0xffff0000, (4 << kOpcodeShift), "b", "B" },
{ 0xffff0000, (1 << kOpcodeShift) | (17 << 16), "bal", "B" },
- { kITypeMask, 8 << kOpcodeShift, "addi", "TSi", },
{ kITypeMask, 9 << kOpcodeShift, "addiu", "TSi", },
{ kITypeMask, 10 << kOpcodeShift, "slti", "TSi", },
{ kITypeMask, 11 << kOpcodeShift, "sltiu", "TSi", },
{ kITypeMask, 12 << kOpcodeShift, "andi", "TSi", },
{ kITypeMask, 13 << kOpcodeShift, "ori", "TSi", },
- { kITypeMask, 14 << kOpcodeShift, "ori", "TSi", },
- { kITypeMask, 15 << kOpcodeShift, "lui", "TI", },
-
- { kITypeMask, 24 << kOpcodeShift, "daddi", "TSi", },
+ { kITypeMask, 14 << kOpcodeShift, "xori", "TSi", },
+ { kITypeMask | (0x1f << 21), 15 << kOpcodeShift, "lui", "TI", },
+ { kITypeMask, 15 << kOpcodeShift, "aui", "TSI", },
{ kITypeMask, 25 << kOpcodeShift, "daddiu", "TSi", },
-
+ { kITypeMask, 29 << kOpcodeShift, "daui", "TSi", },
{ kITypeMask, 32u << kOpcodeShift, "lb", "TO", },
{ kITypeMask, 33u << kOpcodeShift, "lh", "TO", },
{ kITypeMask, 35u << kOpcodeShift, "lw", "TO", },
{ kITypeMask, 36u << kOpcodeShift, "lbu", "TO", },
{ kITypeMask, 37u << kOpcodeShift, "lhu", "TO", },
+ { kITypeMask, 39u << kOpcodeShift, "lwu", "TO", },
{ kITypeMask, 40u << kOpcodeShift, "sb", "TO", },
{ kITypeMask, 41u << kOpcodeShift, "sh", "TO", },
{ kITypeMask, 43u << kOpcodeShift, "sw", "TO", },
@@ -154,27 +153,31 @@
{ kITypeMask, 63u << kOpcodeShift, "sd", "TO", },
// Floating point.
- { kFpMask, kCop1 | 0, "add", "fdst" },
- { kFpMask, kCop1 | 1, "sub", "fdst" },
- { kFpMask, kCop1 | 2, "mul", "fdst" },
- { kFpMask, kCop1 | 3, "div", "fdst" },
- { kFpMask | (0x1f << 16), kCop1 | 4, "sqrt", "fdst" },
- { kFpMask | (0x1f << 16), kCop1 | 5, "abs", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 6, "mov", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 7, "neg", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 8, "round.l", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 9, "trunc.l", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 10, "ceil.l", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 11, "floor.l", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 12, "round.w", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 13, "trunc.w", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 14, "ceil.w", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 15, "floor.w", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 32, "cvt.s", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 33, "cvt.d", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 36, "cvt.w", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 37, "cvt.l", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 38, "cvt.ps", "fds" },
+ { kFpMask | (0x1f << 21), kCop1 | (0x00 << 21), "mfc1", "Td" },
+ { kFpMask | (0x1f << 21), kCop1 | (0x01 << 21), "dmfc1", "Td" },
+ { kFpMask | (0x1f << 21), kCop1 | (0x04 << 21), "mtc1", "Td" },
+ { kFpMask | (0x1f << 21), kCop1 | (0x05 << 21), "dmtc1", "Td" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 0, "add", "fadt" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 1, "sub", "fadt" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 2, "mul", "fadt" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 3, "div", "fadt" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 4, "sqrt", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 5, "abs", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 6, "mov", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 7, "neg", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 8, "round.l", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 9, "trunc.l", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 10, "ceil.l", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 11, "floor.l", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 12, "round.w", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 13, "trunc.w", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 14, "ceil.w", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 15, "floor.w", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 32, "cvt.s", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 33, "cvt.d", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 36, "cvt.w", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 37, "cvt.l", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 38, "cvt.ps", "fad" },
};
static uint32_t ReadU32(const uint8_t* ptr) {
@@ -216,6 +219,7 @@
break;
case 'D': args << 'r' << rd; break;
case 'd': args << 'f' << rd; break;
+ case 'a': args << 'f' << sa; break;
case 'f': // Floating point "fmt".
{
size_t fmt = (instruction >> 21) & 0x7; // TODO: other fmts?
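
The new 'a' operand letter prints the sa field as an FPU register; in COP1 arithmetic encodings that slot holds fd, which is why the "fadt" operand strings above read fmt, fd, fs, ft. A standalone sketch of the standard MIPS R-type field extraction assumed here:

    #include <cstdint>
    #include <cstdio>

    // Standard MIPS R-type field layout:
    // opcode(31-26) rs(25-21) rt(20-16) rd(15-11) sa(10-6) funct(5-0).
    struct Fields { uint32_t opcode, rs, rt, rd, sa, funct; };

    static Fields Decode(uint32_t insn) {
      return {insn >> 26, (insn >> 21) & 0x1Fu, (insn >> 16) & 0x1Fu,
              (insn >> 11) & 0x1Fu, (insn >> 6) & 0x1Fu, insn & 0x3Fu};
    }

    int main() {
      // 0x46220100 is add.d f4, f0, f2: fmt=17 (D) in rs, ft=2 in rt,
      // fs=0 in rd, and fd=4 in sa -- the slot the 'a' letter prints.
      Fields f = Decode(0x46220100u);
      std::printf("fmt=%u ft=%u fs=%u fd=%u funct=%u\n",
                  f.rs, f.rt, f.rd, f.sa, f.funct);
      return 0;
    }
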
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index aab4f8b..9ae3b79 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1039,6 +1039,33 @@
}
}
+ void DumpRegisterMapping(std::ostream& os,
+ size_t dex_register_num,
+ DexRegisterLocation::Kind kind,
+ int32_t value,
+ const std::string& prefix = "v",
+ const std::string& suffix = "") {
+ os << " " << prefix << dex_register_num << ": "
+ << DexRegisterLocation::PrettyDescriptor(kind)
+ << " (" << value << ")" << suffix << '\n';
+ }
+
+ void DumpStackMapHeader(std::ostream& os, const CodeInfo& code_info, size_t stack_map_num) {
+ StackMap stack_map = code_info.GetStackMapAt(stack_map_num);
+ os << " StackMap " << stack_map_num
+ << std::hex
+ << " (dex_pc=0x" << stack_map.GetDexPc()
+ << ", native_pc_offset=0x" << stack_map.GetNativePcOffset()
+ << ", register_mask=0x" << stack_map.GetRegisterMask()
+ << std::dec
+ << ", stack_mask=0b";
+ MemoryRegion stack_mask = stack_map.GetStackMask();
+ for (size_t i = 0, e = stack_mask.size_in_bits(); i < e; ++i) {
+ os << stack_mask.LoadBit(e - i - 1);
+ }
+ os << ")\n";
+ }
+
// Display a CodeInfo object emitted by the optimizing compiler.
void DumpCodeInfo(std::ostream& os,
const CodeInfo& code_info,
@@ -1049,27 +1076,21 @@
os << " Optimized CodeInfo (size=" << code_info_size
<< ", number_of_dex_registers=" << number_of_dex_registers
<< ", number_of_stack_maps=" << number_of_stack_maps << ")\n";
+
+ // Display stack maps along with Dex register maps.
for (size_t i = 0; i < number_of_stack_maps; ++i) {
StackMap stack_map = code_info.GetStackMapAt(i);
- // TODO: Display stack_mask value.
- os << " StackMap " << i
- << std::hex
- << " (dex_pc=0x" << stack_map.GetDexPc()
- << ", native_pc_offset=0x" << stack_map.GetNativePcOffset()
- << ", register_mask=0x" << stack_map.GetRegisterMask()
- << std::dec
- << ")\n";
+ DumpStackMapHeader(os, code_info, i);
if (stack_map.HasDexRegisterMap()) {
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
for (size_t j = 0; j < number_of_dex_registers; ++j) {
- os << " v" << j << ": "
- << DexRegisterMap::PrettyDescriptor(dex_register_map.GetLocationKind(j))
- << " (" << dex_register_map.GetValue(j) << ")\n";
+ DexRegisterLocation location = dex_register_map.GetLocationKindAndValue(j);
+ DumpRegisterMapping(os, j, location.GetInternalKind(), location.GetValue());
}
}
- // TODO: Display more information from code_info.
}
+ // TODO: Dump the stack map's inline information.
}
// Display a vmap table.
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 3c6a23d..9584064 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -1175,6 +1175,9 @@
input_oat_filename = "input-oat-file";
}
input_oat.reset(new File(input_oat_fd, input_oat_filename, false));
+ if (input_oat_fd == output_oat_fd) {
+ input_oat.get()->DisableAutoClose();
+ }
if (input_oat == nullptr) {
// Unlikely, but ensure exhaustive logging in non-0 exit code case
LOG(ERROR) << "Failed to open input oat file by its FD" << input_oat_fd;
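
DisableAutoClose above guards against a double close: when the caller passes the same file descriptor for input and output, two File objects would otherwise both close it on destruction. A standalone sketch of the hazard and the fix, with a hypothetical RAII wrapper in place of ART's File (POSIX-only):

    #include <fcntl.h>
    #include <unistd.h>
    #include <cstdio>

    // Hypothetical stand-in for ART's File: owns an fd unless auto-close is off.
    class FdFile {
     public:
      explicit FdFile(int fd) : fd_(fd), auto_close_(true) {}
      ~FdFile() { if (auto_close_ && fd_ >= 0) close(fd_); }
      void DisableAutoClose() { auto_close_ = false; }
      int fd() const { return fd_; }
     private:
      int fd_;
      bool auto_close_;
    };

    int main() {
      int fd = open("/tmp/oat-demo", O_RDWR | O_CREAT, 0600);
      {
        FdFile input(fd);
        FdFile output(fd);
        // Same fd behind both wrappers: without this, `input`'s destructor
        // would close the fd out from under `output`.
        input.DisableAutoClose();
      }  // only `output` closes fd here
      std::printf("done\n");
      return 0;
    }
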
diff --git a/runtime/Android.mk b/runtime/Android.mk
index c5cf890..8f20381 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -80,6 +80,7 @@
interpreter/interpreter.cc \
interpreter/interpreter_common.cc \
interpreter/interpreter_switch_impl.cc \
+ interpreter/unstarted_runtime.cc \
java_vm_ext.cc \
jdwp/jdwp_event.cc \
jdwp/jdwp_expand_buf.cc \
@@ -135,6 +136,7 @@
native/sun_misc_Unsafe.cc \
oat.cc \
oat_file.cc \
+ oat_file_assistant.cc \
object_lock.cc \
offsets.cc \
os_linux.cc \
@@ -151,7 +153,6 @@
thread.cc \
thread_list.cc \
thread_pool.cc \
- throw_location.cc \
trace.cc \
transaction.cc \
profiler.cc \
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 6f1b826..8cb95f1 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -422,20 +422,120 @@
move $v1, $zero
END art_quick_do_long_jump
-UNIMPLEMENTED art_quick_deliver_exception
-UNIMPLEMENTED art_quick_throw_null_pointer_exception
-UNIMPLEMENTED art_quick_throw_div_zero
-UNIMPLEMENTED art_quick_throw_array_bounds
-UNIMPLEMENTED art_quick_throw_stack_overflow
-UNIMPLEMENTED art_quick_throw_no_such_method
+ /*
+ * Called by managed code, saves most registers (forms basis of long jump
+ * context) and passes the bottom of the stack.
+ * artDeliverExceptionFromCode will place the callee save Method* at
+ * the bottom of the thread. On entry v0 holds Throwable*
+ */
+ENTRY art_quick_deliver_exception
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artDeliverExceptionFromCode
+ jalr $zero, $t9 # artDeliverExceptionFromCode(Throwable*, Thread*)
+ move $a1, rSELF # pass Thread::Current
+END art_quick_deliver_exception
-UNIMPLEMENTED art_quick_invoke_interface_trampoline
-UNIMPLEMENTED art_quick_invoke_interface_trampoline_with_access_check
+ /*
+ * Called by managed code to create and deliver a NullPointerException
+ */
+ .extern artThrowNullPointerExceptionFromCode
+ENTRY art_quick_throw_null_pointer_exception
+.Lart_quick_throw_null_pointer_exception_gp_set:
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artThrowNullPointerExceptionFromCode
+ jalr $zero, $t9 # artThrowNullPointerExceptionFromCode(Thread*)
+ move $a0, rSELF # pass Thread::Current
+END art_quick_throw_null_pointer_exception
-UNIMPLEMENTED art_quick_invoke_static_trampoline_with_access_check
-UNIMPLEMENTED art_quick_invoke_direct_trampoline_with_access_check
-UNIMPLEMENTED art_quick_invoke_super_trampoline_with_access_check
-UNIMPLEMENTED art_quick_invoke_virtual_trampoline_with_access_check
+ /*
+ * Called by managed code to create and deliver an ArithmeticException
+ */
+ .extern artThrowDivZeroFromCode
+ENTRY art_quick_throw_div_zero
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artThrowDivZeroFromCode
+ jalr $zero, $t9 # artThrowDivZeroFromCode(Thread*)
+ move $a0, rSELF # pass Thread::Current
+END art_quick_throw_div_zero
+
+ /*
+ * Called by managed code to create and deliver an
+ * ArrayIndexOutOfBoundsException
+ */
+ .extern artThrowArrayBoundsFromCode
+ENTRY art_quick_throw_array_bounds
+.Lart_quick_throw_array_bounds_gp_set:
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artThrowArrayBoundsFromCode
+ jalr $zero, $t9 # artThrowArrayBoundsFromCode(index, limit, Thread*)
+ move $a2, rSELF # pass Thread::Current
+END art_quick_throw_array_bounds
+
+ /*
+ * Called by managed code to create and deliver a StackOverflowError.
+ */
+ .extern artThrowStackOverflowFromCode
+ENTRY art_quick_throw_stack_overflow
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artThrowStackOverflowFromCode
+ jalr $zero, $t9 # artThrowStackOverflowFromCode(Thread*)
+ move $a0, rSELF # pass Thread::Current
+END art_quick_throw_stack_overflow
+
+ /*
+ * Called by managed code to create and deliver a NoSuchMethodError.
+ */
+ .extern artThrowNoSuchMethodFromCode
+ENTRY art_quick_throw_no_such_method
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artThrowNoSuchMethodFromCode
+ jalr $zero, $t9 # artThrowNoSuchMethodFromCode(method_idx, Thread*)
+ move $a1, rSELF # pass Thread::Current
+END art_quick_throw_no_such_method
+
+ /*
+ * All generated callsites for interface invokes and invocation slow paths will load arguments
+ * as usual - except instead of loading arg0/$a0 with the target Method*, arg0/$a0 will contain
+ * the method_idx. This wrapper will save arg1-arg3, load the caller's Method*, align the
+ * stack and call the appropriate C helper.
+ * NOTE: "this" is the first visible argument of the target, and so can be found in arg1/$a1.
+ *
+ * The helper will attempt to locate the target and return a 128-bit result in $v0/$v1 consisting
+ * of the target Method* in $v0 and method->code_ in $v1.
+ *
+ * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the
+ * thread and we branch to another stub to deliver it.
+ *
+ * On success this wrapper will restore arguments and *jump* to the target, leaving the ra
+ * pointing back to the original caller.
+ */
+.macro INVOKE_TRAMPOLINE c_name, cxx_name
+ .extern \cxx_name
+ENTRY \c_name
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME # save callee saves in case allocation triggers GC
+ lwu $a2, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE($sp) # pass caller Method*
+ move $a3, rSELF # pass Thread::Current
+ jal \cxx_name # (method_idx, this, caller, Thread*, $sp)
+ move $a4, $sp # pass $sp
+ move $a0, $v0 # save target Method*
+ move $t9, $v1 # save $v0->code_
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ beq $v0, $zero, 1f
+ nop
+ jalr $zero, $t9
+ nop
+1:
+ DELIVER_PENDING_EXCEPTION
+END \c_name
+.endm
+
+INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline
+INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
+
+INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
# On entry:
# t0 = shorty
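
The trampoline contract described above (target Method* in $v0, its code pointer in $v1, null/null plus a pending exception on failure) reads as resolve-then-tail-call. A C-level sketch of the control flow the INVOKE_TRAMPOLINE macro implements, with hypothetical resolver and exception-delivery hooks:

    #include <cstdio>

    struct Method { const char* name; };
    struct ResolveResult { Method* method; void (*code)(Method*); };

    // Hypothetical resolver standing in for artInvoke*TrampolineWithAccessCheck:
    // returns {nullptr, nullptr} and leaves a pending exception on failure.
    ResolveResult Resolve(bool ok) {
      static Method target{"target"};
      if (!ok) return {nullptr, nullptr};
      return {&target, [](Method* m) { std::printf("invoking %s\n", m->name); }};
    }

    void DeliverPendingException() { std::printf("pending exception\n"); }

    void InvokeTrampoline(bool ok) {
      ResolveResult r = Resolve(ok);  // jal \cxx_name
      if (r.method == nullptr) {      // beq $v0, $zero, 1f
        DeliverPendingException();    // DELIVER_PENDING_EXCEPTION
        return;
      }
      r.code(r.method);               // jalr $zero, $t9 (a tail jump in asm)
    }

    int main() {
      InvokeTrampoline(true);
      InvokeTrampoline(false);
      return 0;
    }
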
@@ -454,7 +554,7 @@
li $t9, 74 # put char 'J' into t9
beq $t9, $t3, 3f # branch if result type char == 'J'
nop
- lwu $\gpu, 0($t1)
+ lw $\gpu, 0($t1)
sw $\gpu, 0($v0)
daddiu $v0, 4
daddiu $t1, 4
@@ -699,63 +799,534 @@
sw $v1, 4($a4) # store the other half of the result
END art_quick_invoke_static_stub
+ /*
+ * Entry from managed code that calls artHandleFillArrayDataFromCode and
+ * delivers exception on failure.
+ */
+ .extern artHandleFillArrayDataFromCode
+ENTRY art_quick_handle_fill_data
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artHandleFillArrayDataFromCode # (payload offset, Array*, method, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_handle_fill_data
+ /*
+ * Entry from managed code that calls artLockObjectFromCode; it may block for GC.
+ */
+ .extern artLockObjectFromCode
+ENTRY art_quick_lock_object
+ beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
+ nop
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case we block
+ jal artLockObjectFromCode # (Object* obj, Thread*)
+ move $a1, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_lock_object
-UNIMPLEMENTED art_quick_handle_fill_data
-UNIMPLEMENTED art_quick_lock_object
-UNIMPLEMENTED art_quick_unlock_object
-UNIMPLEMENTED art_quick_check_cast
-UNIMPLEMENTED art_quick_aput_obj_with_null_and_bound_check
-UNIMPLEMENTED art_quick_aput_obj_with_bound_check
-UNIMPLEMENTED art_quick_aput_obj
-UNIMPLEMENTED art_quick_initialize_static_storage
-UNIMPLEMENTED art_quick_initialize_type
-UNIMPLEMENTED art_quick_initialize_type_and_verify_access
-UNIMPLEMENTED art_quick_get_boolean_static
-UNIMPLEMENTED art_quick_get_byte_static
-UNIMPLEMENTED art_quick_get_char_static
-UNIMPLEMENTED art_quick_get_short_static
-UNIMPLEMENTED art_quick_get32_static
-UNIMPLEMENTED art_quick_get64_static
-UNIMPLEMENTED art_quick_get_obj_static
-UNIMPLEMENTED art_quick_get_boolean_instance
-UNIMPLEMENTED art_quick_get_byte_instance
-UNIMPLEMENTED art_quick_get_char_instance
-UNIMPLEMENTED art_quick_get_short_instance
-UNIMPLEMENTED art_quick_get32_instance
-UNIMPLEMENTED art_quick_get64_instance
-UNIMPLEMENTED art_quick_get_obj_instance
-UNIMPLEMENTED art_quick_set8_static
-UNIMPLEMENTED art_quick_set16_static
-UNIMPLEMENTED art_quick_set32_static
-UNIMPLEMENTED art_quick_set64_static
-UNIMPLEMENTED art_quick_set_obj_static
-UNIMPLEMENTED art_quick_set8_instance
-UNIMPLEMENTED art_quick_set16_instance
-UNIMPLEMENTED art_quick_set32_instance
-UNIMPLEMENTED art_quick_set64_instance
-UNIMPLEMENTED art_quick_set_obj_instance
-UNIMPLEMENTED art_quick_resolve_string
+ /*
+ * Entry from managed code that calls artUnlockObjectFromCode and delivers an exception on failure.
+ */
+ .extern artUnlockObjectFromCode
+ENTRY art_quick_unlock_object
+ beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
+ nop
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
+ jal artUnlockObjectFromCode # (Object* obj, Thread*)
+ move $a1, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_unlock_object
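
Both the lock and unlock stubs funnel through RETURN_IF_ZERO: the C helper returns zero on success and non-zero after raising an exception. A compact C++ model of that convention, with illustrative names only:

    // Stand-in for a RETURN_IF_ZERO-style helper: 0 means success, non-zero
    // means a pending exception was set on the calling thread.
    static int LockObject(void* obj) { return obj != nullptr ? 0 : 1; }

    // Models the macro's dispatch: success returns straight to managed code,
    // failure falls through to exception delivery.
    bool CallWithReturnIfZero(void* obj) {
      if (LockObject(obj) == 0) {
        return true;   // RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME + return
      }
      return false;    // DELIVER_PENDING_EXCEPTION
    }
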
+
+ /*
+ * Entry from managed code that calls artIsAssignableFromCode and, on failure, delivers a
+ * ClassCastException via artThrowClassCastException.
+ */
+ .extern artThrowClassCastException
+ENTRY art_quick_check_cast
+ daddiu $sp, $sp, -32
+ .cfi_adjust_cfa_offset 32
+ sd $ra, 24($sp)
+ .cfi_rel_offset 31, 24
+ sd $t9, 16($sp)
+ sd $a1, 8($sp)
+ sd $a0, 0($sp)
+ jal artIsAssignableFromCode
+ nop
+ beq $v0, $zero, .Lthrow_class_cast_exception
+ ld $ra, 24($sp)
+ jalr $zero, $ra
+ daddiu $sp, $sp, 32
+ .cfi_adjust_cfa_offset -32
+.Lthrow_class_cast_exception:
+ ld $t9, 16($sp)
+ ld $a1, 8($sp)
+ ld $a0, 0($sp)
+ daddiu $sp, $sp, 32
+ .cfi_adjust_cfa_offset -32
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artThrowClassCastException
+ jalr $zero, $t9 # artThrowClassCastException (Class*, Class*, Thread*)
+ move $a2, rSELF # pass Thread::Current
+END art_quick_check_cast
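
In C++ terms the stub implements roughly the following control flow; IsAssignable and CheckCast below are simplified stand-ins, not the runtime's functions:

    struct Class {};

    // Stand-in for artIsAssignableFromCode: non-zero means `src` may be
    // stored where a `dest` is expected. Purely illustrative.
    static int IsAssignable(Class* dest, Class* src) { return dest == src; }

    // Approximate control flow of art_quick_check_cast: query assignability
    // with the arguments saved, take the fast return on success, otherwise
    // restore the original arguments and tail into the throw path.
    void CheckCast(Class* dest, Class* src) {
      if (IsAssignable(dest, src) != 0) {
        return;  // fast path: ld $ra, then jalr $zero, $ra
      }
      // Slow path: SETUP_SAVE_ALL_CALLEE_SAVE_FRAME, then
      // artThrowClassCastException(dest, src, Thread::Current()).
    }
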
+
+ /*
+ * Entry from managed code for array put operations of objects where the value being stored
+ * needs to be checked for compatibility.
+ * a0 = array, a1 = index, a2 = value
+ */
+ENTRY art_quick_aput_obj_with_null_and_bound_check
+ bne $a0, $zero, .Lart_quick_aput_obj_with_bound_check_gp_set
+ nop
+ b .Lart_quick_throw_null_pointer_exception_gp_set
+ nop
+END art_quick_aput_obj_with_null_and_bound_check
+
+ENTRY art_quick_aput_obj_with_bound_check
+ lwu $t0, MIRROR_ARRAY_LENGTH_OFFSET($a0)
+ sltu $t1, $a1, $t0
+ bne $t1, $zero, .Lart_quick_aput_obj_gp_set
+ nop
+ move $a0, $a1
+ b .Lart_quick_throw_array_bounds_gp_set
+ move $a1, $t0
+END art_quick_aput_obj_with_bound_check
+
+ENTRY art_quick_aput_obj
+ beq $a2, $zero, .Ldo_aput_null
+ nop
+ lwu $t0, MIRROR_OBJECT_CLASS_OFFSET($a0)
+ lwu $t1, MIRROR_OBJECT_CLASS_OFFSET($a2)
+ lwu $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET($t0)
+ bne $t1, $t0, .Lcheck_assignability # branch if value's type != array's component type
+ nop
+.Ldo_aput:
+ dsll $a1, $a1, 2
+ daddu $t0, $a0, $a1
+ sw $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
+ ld $t0, THREAD_CARD_TABLE_OFFSET(rSELF)
+ dsrl $t1, $a0, 7
+ daddu $t1, $t1, $t0
+ sb $t0, ($t1)
+ jalr $zero, $ra
+ nop
+.Ldo_aput_null:
+ dsll $a1, $a1, 2
+ daddu $t0, $a0, $a1
+ sw $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
+ jalr $zero, $ra
+ nop
+.Lcheck_assignability:
+ daddiu $sp, $sp, -64
+ .cfi_adjust_cfa_offset 64
+ sd $ra, 56($sp)
+ .cfi_rel_offset 31, 56
+ sd $t9, 24($sp)
+ sd $a2, 16($sp)
+ sd $a1, 8($sp)
+ sd $a0, 0($sp)
+ move $a1, $t1
+ move $a0, $t0
+ jal artIsAssignableFromCode # (Class*, Class*)
+ nop
+ ld $ra, 56($sp)
+ ld $t9, 24($sp)
+ ld $a2, 16($sp)
+ ld $a1, 8($sp)
+ ld $a0, 0($sp)
+ daddiu $sp, $sp, 64
+ .cfi_adjust_cfa_offset -64
+ bne $v0, $zero, .Ldo_aput
+ nop
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ move $a1, $a2
+ dla $t9, artThrowArrayStoreException
+ jalr $zero, $t9 # artThrowArrayStoreException(Class*, Class*, Thread*)
+ move $a2, rSELF # pass Thread::Current
+END art_quick_aput_obj
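
The three-instruction write barrier in .Ldo_aput uses ART's biased card-table trick: the per-thread card-table pointer is pre-biased so that base + (addr >> 7) addresses the card, and the dirty value stored is the low byte of that base pointer itself. A minimal C++ illustration of the arithmetic (a simplified model, not the runtime's CardTable class):

    #include <cstdint>

    constexpr unsigned kCardShift = 7;  // 128-byte cards, matching `dsrl ..., 7`

    // Marks the card covering `obj_addr` as dirty. `biased_base` models the
    // pointer loaded from THREAD_CARD_TABLE_OFFSET: it is pre-biased so that
    // its own low byte doubles as the dirty value (the `sb $t0, ($t1)` above).
    void MarkCard(uint8_t* biased_base, uintptr_t obj_addr) {
      uint8_t* card = biased_base + (obj_addr >> kCardShift);
      *card = static_cast<uint8_t>(reinterpret_cast<uintptr_t>(biased_base));
    }
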
+
+ /*
+ * Entry from managed code when static storage is uninitialized. This stub will run the
+ * class initializer and deliver the exception on error. On success the static storage
+ * base is returned.
+ */
+ .extern artInitializeStaticStorageFromCode
+ENTRY art_quick_initialize_static_storage
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ # artInitializeStaticStorageFromCode(uint32_t type_idx, Method* referrer, Thread*)
+ jal artInitializeStaticStorageFromCode
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_initialize_static_storage
+
+ /*
+ * Entry from managed code when the dex cache misses for a type_idx.
+ */
+ .extern artInitializeTypeFromCode
+ENTRY art_quick_initialize_type
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ # artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*)
+ jal artInitializeTypeFromCode
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_initialize_type
+
+ /*
+ * Entry from managed code when the type_idx needs to be checked for access and the dex
+ * cache may also miss.
+ */
+ .extern artInitializeTypeAndVerifyAccessFromCode
+ENTRY art_quick_initialize_type_and_verify_access
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ # artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Method* referrer, Thread*)
+ jal artInitializeTypeAndVerifyAccessFromCode
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_initialize_type_and_verify_access
+
+ /*
+ * Called by managed code to resolve a static field and load a boolean primitive value.
+ */
+ .extern artGetBooleanStaticFromCode
+ENTRY art_quick_get_boolean_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetBooleanStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_boolean_static
+
+ /*
+ * Called by managed code to resolve a static field and load a byte primitive value.
+ */
+ .extern artGetByteStaticFromCode
+ENTRY art_quick_get_byte_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetByteStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_byte_static
+
+ /*
+ * Called by managed code to resolve a static field and load a char primitive value.
+ */
+ .extern artGetCharStaticFromCode
+ENTRY art_quick_get_char_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetCharStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_char_static
+
+ /*
+ * Called by managed code to resolve a static field and load a short primitive value.
+ */
+ .extern artGetShortStaticFromCode
+ENTRY art_quick_get_short_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetShortStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_short_static
+
+ /*
+ * Called by managed code to resolve a static field and load a 32-bit primitive value.
+ */
+ .extern artGet32StaticFromCode
+ENTRY art_quick_get32_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGet32StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get32_static
+
+ /*
+ * Called by managed code to resolve a static field and load a 64-bit primitive value.
+ */
+ .extern artGet64StaticFromCode
+ENTRY art_quick_get64_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGet64StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get64_static
+
+ /*
+ * Called by managed code to resolve a static field and load an object reference.
+ */
+ .extern artGetObjStaticFromCode
+ENTRY art_quick_get_obj_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetObjStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_obj_static
+
+ /*
+ * Called by managed code to resolve an instance field and load a boolean primitive value.
+ */
+ .extern artGetBooleanInstanceFromCode
+ENTRY art_quick_get_boolean_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetBooleanInstanceFromCode # (field_idx, Object*, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_boolean_instance
+
+ /*
+ * Called by managed code to resolve an instance field and load a byte primitive value.
+ */
+ .extern artGetByteInstanceFromCode
+ENTRY art_quick_get_byte_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetByteInstanceFromCode # (field_idx, Object*, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_byte_instance
+
+ /*
+ * Called by managed code to resolve an instance field and load a char primitive value.
+ */
+ .extern artGetCharInstanceFromCode
+ENTRY art_quick_get_char_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetCharInstanceFromCode # (field_idx, Object*, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_char_instance
+
+ /*
+ * Called by managed code to resolve an instance field and load a short primitive value.
+ */
+ .extern artGetShortInstanceFromCode
+ENTRY art_quick_get_short_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetShortInstanceFromCode # (field_idx, Object*, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_short_instance
+
+ /*
+ * Called by managed code to resolve an instance field and load a 32-bit primitive value.
+ */
+ .extern artGet32InstanceFromCode
+ENTRY art_quick_get32_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGet32InstanceFromCode # (field_idx, Object*, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get32_instance
+
+ /*
+ * Called by managed code to resolve an instance field and load a 64-bit primitive value.
+ */
+ .extern artGet64InstanceFromCode
+ENTRY art_quick_get64_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGet64InstanceFromCode # (field_idx, Object*, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get64_instance
+
+ /*
+ * Called by managed code to resolve an instance field and load an object reference.
+ */
+ .extern artGetObjInstanceFromCode
+ENTRY art_quick_get_obj_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetObjInstanceFromCode # (field_idx, Object*, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_obj_instance
+
+ /*
+ * Called by managed code to resolve a static field and store an 8-bit primitive value.
+ */
+ .extern artSet8StaticFromCode
+ENTRY art_quick_set8_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet8StaticFromCode # (field_idx, new_val, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set8_static
+
+ /*
+ * Called by managed code to resolve a static field and store a 16-bit primitive value.
+ */
+ .extern artSet16StaticFromCode
+ENTRY art_quick_set16_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet16StaticFromCode # (field_idx, new_val, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set16_static
+
+ /*
+ * Called by managed code to resolve a static field and store a 32-bit primitive value.
+ */
+ .extern artSet32StaticFromCode
+ENTRY art_quick_set32_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet32StaticFromCode # (field_idx, new_val, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set32_static
+
+ /*
+ * Called by managed code to resolve a static field and store a 64-bit primitive value.
+ */
+ .extern artSet64StaticFromCode
+ENTRY art_quick_set64_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ move $a2, $a1 # pass new_val
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet64StaticFromCode # (field_idx, referrer, new_val, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set64_static
+
+ /*
+ * Called by managed code to resolve a static field and store an object reference.
+ */
+ .extern artSetObjStaticFromCode
+ENTRY art_quick_set_obj_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSetObjStaticFromCode # (field_idx, new_val, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set_obj_static
+
+ /*
+ * Called by managed code to resolve an instance field and store an 8-bit primitive value.
+ */
+ .extern artSet8InstanceFromCode
+ENTRY art_quick_set8_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet8InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
+ move $a4, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set8_instance
+
+ /*
+ * Called by managed code to resolve an instance field and store a 16-bit primitive value.
+ */
+ .extern artSet16InstanceFromCode
+ENTRY art_quick_set16_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet16InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
+ move $a4, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set16_instance
+
+ /*
+ * Called by managed code to resolve an instance field and store a 32-bit primitive value.
+ */
+ .extern artSet32InstanceFromCode
+ENTRY art_quick_set32_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet32InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
+ move $a4, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set32_instance
+
+ /*
+ * Called by managed code to resolve an instance field and store a 64-bit primitive value.
+ */
+ .extern artSet64InstanceFromCode
+ENTRY art_quick_set64_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet64InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
+ move $a4, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set64_instance
+
+ /*
+ * Called by managed code to resolve an instance field and store an object reference.
+ */
+ .extern artSetObjInstanceFromCode
+ENTRY art_quick_set_obj_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSetObjInstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
+ move $a4, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set_obj_instance
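
Note the two return conventions in the accessor stubs above: getters end with RETURN_IF_NO_EXCEPTION, since any bit pattern can be a legitimate field value, while setters end with RETURN_IF_ZERO, since they return a plain status code. A small C++ model of the getter-side check, with illustrative names:

    #include <cstdint>

    struct Thread { void* exception = nullptr; };

    // Stand-in getter: its return value alone cannot signal failure, since 0,
    // -1, etc. are all valid field contents; failure is a pending exception.
    static int32_t Get32Static(uint32_t field_idx, Thread* self) {
      (void)field_idx; (void)self;
      return -1;  // a perfectly valid field value
    }

    // RETURN_IF_NO_EXCEPTION in C++ terms: success is the *absence* of a
    // pending exception, not any particular return value.
    bool GetWithExceptionCheck(uint32_t field_idx, Thread* self, int32_t* out) {
      *out = Get32Static(field_idx, self);
      return self->exception == nullptr;
    }
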
+
+ /*
+ * Entry from managed code to resolve a string, this stub will allocate a String and deliver an
+ * exception on error. On success the String is returned. $a0 holds the referring method and
+ * $a1 holds the string index. The fast-path check for a hit in the strings cache has already
+ * been performed.
+ */
+ .extern artResolveStringFromCode
+ENTRY art_quick_resolve_string
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ # artResolveStringFromCode(Method* referrer, uint32_t string_idx, Thread*, $sp)
+ jal artResolveStringFromCode
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_resolve_string
// Macro to facilitate adding new allocation entrypoints.
.macro TWO_ARG_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
ENTRY \name
- break
- break
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal \entrypoint
+ move $a2, rSELF # pass Thread::Current
+ \return
END \name
.endm
.macro THREE_ARG_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
ENTRY \name
- break
- break
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal \entrypoint
+ move $a3, rSELF # pass Thread::Current
+ \return
END \name
.endm
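
Each expansion of these macros produces a stub of the same shape: save the callee-save frame, append Thread::Current() as the trailing argument, and let the \return macro dispatch on the result. A rough C++ analogue of one TWO_ARG_DOWNCALL expansion (all names below are invented for illustration):

    #include <cstdint>

    struct Thread {};
    static Thread* CurrentThread() { return nullptr; }  // stands in for rSELF

    // Stand-in entrypoint with the (arg0, arg1, Thread*) shape that
    // TWO_ARG_DOWNCALL expects; the real ones come from
    // GENERATE_ALL_ALLOC_ENTRYPOINTS.
    static void* SampleEntrypoint(uint64_t a0, uint64_t a1, Thread* self) {
      (void)a0; (void)a1; (void)self;
      return nullptr;
    }

    // Approximate shape of one expansion: save the callee-save frame, append
    // Thread::Current() as the trailing argument, then let the \return macro
    // decide between returning the result and delivering a pending exception.
    void* TwoArgDowncall(uint64_t a0, uint64_t a1) {
      // SETUP_REFS_ONLY_CALLEE_SAVE_FRAME would run here.
      void* result = SampleEntrypoint(a0, a1, CurrentThread());
      // The \return macro (e.g. RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER) runs here.
      return result;
    }
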
// Generate the allocation entrypoints for each allocator.
GENERATE_ALL_ALLOC_ENTRYPOINTS
-UNIMPLEMENTED art_quick_test_suspend
+ /*
+ * Called by managed code when the value in rSUSPEND has been decremented to 0.
+ */
+ .extern artTestSuspendFromCode
+ENTRY art_quick_test_suspend
+ lh $a0, THREAD_FLAGS_OFFSET(rSELF)
+ bne $a0, $zero, 1f
+ daddiu rSUSPEND, $zero, SUSPEND_CHECK_INTERVAL # reset rSUSPEND to SUSPEND_CHECK_INTERVAL
+ jalr $zero, $ra
+ nop
+1:
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves for stack crawl
+ jal artTestSuspendFromCode # (Thread*)
+ move $a0, rSELF
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+END art_quick_test_suspend
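
A simplified C++ model of this suspend check follows; the flag and interval names mirror the assembly, and the function shape is illustrative rather than the runtime's actual signature:

    #include <cstdint>

    constexpr int kSuspendCheckInterval = 96;  // SUSPEND_CHECK_INTERVAL

    struct Thread { uint16_t flags = 0; };  // the THREAD_FLAGS_OFFSET halfword

    // Returns the refreshed countdown for rSUSPEND; calls into the runtime
    // only when a suspend or checkpoint request is actually pending.
    int TestSuspend(Thread* self, void (*test_suspend_from_code)(Thread*)) {
      if (self->flags != 0) {
        test_suspend_from_code(self);  // may block, e.g. for GC
      }
      return kSuspendCheckInterval;    // reset rSUSPEND
    }
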
/*
* Called by managed code that is attempting to call a method on a proxy class. On entry
@@ -779,7 +1350,19 @@
DELIVER_PENDING_EXCEPTION
END art_quick_proxy_invoke_handler
-UNIMPLEMENTED art_quick_imt_conflict_trampoline
+ /*
+ * Called to resolve an IMT conflict. $t0 is a hidden argument that holds the target method's
+ * dex method index.
+ */
+ENTRY art_quick_imt_conflict_trampoline
+ lwu $a0, 0($sp) # load caller Method*
+ lwu $a0, MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET($a0) # load dex_cache_resolved_methods
+ dsll $t0, 2 # convert target method offset to bytes
+ daddu $a0, $t0 # get address of target method
+ dla $t9, art_quick_invoke_interface_trampoline
+ jalr $zero, $t9
+ lwu $a0, MIRROR_OBJECT_ARRAY_DATA_OFFSET($a0) # load the target method
+END art_quick_imt_conflict_trampoline
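
The indexing here assumes heap object arrays store their elements as 32-bit references, which is why the trampoline uses lwu loads and shifts the index by 2. A simplified C++ model of the lookup (the array layout is a sketch, not mirror::ObjectArray):

    #include <cstdint>

    // Object arrays in the heap store elements as 32-bit references.
    struct ObjectArray {
      uint32_t length;
      uint32_t data[1];  // corresponds to MIRROR_OBJECT_ARRAY_DATA_OFFSET
    };

    // Mirrors `$a0 + ($t0 << 2)` followed by the final lwu: fetch the 32-bit
    // reference of the resolved method at dex method index `method_idx`.
    uint32_t LookupResolvedMethod(const ObjectArray* resolved_methods,
                                  uint32_t method_idx) {
      return resolved_methods->data[method_idx];
    }
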
.extern artQuickResolutionTrampoline
ENTRY art_quick_resolution_trampoline
@@ -930,6 +1513,18 @@
.cfi_adjust_cfa_offset -(16+FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
END art_quick_instrumentation_exit
-UNIMPLEMENTED art_quick_deoptimize
+ /*
+ * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
+ * will long jump to the upcall with a special exception of -1.
+ */
+ .extern artDeoptimize
+ .extern artEnterInterpreterFromDeoptimize
+ENTRY art_quick_deoptimize
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ jal artDeoptimize # artDeoptimize(Thread*); does not return
+ move $a0, rSELF # pass Thread::Current
+END art_quick_deoptimize
+
UNIMPLEMENTED art_quick_indexof
UNIMPLEMENTED art_quick_string_compareto
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 0d41a8f..0769687 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1171,7 +1171,7 @@
reinterpret_cast<size_t>(nullptr),
StubTest::GetEntrypoint(self, kQuickAllocArrayResolved),
self);
- EXPECT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException(nullptr));
+ EXPECT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException());
EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
EXPECT_TRUE(obj->IsArrayInstance());
@@ -2060,7 +2060,7 @@
env->CallBooleanMethod(jarray_list, add_jmethod, jobj);
- ASSERT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException(nullptr));
+ ASSERT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException());
// Contains.
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 65c65e2..0f874a4 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1551,7 +1551,9 @@
CFI_ADJUST_CFA_OFFSET(-8)
POP rax // Restore integer result.
- addq LITERAL(FRAME_SIZE_REFS_ONLY_CALLEE_SAVE), %rsp // Drop save frame and fake return pc.
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+
+ addq LITERAL(8), %rsp // Drop fake return pc.
jmp *%rdi // Return.
END_FUNCTION art_quick_instrumentation_exit
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index ee70fe7..92f4ebe 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -39,7 +39,7 @@
// impacts where samples will occur. Reducing the count as much as possible improves profiler
// accuracy in tools like traceview.
// TODO: get a compiler that can do a proper job of loop optimization and remove this.
-#define SUSPEND_CHECK_INTERVAL 1000
+#define SUSPEND_CHECK_INTERVAL 96
#endif
#if defined(__cplusplus)
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index e6380bf..e37aca1 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -26,9 +26,6 @@
namespace art {
-// Memmap is a bit slower than malloc according to my measurements.
-static constexpr bool kUseMemMap = false;
-static constexpr bool kUseMemSet = true && kUseMemMap;
static constexpr size_t kValgrindRedZoneBytes = 8;
constexpr size_t Arena::kDefaultSize;
@@ -123,45 +120,47 @@
// Explicitly instantiate the used implementation.
template class ArenaAllocatorStatsImpl<kArenaAllocatorCountAllocations>;
-Arena::Arena(size_t size)
- : bytes_allocated_(0),
- map_(nullptr),
- next_(nullptr) {
- if (kUseMemMap) {
- std::string error_msg;
- map_ = MemMap::MapAnonymous("dalvik-arena", nullptr, size, PROT_READ | PROT_WRITE, false, false,
- &error_msg);
- CHECK(map_ != nullptr) << error_msg;
- memory_ = map_->Begin();
- size_ = map_->Size();
- } else {
- memory_ = reinterpret_cast<uint8_t*>(calloc(1, size));
- size_ = size;
- }
+Arena::Arena() : bytes_allocated_(0), next_(nullptr) {
}
-Arena::~Arena() {
- if (kUseMemMap) {
- delete map_;
- } else {
- free(reinterpret_cast<void*>(memory_));
- }
+MallocArena::MallocArena(size_t size) {
+ memory_ = reinterpret_cast<uint8_t*>(calloc(1, size));
+ size_ = size;
}
-void Arena::Reset() {
- if (bytes_allocated_) {
- if (kUseMemSet || !kUseMemMap) {
- memset(Begin(), 0, bytes_allocated_);
- } else {
- map_->MadviseDontNeedAndZero();
- }
+MallocArena::~MallocArena() {
+ free(reinterpret_cast<void*>(memory_));
+}
+
+MemMapArena::MemMapArena(size_t size) {
+ std::string error_msg;
+ map_.reset(
+ MemMap::MapAnonymous("dalvik-LinearAlloc", nullptr, size, PROT_READ | PROT_WRITE, false,
+ false, &error_msg));
+ CHECK(map_.get() != nullptr) << error_msg;
+ memory_ = map_->Begin();
+ size_ = map_->Size();
+}
+
+void MemMapArena::Release() {
+ if (bytes_allocated_ > 0) {
+ map_->MadviseDontNeedAndZero();
bytes_allocated_ = 0;
}
}
-ArenaPool::ArenaPool()
- : lock_("Arena pool lock"),
- free_arenas_(nullptr) {
+void Arena::Reset() {
+ if (bytes_allocated_ > 0) {
+ memset(Begin(), 0, bytes_allocated_);
+ bytes_allocated_ = 0;
+ }
+}
+
+ArenaPool::ArenaPool(bool use_malloc)
+ : use_malloc_(use_malloc), lock_("Arena pool lock"), free_arenas_(nullptr) {
+ if (!use_malloc) {
+ MemMap::Init();
+ }
}
ArenaPool::~ArenaPool() {
@@ -183,12 +182,22 @@
}
}
if (ret == nullptr) {
- ret = new Arena(size);
+ ret = use_malloc_ ? static_cast<Arena*>(new MallocArena(size)) : new MemMapArena(size);
}
ret->Reset();
return ret;
}
+void ArenaPool::TrimMaps() {
+ if (!use_malloc_) {
+ // Doesn't work for malloc.
+ MutexLock lock(Thread::Current(), lock_);
+ for (auto* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
+ arena->Release();
+ }
+ }
+}
+
size_t ArenaPool::GetBytesAllocated() const {
size_t total = 0;
MutexLock lock(Thread::Current(), lock_);
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 9237391..cc7b856 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -116,9 +116,12 @@
class Arena {
public:
static constexpr size_t kDefaultSize = 128 * KB;
- explicit Arena(size_t size = kDefaultSize);
- ~Arena();
+ Arena();
+ virtual ~Arena() { }
+ // Reset prepares the arena for reuse, zeroing its contents with memset for performance.
void Reset();
+ // Release is used in between uses; it madvises unused pages away to reduce memory usage.
+ virtual void Release() { }
uint8_t* Begin() {
return memory_;
}
@@ -139,29 +142,50 @@
return bytes_allocated_;
}
- private:
+ protected:
size_t bytes_allocated_;
uint8_t* memory_;
size_t size_;
- MemMap* map_;
Arena* next_;
friend class ArenaPool;
friend class ArenaAllocator;
friend class ArenaStack;
friend class ScopedArenaAllocator;
template <bool kCount> friend class ArenaAllocatorStatsImpl;
+
+ private:
DISALLOW_COPY_AND_ASSIGN(Arena);
};
+class MallocArena FINAL : public Arena {
+ public:
+ explicit MallocArena(size_t size = Arena::kDefaultSize);
+ virtual ~MallocArena();
+};
+
+class MemMapArena FINAL : public Arena {
+ public:
+ explicit MemMapArena(size_t size = Arena::kDefaultSize);
+ virtual ~MemMapArena() { }
+ void Release() OVERRIDE;
+
+ private:
+ std::unique_ptr<MemMap> map_;
+};
+
class ArenaPool {
public:
- ArenaPool();
+ explicit ArenaPool(bool use_malloc = true);
~ArenaPool();
Arena* AllocArena(size_t size) LOCKS_EXCLUDED(lock_);
void FreeArenaChain(Arena* first) LOCKS_EXCLUDED(lock_);
size_t GetBytesAllocated() const LOCKS_EXCLUDED(lock_);
+ // Trim the maps in arenas by madvising, used by the JIT to reduce memory usage. This only
+ // works when use_malloc is false.
+ void TrimMaps() LOCKS_EXCLUDED(lock_);
private:
+ const bool use_malloc_;
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
Arena* free_arenas_ GUARDED_BY(lock_);
DISALLOW_COPY_AND_ASSIGN(ArenaPool);
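
For readers of this header, a short usage sketch of the new pool API may help; this caller is hypothetical and not part of the patch:

    #include "base/arena_allocator.h"

    void TrimExample() {
      // false selects MemMapArena-backed arenas; the constructor calls
      // MemMap::Init() in that case.
      art::ArenaPool pool(/* use_malloc */ false);

      art::Arena* arena = pool.AllocArena(art::Arena::kDefaultSize);
      // ... use the arena, e.g. via an ArenaAllocator ...
      pool.FreeArenaChain(arena);

      // Madvise freed arenas' pages back to the kernel; a no-op for malloc pools.
      pool.TrimMaps();
    }
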
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 3d007ba..014f4ab 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -45,6 +45,7 @@
bool jit;
bool jni;
bool monitor;
+ bool oat;
bool profiler;
bool signals;
bool startup;
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 7db1d72..2b0167d 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -1065,11 +1065,10 @@
// Verify that, if an exception has been raised, the native code doesn't
// make any JNI calls other than the Exception* methods.
if ((flags_ & kFlag_ExcepOkay) == 0 && self->IsExceptionPending()) {
- ThrowLocation throw_location;
- mirror::Throwable* exception = self->GetException(&throw_location);
- std::string type(PrettyTypeOf(exception));
- AbortF("JNI %s called with pending exception '%s' thrown in %s",
- function_name_, type.c_str(), throw_location.Dump().c_str());
+ mirror::Throwable* exception = self->GetException();
+ AbortF("JNI %s called with pending exception %s",
+ function_name_,
+ exception->Dump().c_str());
return false;
}
return true;
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index 93062a7..893ab11 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -66,31 +66,36 @@
mirror::ArtMethod* m = GetMethod();
CodeInfo code_info = m->GetOptimizedCodeInfo();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
- DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map, m->GetCodeItem()->registers_size_);
+ DexRegisterMap dex_register_map =
+ code_info.GetDexRegisterMapOf(stack_map, m->GetCodeItem()->registers_size_);
MemoryRegion stack_mask = stack_map.GetStackMask();
uint32_t register_mask = stack_map.GetRegisterMask();
for (int i = 0; i < number_of_references; ++i) {
int reg = registers[i];
CHECK(reg < m->GetCodeItem()->registers_size_);
- DexRegisterMap::LocationKind location = dex_register_map.GetLocationKind(reg);
- switch (location) {
- case DexRegisterMap::kNone:
+ DexRegisterLocation location = dex_register_map.GetLocationKindAndValue(reg);
+ switch (location.GetKind()) {
+ case DexRegisterLocation::Kind::kNone:
// Not set, should not be a reference.
CHECK(false);
break;
- case DexRegisterMap::kInStack:
- CHECK(stack_mask.LoadBit(dex_register_map.GetValue(reg) >> 2));
+ case DexRegisterLocation::Kind::kInStack:
+ DCHECK_EQ(location.GetValue() % kFrameSlotSize, 0);
+ CHECK(stack_mask.LoadBit(location.GetValue() / kFrameSlotSize));
break;
- case DexRegisterMap::kInRegister:
- CHECK_NE(register_mask & (1 << dex_register_map.GetValue(reg)), 0u);
+ case DexRegisterLocation::Kind::kInRegister:
+ CHECK_NE(register_mask & (1 << location.GetValue()), 0u);
break;
- case DexRegisterMap::kInFpuRegister:
+ case DexRegisterLocation::Kind::kInFpuRegister:
// In Fpu register, should not be a reference.
CHECK(false);
break;
- case DexRegisterMap::kConstant:
- CHECK_EQ(dex_register_map.GetValue(reg), 0);
+ case DexRegisterLocation::Kind::kConstant:
+ CHECK_EQ(location.GetValue(), 0);
break;
+ default:
+ LOG(FATAL) << "Unexpected location kind"
+ << DexRegisterLocation::PrettyDescriptor(location.GetInternalKind());
}
}
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 2989b8c..700e1ad 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -48,6 +48,7 @@
#include "leb128.h"
#include "oat.h"
#include "oat_file.h"
+#include "oat_file_assistant.h"
#include "object_lock.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
@@ -82,8 +83,7 @@
va_list args;
va_start(args, fmt);
Thread* self = Thread::Current();
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewExceptionV(throw_location, "Ljava/lang/NoClassDefFoundError;", fmt, args);
+ self->ThrowNewExceptionV("Ljava/lang/NoClassDefFoundError;", fmt, args);
va_end(args);
}
@@ -103,16 +103,15 @@
if (runtime->IsAotCompiler()) {
// At compile time, accurate errors and NCDFE are disabled to speed compilation.
mirror::Throwable* pre_allocated = runtime->GetPreAllocatedNoClassDefFoundError();
- self->SetException(ThrowLocation(), pre_allocated);
+ self->SetException(pre_allocated);
} else {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
if (c->GetVerifyErrorClass() != NULL) {
// TODO: change the verifier to store an _instance_, with a useful detail message?
std::string temp;
- self->ThrowNewException(throw_location, c->GetVerifyErrorClass()->GetDescriptor(&temp),
+ self->ThrowNewException(c->GetVerifyErrorClass()->GetDescriptor(&temp),
PrettyDescriptor(c).c_str());
} else {
- self->ThrowNewException(throw_location, "Ljava/lang/NoClassDefFoundError;",
+ self->ThrowNewException("Ljava/lang/NoClassDefFoundError;",
PrettyDescriptor(c).c_str());
}
}
@@ -123,7 +122,7 @@
if (VLOG_IS_ON(class_linker)) {
std::string temp;
LOG(INFO) << "Failed to initialize class " << klass->GetDescriptor(&temp) << " from "
- << klass->GetLocation() << "\n" << Thread::Current()->GetException(nullptr)->Dump();
+ << klass->GetLocation() << "\n" << Thread::Current()->GetException()->Dump();
}
}
@@ -141,9 +140,7 @@
// We only wrap non-Error exceptions; an Error can just be used as-is.
if (!is_error) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewWrappedException(throw_location, "Ljava/lang/ExceptionInInitializerError;",
- nullptr);
+ self->ThrowNewWrappedException("Ljava/lang/ExceptionInInitializerError;", nullptr);
}
VlogClassInitializationFailure(klass);
}
@@ -662,77 +659,6 @@
}
}
-bool ClassLinker::GenerateOatFile(const char* dex_filename,
- int oat_fd,
- const char* oat_cache_filename,
- std::string* error_msg) {
- Locks::mutator_lock_->AssertNotHeld(Thread::Current()); // Avoid starving GC.
- std::string dex2oat(Runtime::Current()->GetCompilerExecutable());
-
- gc::Heap* heap = Runtime::Current()->GetHeap();
- std::string boot_image_option("--boot-image=");
- if (heap->GetImageSpace() == nullptr) {
- // TODO If we get a dex2dex compiler working we could maybe use that, OTOH since we are likely
- // out of space anyway it might not matter.
- *error_msg = StringPrintf("Cannot create oat file for '%s' because we are running "
- "without an image.", dex_filename);
- return false;
- }
- boot_image_option += heap->GetImageSpace()->GetImageLocation();
-
- std::string dex_file_option("--dex-file=");
- dex_file_option += dex_filename;
-
- std::string oat_fd_option("--oat-fd=");
- StringAppendF(&oat_fd_option, "%d", oat_fd);
-
- std::string oat_location_option("--oat-location=");
- oat_location_option += oat_cache_filename;
-
- std::vector<std::string> argv;
- argv.push_back(dex2oat);
- argv.push_back("--runtime-arg");
- argv.push_back("-classpath");
- argv.push_back("--runtime-arg");
- argv.push_back(Runtime::Current()->GetClassPathString());
-
- Runtime::Current()->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
-
- if (!Runtime::Current()->IsVerificationEnabled()) {
- argv.push_back("--compiler-filter=verify-none");
- }
-
- if (Runtime::Current()->MustRelocateIfPossible()) {
- argv.push_back("--runtime-arg");
- argv.push_back("-Xrelocate");
- } else {
- argv.push_back("--runtime-arg");
- argv.push_back("-Xnorelocate");
- }
-
- if (!kIsTargetBuild) {
- argv.push_back("--host");
- }
-
- argv.push_back(boot_image_option);
- argv.push_back(dex_file_option);
- argv.push_back(oat_fd_option);
- argv.push_back(oat_location_option);
- const std::vector<std::string>& compiler_options = Runtime::Current()->GetCompilerOptions();
- for (size_t i = 0; i < compiler_options.size(); ++i) {
- argv.push_back(compiler_options[i].c_str());
- }
-
- if (!Exec(argv, error_msg)) {
- // Manually delete the file. Ensures there is no garbage left over if the process unexpectedly
- // died. Ignore unlink failure, propagate the original error.
- TEMP_FAILURE_RETRY(unlink(oat_cache_filename));
- return false;
- }
-
- return true;
-}
-
const OatFile* ClassLinker::RegisterOatFile(const OatFile* oat_file) {
WriterMutexLock mu(Thread::Current(), dex_lock_);
if (kIsDebugBuild) {
@@ -782,504 +708,81 @@
return nullptr;
}
+std::vector<std::unique_ptr<const DexFile>> ClassLinker::OpenDexFilesFromOat(
+ const char* dex_location, const char* oat_location,
+ std::vector<std::string>* error_msgs) {
+ CHECK(error_msgs != nullptr);
-// Loads all multi dex files from the given oat file returning true on success.
-//
-// Parameters:
-// oat_file - the oat file to load from
-// dex_location - the dex location used to generate the oat file
-// dex_location_checksum - the checksum of the dex_location (may be null for pre-opted files)
-// generated - whether or not the oat_file existed before or was just (re)generated
-// error_msgs - any error messages will be appended here
-// dex_files - the loaded dex_files will be appended here (only if the loading succeeds)
-static bool LoadMultiDexFilesFromOatFile(const OatFile* oat_file,
- const char* dex_location,
- const uint32_t* dex_location_checksum,
- bool generated,
- std::vector<std::string>* error_msgs,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- if (oat_file == nullptr) {
- return false;
+ // Verify we aren't holding the mutator lock, which could starve GC if we
+ // have to generate or relocate an oat file.
+ Locks::mutator_lock_->AssertNotHeld(Thread::Current());
+
+ OatFileAssistant oat_file_assistant(dex_location, oat_location, kRuntimeISA,
+ !Runtime::Current()->IsAotCompiler());
+
+ // Lock the target oat location to avoid races generating and loading the
+ // oat file.
+ std::string error_msg;
+ if (!oat_file_assistant.Lock(&error_msg)) {
+ // Don't worry too much if this fails. If it does fail, it's unlikely we
+ // can generate an oat file anyway.
+ VLOG(class_linker) << "OatFileAssistant::Lock: " << error_msg;
}
- size_t old_size = dex_files->size(); // To rollback on error.
-
- bool success = true;
- for (size_t i = 0; success; ++i) {
- std::string next_name_str = DexFile::GetMultiDexClassesDexName(i, dex_location);
- const char* next_name = next_name_str.c_str();
-
- uint32_t next_location_checksum;
- uint32_t* next_location_checksum_pointer = &next_location_checksum;
- std::string error_msg;
- if ((i == 0) && (strcmp(next_name, dex_location) == 0)) {
- // When i=0 the multidex name should be the same as the location name. We already have the
- // checksum it so we don't need to recompute it.
- if (dex_location_checksum == nullptr) {
- next_location_checksum_pointer = nullptr;
- } else {
- next_location_checksum = *dex_location_checksum;
- }
- } else if (!DexFile::GetChecksum(next_name, next_location_checksum_pointer, &error_msg)) {
- DCHECK_EQ(false, i == 0 && generated);
- next_location_checksum_pointer = nullptr;
- }
-
- const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(next_name, nullptr, false);
-
- if (oat_dex_file == nullptr) {
- if (i == 0 && generated) {
- error_msg = StringPrintf("\nFailed to find dex file '%s' (checksum 0x%x) in generated out "
- " file'%s'", dex_location, next_location_checksum,
- oat_file->GetLocation().c_str());
- error_msgs->push_back(error_msg);
- }
- break; // Not found, done.
- }
-
- // Checksum test. Test must succeed when generated.
- success = !generated;
- if (next_location_checksum_pointer != nullptr) {
- success = next_location_checksum == oat_dex_file->GetDexFileLocationChecksum();
- }
-
- if (success) {
- std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error_msg);
- if (dex_file.get() == nullptr) {
- success = false;
- error_msgs->push_back(error_msg);
- } else {
- dex_files->push_back(std::move(dex_file));
+ // Check if we already have an up-to-date oat file open.
+ const OatFile* source_oat_file = nullptr;
+ {
+ ReaderMutexLock mu(Thread::Current(), dex_lock_);
+ for (const OatFile* oat_file : oat_files_) {
+ CHECK(oat_file != nullptr);
+ if (oat_file_assistant.GivenOatFileIsUpToDate(*oat_file)) {
+ source_oat_file = oat_file;
+ break;
}
}
-
- // When we generated the file, we expect success, or something is terribly wrong.
- CHECK_EQ(false, generated && !success)
- << "dex_location=" << next_name << " oat_location=" << oat_file->GetLocation().c_str()
- << std::hex << " dex_location_checksum=" << next_location_checksum
- << " OatDexFile::GetLocationChecksum()=" << oat_dex_file->GetDexFileLocationChecksum();
}
- if (dex_files->size() == old_size) {
- success = false; // We did not even find classes.dex
- }
-
- if (success) {
- return true;
- } else {
- dex_files->erase(dex_files->begin() + old_size, dex_files->end());
- return false;
- }
-}
-
-// Multidex files make it possible that some, but not all, dex files can be broken/outdated. This
-// complicates the loading process, as we should not use an iterative loading process, because that
-// would register the oat file and dex files that come before the broken one. Instead, check all
-// multidex ahead of time.
-bool ClassLinker::OpenDexFilesFromOat(const char* dex_location, const char* oat_location,
- std::vector<std::string>* error_msgs,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- // 1) Check whether we have an open oat file.
- // This requires a dex checksum, use the "primary" one.
- uint32_t dex_location_checksum;
- uint32_t* dex_location_checksum_pointer = &dex_location_checksum;
- bool have_checksum = true;
- std::string checksum_error_msg;
- if (!DexFile::GetChecksum(dex_location, dex_location_checksum_pointer, &checksum_error_msg)) {
- // This happens for pre-opted files since the corresponding dex files are no longer on disk.
- dex_location_checksum_pointer = nullptr;
- have_checksum = false;
- }
-
- bool needs_registering = false;
-
- const OatFile::OatDexFile* oat_dex_file = FindOpenedOatDexFile(oat_location, dex_location,
- dex_location_checksum_pointer);
- std::unique_ptr<const OatFile> open_oat_file(
- oat_dex_file != nullptr ? oat_dex_file->GetOatFile() : nullptr);
-
- // 2) If we do not have an open one, maybe there's one on disk already.
-
- // In case the oat file is not open, we play a locking game here so
- // that if two different processes race to load and register or generate
- // (or worse, one tries to open a partial generated file) we will be okay.
- // This is actually common with apps that use DexClassLoader to work
- // around the dex method reference limit and that have a background
- // service running in a separate process.
- ScopedFlock scoped_flock;
-
- if (open_oat_file.get() == nullptr) {
- if (oat_location != nullptr) {
- // Can only do this if we have a checksum, else error.
- if (!have_checksum) {
- error_msgs->push_back(checksum_error_msg);
- return false;
- }
-
- std::string error_msg;
-
- // We are loading or creating one in the future. Time to set up the file lock.
- if (!scoped_flock.Init(oat_location, &error_msg)) {
- error_msgs->push_back(error_msg);
- return false;
- }
-
- // TODO Caller specifically asks for this oat_location. We should honor it. Probably?
- open_oat_file.reset(FindOatFileInOatLocationForDexFile(dex_location, dex_location_checksum,
- oat_location, &error_msg));
-
- if (open_oat_file.get() == nullptr) {
- std::string compound_msg = StringPrintf("Failed to find dex file '%s' in oat location '%s': %s",
- dex_location, oat_location, error_msg.c_str());
- VLOG(class_linker) << compound_msg;
- error_msgs->push_back(compound_msg);
- }
- } else {
- // TODO: What to lock here?
- bool obsolete_file_cleanup_failed;
- open_oat_file.reset(FindOatFileContainingDexFileFromDexLocation(dex_location,
- dex_location_checksum_pointer,
- kRuntimeISA, error_msgs,
- &obsolete_file_cleanup_failed));
- // There's no point in going forward and eventually try to regenerate the
- // file if we couldn't remove the obsolete one. Mostly likely we will fail
- // with the same error when trying to write the new file.
- // TODO: should we maybe do this only when we get permission issues? (i.e. EACCESS).
- if (obsolete_file_cleanup_failed) {
- return false;
- }
+ // If we didn't have an up-to-date oat file open, try to load one from disk.
+ if (source_oat_file == nullptr) {
+ // Update the oat file on disk if we can. This may fail, but that's okay.
+ // Best effort is all that matters here.
+ if (!oat_file_assistant.MakeUpToDate(&error_msg)) {
+ LOG(WARNING) << error_msg;
}
- needs_registering = true;
- }
- // 3) If we have an oat file, check all contained multidex files for our dex_location.
- // Note: LoadMultiDexFilesFromOatFile will check for nullptr in the first argument.
- bool success = LoadMultiDexFilesFromOatFile(open_oat_file.get(), dex_location,
- dex_location_checksum_pointer,
- false, error_msgs, dex_files);
- if (success) {
- const OatFile* oat_file = open_oat_file.release(); // Avoid deleting it.
- if (needs_registering) {
- // We opened the oat file, so we must register it.
- RegisterOatFile(oat_file);
- }
- // If the file isn't executable we failed patchoat but did manage to get the dex files.
- return oat_file->IsExecutable();
- } else {
- if (needs_registering) {
- // We opened it, delete it.
- open_oat_file.reset();
- } else {
- open_oat_file.release(); // Do not delete open oat files.
+ // Get the oat file on disk.
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ if (oat_file.get() != nullptr) {
+ source_oat_file = oat_file.release();
+ RegisterOatFile(source_oat_file);
}
}
- // 4) If it's not the case (either no oat file or mismatches), regenerate and load.
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
- // Need a checksum, fail else.
- if (!have_checksum) {
- error_msgs->push_back(checksum_error_msg);
- return false;
- }
-
- // Look in cache location if no oat_location is given.
- std::string cache_location;
- if (oat_location == nullptr) {
- // Use the dalvik cache.
- const std::string dalvik_cache(GetDalvikCacheOrDie(GetInstructionSetString(kRuntimeISA)));
- cache_location = GetDalvikCacheFilenameOrDie(dex_location, dalvik_cache.c_str());
- oat_location = cache_location.c_str();
- }
-
- bool has_flock = true;
- // Definitely need to lock now.
- if (!scoped_flock.HasFile()) {
- std::string error_msg;
- if (!scoped_flock.Init(oat_location, &error_msg)) {
- error_msgs->push_back(error_msg);
- has_flock = false;
+ // Load the dex files from the oat file.
+ if (source_oat_file != nullptr) {
+ dex_files = oat_file_assistant.LoadDexFiles(*source_oat_file, dex_location);
+ if (dex_files.empty()) {
+ error_msgs->push_back("Failed to open dex files from "
+ + source_oat_file->GetLocation());
}
}
- if (Runtime::Current()->IsDex2OatEnabled() && has_flock && scoped_flock.HasFile()) {
- // Create the oat file.
- open_oat_file.reset(CreateOatFileForDexLocation(dex_location, scoped_flock.GetFile()->Fd(),
- oat_location, error_msgs));
- }
-
- // Failed, bail.
- if (open_oat_file.get() == nullptr) {
- // dex2oat was disabled or crashed. Add the dex file in the list of dex_files to make progress.
+ // Fall back to running out of the original dex file if we couldn't load any
+ // dex_files from the oat file.
+ if (dex_files.empty()) {
if (Runtime::Current()->IsDexFileFallbackEnabled()) {
- std::string error_msg;
- if (!DexFile::Open(dex_location, dex_location, &error_msg, dex_files)) {
- error_msgs->push_back(error_msg);
+ if (!DexFile::Open(dex_location, dex_location, &error_msg, &dex_files)) {
+ LOG(WARNING) << error_msg;
+ error_msgs->push_back("Failed to open dex files from "
+ + std::string(dex_location));
}
} else {
error_msgs->push_back("Fallback mode disabled, skipping dex files.");
}
- return false;
}
-
- // Try to load again, but stronger checks.
- success = LoadMultiDexFilesFromOatFile(open_oat_file.get(), dex_location,
- dex_location_checksum_pointer,
- true, error_msgs, dex_files);
- if (success) {
- RegisterOatFile(open_oat_file.release());
- return true;
- } else {
- return false;
- }
-}
-
-const OatFile* ClassLinker::FindOatFileInOatLocationForDexFile(const char* dex_location,
- uint32_t dex_location_checksum,
- const char* oat_location,
- std::string* error_msg) {
- std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_location, oat_location, nullptr, nullptr,
- !Runtime::Current()->IsAotCompiler(), error_msg));
- if (oat_file.get() == nullptr) {
- *error_msg = StringPrintf("Failed to find existing oat file at %s: %s", oat_location,
- error_msg->c_str());
- return nullptr;
- }
- Runtime* runtime = Runtime::Current();
- const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
- if (image_space != nullptr) {
- const ImageHeader& image_header = image_space->GetImageHeader();
- uint32_t expected_image_oat_checksum = image_header.GetOatChecksum();
- uint32_t actual_image_oat_checksum = oat_file->GetOatHeader().GetImageFileLocationOatChecksum();
- if (expected_image_oat_checksum != actual_image_oat_checksum) {
- *error_msg = StringPrintf("Failed to find oat file at '%s' with expected image oat checksum of "
- "0x%x, found 0x%x", oat_location, expected_image_oat_checksum,
- actual_image_oat_checksum);
- return nullptr;
- }
-
- uintptr_t expected_image_oat_offset = reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin());
- uint32_t actual_image_oat_offset = oat_file->GetOatHeader().GetImageFileLocationOatDataBegin();
- if (expected_image_oat_offset != actual_image_oat_offset) {
- *error_msg = StringPrintf("Failed to find oat file at '%s' with expected image oat offset %"
- PRIuPTR ", found %ud", oat_location, expected_image_oat_offset,
- actual_image_oat_offset);
- return nullptr;
- }
- int32_t expected_patch_delta = image_header.GetPatchDelta();
- int32_t actual_patch_delta = oat_file->GetOatHeader().GetImagePatchDelta();
- if (expected_patch_delta != actual_patch_delta) {
- *error_msg = StringPrintf("Failed to find oat file at '%s' with expected patch delta %d, "
- " found %d", oat_location, expected_patch_delta, actual_patch_delta);
- return nullptr;
- }
- }
-
- const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location,
- &dex_location_checksum);
- if (oat_dex_file == nullptr) {
- *error_msg = StringPrintf("Failed to find oat file at '%s' containing '%s'", oat_location,
- dex_location);
- return nullptr;
- }
- uint32_t expected_dex_checksum = dex_location_checksum;
- uint32_t actual_dex_checksum = oat_dex_file->GetDexFileLocationChecksum();
- if (expected_dex_checksum != actual_dex_checksum) {
- *error_msg = StringPrintf("Failed to find oat file at '%s' with expected dex checksum of 0x%x, "
- "found 0x%x", oat_location, expected_dex_checksum,
- actual_dex_checksum);
- return nullptr;
- }
- std::unique_ptr<const DexFile> dex_file(oat_dex_file->OpenDexFile(error_msg));
- if (dex_file.get() != nullptr) {
- return oat_file.release();
- } else {
- return nullptr;
- }
-}
-
-const OatFile* ClassLinker::CreateOatFileForDexLocation(const char* dex_location,
- int fd, const char* oat_location,
- std::vector<std::string>* error_msgs) {
- // Generate the output oat file for the dex file
- VLOG(class_linker) << "Generating oat file " << oat_location << " for " << dex_location;
- std::string error_msg;
- if (!GenerateOatFile(dex_location, fd, oat_location, &error_msg)) {
- CHECK(!error_msg.empty());
- error_msgs->push_back(error_msg);
- return nullptr;
- }
- std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_location, oat_location, nullptr, nullptr,
- !Runtime::Current()->IsAotCompiler(),
- &error_msg));
- if (oat_file.get() == nullptr) {
- std::string compound_msg = StringPrintf("\nFailed to open generated oat file '%s': %s",
- oat_location, error_msg.c_str());
- error_msgs->push_back(compound_msg);
- return nullptr;
- }
-
- return oat_file.release();
-}
-
-bool ClassLinker::VerifyOatImageChecksum(const OatFile* oat_file,
- const InstructionSet instruction_set) {
- Runtime* runtime = Runtime::Current();
- const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
- if (image_space == nullptr) {
- return false;
- }
- uint32_t image_oat_checksum = 0;
- if (instruction_set == kRuntimeISA) {
- const ImageHeader& image_header = image_space->GetImageHeader();
- image_oat_checksum = image_header.GetOatChecksum();
- } else {
- std::unique_ptr<ImageHeader> image_header(gc::space::ImageSpace::ReadImageHeaderOrDie(
- image_space->GetImageLocation().c_str(), instruction_set));
- image_oat_checksum = image_header->GetOatChecksum();
- }
- return oat_file->GetOatHeader().GetImageFileLocationOatChecksum() == image_oat_checksum;
-}
-
-bool ClassLinker::VerifyOatChecksums(const OatFile* oat_file,
- const InstructionSet instruction_set,
- std::string* error_msg) {
- Runtime* runtime = Runtime::Current();
- const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
- if (image_space == nullptr) {
- *error_msg = "No image space for verification against";
- return false;
- }
-
- // If the requested instruction set is the same as the current runtime,
- // we can use the checksums directly. If it isn't, we'll have to read the
- // image header from the image for the right instruction set.
- uint32_t image_oat_checksum = 0;
- uintptr_t image_oat_data_begin = 0;
- int32_t image_patch_delta = 0;
- if (instruction_set == runtime->GetInstructionSet()) {
- const ImageHeader& image_header = image_space->GetImageHeader();
- image_oat_checksum = image_header.GetOatChecksum();
- image_oat_data_begin = reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin());
- image_patch_delta = image_header.GetPatchDelta();
- } else {
- std::unique_ptr<ImageHeader> image_header(gc::space::ImageSpace::ReadImageHeaderOrDie(
- image_space->GetImageLocation().c_str(), instruction_set));
- image_oat_checksum = image_header->GetOatChecksum();
- image_oat_data_begin = reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin());
- image_patch_delta = image_header->GetPatchDelta();
- }
- const OatHeader& oat_header = oat_file->GetOatHeader();
- bool ret = (oat_header.GetImageFileLocationOatChecksum() == image_oat_checksum);
-
- // If the oat file is PIC, it doesn't care if/how image was relocated. Ignore these checks.
- if (!oat_file->IsPic()) {
- ret = ret && (oat_header.GetImagePatchDelta() == image_patch_delta)
- && (oat_header.GetImageFileLocationOatDataBegin() == image_oat_data_begin);
- }
- if (!ret) {
- *error_msg = StringPrintf("oat file '%s' mismatch (0x%x, %d, %d) with (0x%x, %" PRIdPTR ", %d)",
- oat_file->GetLocation().c_str(),
- oat_file->GetOatHeader().GetImageFileLocationOatChecksum(),
- oat_file->GetOatHeader().GetImageFileLocationOatDataBegin(),
- oat_file->GetOatHeader().GetImagePatchDelta(),
- image_oat_checksum, image_oat_data_begin, image_patch_delta);
- }
- return ret;
-}
-
-bool ClassLinker::VerifyOatAndDexFileChecksums(const OatFile* oat_file,
- const char* dex_location,
- uint32_t dex_location_checksum,
- const InstructionSet instruction_set,
- std::string* error_msg) {
- if (!VerifyOatChecksums(oat_file, instruction_set, error_msg)) {
- return false;
- }
-
- const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location,
- &dex_location_checksum);
- if (oat_dex_file == nullptr) {
- *error_msg = StringPrintf("oat file '%s' does not contain contents for '%s' with checksum 0x%x",
- oat_file->GetLocation().c_str(), dex_location, dex_location_checksum);
- for (const OatFile::OatDexFile* oat_dex_file_in : oat_file->GetOatDexFiles()) {
- *error_msg += StringPrintf("\noat file '%s' contains contents for '%s' with checksum 0x%x",
- oat_file->GetLocation().c_str(),
- oat_dex_file_in->GetDexFileLocation().c_str(),
- oat_dex_file_in->GetDexFileLocationChecksum());
- }
- return false;
- }
-
- DCHECK_EQ(dex_location_checksum, oat_dex_file->GetDexFileLocationChecksum());
- return true;
-}
-
-bool ClassLinker::VerifyOatWithDexFile(const OatFile* oat_file,
- const char* dex_location,
- const uint32_t* dex_location_checksum,
- std::string* error_msg) {
- CHECK(oat_file != nullptr);
- CHECK(dex_location != nullptr);
- std::unique_ptr<const DexFile> dex_file;
- if (dex_location_checksum == nullptr) {
-    // If no classes.dex is found in dex_location, it has been stripped or is corrupt; assume the
-    // oat file is up-to-date. This is the common case in user builds for jars and apks in the
-    // /system directory.
- const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location, nullptr);
- if (oat_dex_file == nullptr) {
- *error_msg = StringPrintf("Dex checksum mismatch for location '%s' and failed to find oat "
- "dex file '%s': %s", oat_file->GetLocation().c_str(), dex_location,
- error_msg->c_str());
- return false;
- }
- dex_file = oat_dex_file->OpenDexFile(error_msg);
- } else {
- bool verified = VerifyOatAndDexFileChecksums(oat_file, dex_location, *dex_location_checksum,
- kRuntimeISA, error_msg);
- if (!verified) {
- return false;
- }
- dex_file = oat_file->GetOatDexFile(dex_location,
- dex_location_checksum)->OpenDexFile(error_msg);
- }
- return dex_file.get() != nullptr;
-}
-
-const OatFile* ClassLinker::FindOatFileContainingDexFileFromDexLocation(
- const char* dex_location,
- const uint32_t* dex_location_checksum,
- InstructionSet isa,
- std::vector<std::string>* error_msgs,
- bool* obsolete_file_cleanup_failed) {
- *obsolete_file_cleanup_failed = false;
- bool already_opened = false;
- std::string dex_location_str(dex_location);
- std::unique_ptr<const OatFile> oat_file(OpenOatFileFromDexLocation(dex_location_str, isa,
- &already_opened,
- obsolete_file_cleanup_failed,
- error_msgs));
- std::string error_msg;
- if (oat_file.get() == nullptr) {
- error_msgs->push_back(StringPrintf("Failed to open oat file from dex location '%s'",
- dex_location));
- return nullptr;
- } else if (oat_file->IsExecutable() &&
- !VerifyOatWithDexFile(oat_file.get(), dex_location,
- dex_location_checksum, &error_msg)) {
- error_msgs->push_back(StringPrintf("Failed to verify oat file '%s' found for dex location "
- "'%s': %s", oat_file->GetLocation().c_str(), dex_location,
- error_msg.c_str()));
- return nullptr;
- } else if (!oat_file->IsExecutable() &&
- Runtime::Current()->GetHeap()->HasImageSpace() &&
- !VerifyOatImageChecksum(oat_file.get(), isa)) {
- error_msgs->push_back(StringPrintf("Failed to verify non-executable oat file '%s' found for "
- "dex location '%s'. Image checksum incorrect.",
- oat_file->GetLocation().c_str(), dex_location));
- return nullptr;
- } else {
- return oat_file.release();
- }
+ return dex_files;
}
const OatFile* ClassLinker::FindOpenedOatFileFromOatLocation(const std::string& oat_location) {
@@ -1294,335 +797,6 @@
return nullptr;
}
-const OatFile* ClassLinker::OpenOatFileFromDexLocation(const std::string& dex_location,
- InstructionSet isa,
- bool *already_opened,
- bool *obsolete_file_cleanup_failed,
- std::vector<std::string>* error_msgs) {
- // Find out if we've already opened the file
- const OatFile* ret = nullptr;
- std::string odex_filename(DexFilenameToOdexFilename(dex_location, isa));
- ret = FindOpenedOatFileFromOatLocation(odex_filename);
- if (ret != nullptr) {
- *already_opened = true;
- return ret;
- }
-
- std::string dalvik_cache;
- bool have_android_data = false;
- bool have_dalvik_cache = false;
- bool is_global_cache = false;
- GetDalvikCache(GetInstructionSetString(kRuntimeISA), false, &dalvik_cache,
- &have_android_data, &have_dalvik_cache, &is_global_cache);
- std::string cache_filename;
- if (have_dalvik_cache) {
- cache_filename = GetDalvikCacheFilenameOrDie(dex_location.c_str(), dalvik_cache.c_str());
- ret = FindOpenedOatFileFromOatLocation(cache_filename);
- if (ret != nullptr) {
- *already_opened = true;
- return ret;
- }
- } else {
- // If we need to relocate we should just place odex back where it started.
- cache_filename = odex_filename;
- }
-
- ret = nullptr;
-
-  // We know that neither the odex nor the cached version is already in use, if they even exist.
-  //
-  // Now we do the following:
-  // 1) Try to open the odex version
-  // 2) If present, checksum-verified, and relocated correctly, return it
-  // 3) Close the odex version to free up its address space.
-  // 4) Try to open the cache version
-  // 5) If present, checksum-verified, and relocated correctly, return it
-  // 6) Close the cache version to free up its address space.
-  // 7) If we should relocate:
-  //   a) If we have opened and checksum-verified the odex version, relocate it to
-  //      'cache_filename' and return it
-  //   b) If we have opened and checksum-verified the cache version, relocate it in place and
-  //      return it. This should not happen often (I think only run-tests will hit this case).
-  // 8) If the cache version was present, we should delete it since it must be obsolete if we get
-  //    to this point.
-  // 9) Return nullptr
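
Condensed, the nine steps above reduce to the control flow below. This is only a hedged sketch of the removed search order; OpenAndCheck and Relocate are invented stand-ins for the OatFile::Open/CheckOatFile and PatchAndRetrieveOat pairs used in the body, not functions in the tree.

    #include <string>
    #include <unistd.h>

    struct OatFile;  // Stand-in for art::OatFile.

    // Invented helpers: open + checksum-verify, and run patchoat. Assumptions only.
    const OatFile* OpenAndCheck(const std::string& path, bool* checksum_ok);
    const OatFile* Relocate(const std::string& input, const std::string& output);

    const OatFile* OpenBestOat(const std::string& odex, const std::string& cache,
                               bool have_cache, bool can_relocate) {
      bool odex_ok = false;
      bool cache_ok = false;
      if (const OatFile* f = OpenAndCheck(odex, &odex_ok)) return f;      // Steps 1-3.
      if (have_cache) {
        if (const OatFile* f = OpenAndCheck(cache, &cache_ok)) return f;  // Steps 4-6.
      }
      if (can_relocate) {                                                 // Step 7.
        if (odex_ok) return Relocate(odex, cache);                        //   7a.
        if (cache_ok) return Relocate(cache, cache);                      //   7b.
      }
      if (have_cache) unlink(cache.c_str());                              // Step 8.
      return nullptr;                                                     // Step 9.
    }
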
-
- *already_opened = false;
- const Runtime* runtime = Runtime::Current();
- CHECK(runtime != nullptr);
- bool executable = !runtime->IsAotCompiler();
-
- std::string odex_error_msg;
- bool should_patch_system = false;
- bool odex_checksum_verified = false;
- bool have_system_odex = false;
- {
-    // There is a high probability that both of these oat files map similar or identical address
-    // spaces, so we must scope them like this so that each gets its turn.
- std::unique_ptr<OatFile> odex_oat_file(OatFile::Open(odex_filename, odex_filename, nullptr,
- nullptr,
- executable, &odex_error_msg));
- if (odex_oat_file.get() != nullptr && CheckOatFile(runtime, odex_oat_file.get(), isa,
- &odex_checksum_verified,
- &odex_error_msg)) {
- return odex_oat_file.release();
- } else {
- if (odex_checksum_verified) {
- // We can just relocate
- should_patch_system = true;
- odex_error_msg = "Image Patches are incorrect";
- }
- if (odex_oat_file.get() != nullptr) {
- have_system_odex = true;
- }
- }
- }
-
- std::string cache_error_msg;
- bool should_patch_cache = false;
- bool cache_checksum_verified = false;
- if (have_dalvik_cache) {
- std::unique_ptr<OatFile> cache_oat_file(OatFile::Open(cache_filename, cache_filename, nullptr,
- nullptr,
- executable, &cache_error_msg));
- if (cache_oat_file.get() != nullptr && CheckOatFile(runtime, cache_oat_file.get(), isa,
- &cache_checksum_verified,
- &cache_error_msg)) {
- return cache_oat_file.release();
- } else if (cache_checksum_verified) {
- // We can just relocate
- should_patch_cache = true;
- cache_error_msg = "Image Patches are incorrect";
- }
- } else if (have_android_data) {
- // dalvik_cache does not exist but android data does. This means we should be able to create
- // it, so we should try.
- GetDalvikCacheOrDie(GetInstructionSetString(kRuntimeISA), true);
- }
-
- ret = nullptr;
- std::string error_msg;
- if (runtime->CanRelocate()) {
- // Run relocation
- gc::space::ImageSpace* space = Runtime::Current()->GetHeap()->GetImageSpace();
- if (space != nullptr) {
- const std::string& image_location = space->GetImageLocation();
- if (odex_checksum_verified && should_patch_system) {
- ret = PatchAndRetrieveOat(odex_filename, cache_filename, image_location, isa, &error_msg);
- } else if (cache_checksum_verified && should_patch_cache) {
- CHECK(have_dalvik_cache);
- ret = PatchAndRetrieveOat(cache_filename, cache_filename, image_location, isa, &error_msg);
- }
- } else if (have_system_odex) {
- ret = GetInterpretedOnlyOat(odex_filename, isa, &error_msg);
- }
- }
- if (ret == nullptr && have_dalvik_cache && OS::FileExists(cache_filename.c_str())) {
-    // Implicitly: we were able to find where the cached version is, but we were unable to use it,
-    // either as a destination for relocation or to open a file. We should delete it if it is
-    // there.
- if (TEMP_FAILURE_RETRY(unlink(cache_filename.c_str())) != 0) {
- std::string rm_error_msg = StringPrintf("Failed to remove obsolete file from %s when "
- "searching for dex file %s: %s",
- cache_filename.c_str(), dex_location.c_str(),
- strerror(errno));
- error_msgs->push_back(rm_error_msg);
- VLOG(class_linker) << rm_error_msg;
- // Let the caller know that we couldn't remove the obsolete file.
- // This is a good indication that further writes may fail as well.
- *obsolete_file_cleanup_failed = true;
- }
- }
- if (ret == nullptr) {
- VLOG(class_linker) << error_msg;
- error_msgs->push_back(error_msg);
- std::string relocation_msg;
- if (runtime->CanRelocate()) {
- relocation_msg = StringPrintf(" and relocation failed");
- }
- if (have_dalvik_cache && cache_checksum_verified) {
- error_msg = StringPrintf("Failed to open oat file from %s (error %s) or %s "
- "(error %s)%s.", odex_filename.c_str(), odex_error_msg.c_str(),
- cache_filename.c_str(), cache_error_msg.c_str(),
- relocation_msg.c_str());
- } else {
- error_msg = StringPrintf("Failed to open oat file from %s (error %s) (no "
- "dalvik_cache availible)%s.", odex_filename.c_str(),
- odex_error_msg.c_str(), relocation_msg.c_str());
- }
- VLOG(class_linker) << error_msg;
- error_msgs->push_back(error_msg);
- }
- return ret;
-}
-
-const OatFile* ClassLinker::GetInterpretedOnlyOat(const std::string& oat_path,
- InstructionSet isa,
- std::string* error_msg) {
- // We open it non-executable
- std::unique_ptr<OatFile> output(OatFile::Open(oat_path, oat_path, nullptr, nullptr, false, error_msg));
- if (output.get() == nullptr) {
- return nullptr;
- }
- if (!Runtime::Current()->GetHeap()->HasImageSpace() ||
- VerifyOatImageChecksum(output.get(), isa)) {
- return output.release();
- } else {
- *error_msg = StringPrintf("Could not use oat file '%s', image checksum failed to verify.",
- oat_path.c_str());
- return nullptr;
- }
-}
-
-const OatFile* ClassLinker::PatchAndRetrieveOat(const std::string& input_oat,
- const std::string& output_oat,
- const std::string& image_location,
- InstructionSet isa,
- std::string* error_msg) {
- Runtime* runtime = Runtime::Current();
- DCHECK(runtime != nullptr);
- if (!runtime->GetHeap()->HasImageSpace()) {
- // We don't have an image space so there is no point in trying to patchoat.
- LOG(WARNING) << "Patching of oat file '" << input_oat << "' not attempted because we are "
- << "running without an image. Attempting to use oat file for interpretation.";
- return GetInterpretedOnlyOat(input_oat, isa, error_msg);
- }
- if (!runtime->IsDex2OatEnabled()) {
-    // We don't have dex2oat, so we can assume we don't have patchoat either. We should just use
-    // the input_oat but make sure we only do interpretation on its dex files.
- LOG(WARNING) << "Patching of oat file '" << input_oat << "' not attempted due to dex2oat being "
- << "disabled. Attempting to use oat file for interpretation";
- return GetInterpretedOnlyOat(input_oat, isa, error_msg);
- }
- Locks::mutator_lock_->AssertNotHeld(Thread::Current()); // Avoid starving GC.
- std::string patchoat(runtime->GetPatchoatExecutable());
-
- std::string isa_arg("--instruction-set=");
- isa_arg += GetInstructionSetString(isa);
- std::string input_oat_filename_arg("--input-oat-file=");
- input_oat_filename_arg += input_oat;
- std::string output_oat_filename_arg("--output-oat-file=");
- output_oat_filename_arg += output_oat;
- std::string patched_image_arg("--patched-image-location=");
- patched_image_arg += image_location;
-
- std::vector<std::string> argv;
- argv.push_back(patchoat);
- argv.push_back(isa_arg);
- argv.push_back(input_oat_filename_arg);
- argv.push_back(output_oat_filename_arg);
- argv.push_back(patched_image_arg);
-
- std::string command_line(Join(argv, ' '));
- LOG(INFO) << "Relocate Oat File: " << command_line;
- bool success = Exec(argv, error_msg);
- if (success) {
- std::unique_ptr<OatFile> output(OatFile::Open(output_oat, output_oat, nullptr, nullptr,
- !runtime->IsAotCompiler(), error_msg));
- bool checksum_verified = false;
- if (output.get() != nullptr && CheckOatFile(runtime, output.get(), isa, &checksum_verified,
- error_msg)) {
- return output.release();
- } else if (output.get() != nullptr) {
- *error_msg = StringPrintf("Patching of oat file '%s' succeeded "
- "but output file '%s' failed verifcation: %s",
- input_oat.c_str(), output_oat.c_str(), error_msg->c_str());
- } else {
- *error_msg = StringPrintf("Patching of oat file '%s' succeeded "
- "but was unable to open output file '%s': %s",
- input_oat.c_str(), output_oat.c_str(), error_msg->c_str());
- }
- } else if (!runtime->IsAotCompiler()) {
-    // patchoat failed, which means we probably don't have enough room to place the output oat
-    // file. Instead of failing, we should just run the interpreter from the dex files in the
-    // input oat.
- LOG(WARNING) << "Patching of oat file '" << input_oat << "' failed. Attempting to use oat file "
- << "for interpretation. patchoat failure was: " << *error_msg;
- return GetInterpretedOnlyOat(input_oat, isa, error_msg);
- } else {
- *error_msg = StringPrintf("Patching of oat file '%s to '%s' "
- "failed: %s", input_oat.c_str(), output_oat.c_str(),
- error_msg->c_str());
- }
- return nullptr;
-}
-
-bool ClassLinker::CheckOatFile(const Runtime* runtime, const OatFile* oat_file, InstructionSet isa,
- bool* checksum_verified,
- std::string* error_msg) {
- const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
- if (image_space == nullptr) {
- *error_msg = "No image space present";
- return false;
- }
- uint32_t real_image_checksum;
- void* real_image_oat_offset;
- int32_t real_patch_delta;
- if (isa == runtime->GetInstructionSet()) {
- const ImageHeader& image_header = image_space->GetImageHeader();
- real_image_checksum = image_header.GetOatChecksum();
- real_image_oat_offset = image_header.GetOatDataBegin();
- real_patch_delta = image_header.GetPatchDelta();
- } else {
- std::unique_ptr<ImageHeader> image_header(gc::space::ImageSpace::ReadImageHeaderOrDie(
- image_space->GetImageLocation().c_str(), isa));
- real_image_checksum = image_header->GetOatChecksum();
- real_image_oat_offset = image_header->GetOatDataBegin();
- real_patch_delta = image_header->GetPatchDelta();
- }
-
- const OatHeader& oat_header = oat_file->GetOatHeader();
- std::string compound_msg;
-
- uint32_t oat_image_checksum = oat_header.GetImageFileLocationOatChecksum();
- *checksum_verified = oat_image_checksum == real_image_checksum;
- if (!*checksum_verified) {
- StringAppendF(&compound_msg, " Oat Image Checksum Incorrect (expected 0x%x, received 0x%x)",
- real_image_checksum, oat_image_checksum);
- }
-
- bool offset_verified;
- bool patch_delta_verified;
-
- if (!oat_file->IsPic()) {
- // If an oat file is not PIC, we need to check that the image is at the expected location and
- // patched in the same way.
- void* oat_image_oat_offset =
- reinterpret_cast<void*>(oat_header.GetImageFileLocationOatDataBegin());
- offset_verified = oat_image_oat_offset == real_image_oat_offset;
- if (!offset_verified) {
- StringAppendF(&compound_msg, " Oat Image oat offset incorrect (expected 0x%p, received 0x%p)",
- real_image_oat_offset, oat_image_oat_offset);
- }
-
- int32_t oat_patch_delta = oat_header.GetImagePatchDelta();
- patch_delta_verified = oat_patch_delta == real_patch_delta;
- if (!patch_delta_verified) {
- StringAppendF(&compound_msg, " Oat image patch delta incorrect (expected 0x%x, "
- "received 0x%x)", real_patch_delta, oat_patch_delta);
- }
- } else {
- // If an oat file is PIC, we ignore offset and patching delta.
- offset_verified = true;
- patch_delta_verified = true;
- }
-
- bool ret = (*checksum_verified && offset_verified && patch_delta_verified);
- if (!ret) {
- *error_msg = "Oat file failed to verify:" + compound_msg;
- }
- return ret;
-}
-
-const OatFile* ClassLinker::FindOatFileFromOatLocation(const std::string& oat_location,
- std::string* error_msg) {
- const OatFile* oat_file = FindOpenedOatFileFromOatLocation(oat_location);
- if (oat_file != nullptr) {
- return oat_file;
- }
- return OatFile::Open(oat_location, oat_location, nullptr, nullptr,
- !Runtime::Current()->IsAotCompiler(), error_msg);
-}
-
void ClassLinker::InitFromImageInterpretOnlyCallback(mirror::Object* obj, void* arg) {
ClassLinker* class_linker = reinterpret_cast<ClassLinker*>(arg);
DCHECK(obj != nullptr);
@@ -2195,7 +1369,7 @@
// expected and will be wrapped in a ClassNotFoundException. Use the pre-allocated error to
// trigger the chaining with a proper stack trace.
mirror::Throwable* pre_allocated = Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
- self->SetException(ThrowLocation(), pre_allocated);
+ self->SetException(pre_allocated);
return nullptr;
}
} else if (Runtime::Current()->UseCompileTimeClassPath()) {
@@ -2227,7 +1401,7 @@
} else {
// Use the pre-allocated NCDFE at compile time to avoid wasting time constructing exceptions.
mirror::Throwable* pre_allocated = Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
- self->SetException(ThrowLocation(), pre_allocated);
+ self->SetException(pre_allocated);
return nullptr;
}
} else {
@@ -2259,8 +1433,8 @@
return nullptr;
} else if (result.get() == nullptr) {
// broken loader - throw NPE to be compatible with Dalvik
- ThrowNullPointerException(nullptr, StringPrintf("ClassLoader.loadClass returned null for %s",
- class_name_string.c_str()).c_str());
+ ThrowNullPointerException(StringPrintf("ClassLoader.loadClass returned null for %s",
+ class_name_string.c_str()).c_str());
return nullptr;
} else {
// success, return mirror::Class*
@@ -3529,13 +2703,13 @@
PrettyDescriptor(klass.Get()).c_str(),
PrettyDescriptor(super.Get()).c_str()));
LOG(WARNING) << error_msg << " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8();
- Handle<mirror::Throwable> cause(hs.NewHandle(self->GetException(nullptr)));
+ Handle<mirror::Throwable> cause(hs.NewHandle(self->GetException()));
if (cause.Get() != nullptr) {
self->ClearException();
}
ThrowVerifyError(klass.Get(), "%s", error_msg.c_str());
if (cause.Get() != nullptr) {
- self->GetException(nullptr)->SetCause(cause.Get());
+ self->GetException()->SetCause(cause.Get());
}
ClassReference ref(klass->GetDexCache()->GetDexFile(), klass->GetDexClassDefIndex());
if (Runtime::Current()->IsAotCompiler()) {
@@ -4168,7 +3342,7 @@
<< PrettyDescriptor(handle_scope_super.Get())
<< " that has unexpected status " << handle_scope_super->GetStatus()
<< "\nPending exception:\n"
- << (self->GetException(nullptr) != nullptr ? self->GetException(nullptr)->Dump() : "");
+ << (self->GetException() != nullptr ? self->GetException()->Dump() : "");
ObjectLock<mirror::Class> lock(self, klass);
// Initialization failed because the super-class is erroneous.
klass->SetStatus(mirror::Class::kStatusError, self);
@@ -5671,12 +4845,12 @@
<< "Expected pending exception for failed resolution of: " << descriptor;
// Convert a ClassNotFoundException to a NoClassDefFoundError.
StackHandleScope<1> hs(self);
- Handle<mirror::Throwable> cause(hs.NewHandle(self->GetException(nullptr)));
+ Handle<mirror::Throwable> cause(hs.NewHandle(self->GetException()));
if (cause->InstanceOf(GetClassRoot(kJavaLangClassNotFoundException))) {
DCHECK(resolved == nullptr); // No Handle needed to preserve resolved.
self->ClearException();
ThrowNoClassDefFoundError("Failed resolution of: %s", descriptor);
- self->GetException(nullptr)->SetCause(cause.Get());
+ self->GetException()->SetCause(cause.Get());
}
}
}
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 6570c5f..75fbdf3 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -313,33 +313,25 @@
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Generate an oat file from a dex file
- bool GenerateOatFile(const char* dex_filename,
- int oat_fd,
- const char* oat_cache_filename,
- std::string* error_msg)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
-
- // Find or create the oat file holding dex_location. Then load all corresponding dex files
- // (if multidex) into the given vector.
- bool OpenDexFilesFromOat(const char* dex_location, const char* oat_location,
- std::vector<std::string>* error_msgs,
- std::vector<std::unique_ptr<const DexFile>>* dex_files)
+ // Finds or creates the oat file holding dex_location. Then loads and returns
+ // all corresponding dex files (there may be more than one dex file loaded
+ // in the case of multidex).
+ // This may return the original, unquickened dex files if the oat file could
+ // not be generated.
+ //
+ // Returns an empty vector if the dex files could not be loaded. In this
+  // case, there will be at least one error message returned describing why
+  // the dex files could not be loaded. The 'error_msgs' argument must not be
+  // null, regardless of whether there is an error.
+ //
+ // This method should not be called with the mutator_lock_ held, because it
+ // could end up starving GC if we need to generate or relocate any oat
+ // files.
+ std::vector<std::unique_ptr<const DexFile>> OpenDexFilesFromOat(
+ const char* dex_location, const char* oat_location,
+ std::vector<std::string>* error_msgs)
LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
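
A hedged sketch of what a caller of this new API looks like; the class_linker pointer, the paths, the null oat_location, and the logging are illustrative assumptions, not code from this change.

    std::vector<std::string> error_msgs;
    std::vector<std::unique_ptr<const DexFile>> dex_files =
        class_linker->OpenDexFilesFromOat("/data/app/example.apk",
                                          /* oat_location */ nullptr, &error_msgs);
    if (dex_files.empty()) {
      // Per the contract above, at least one message explains the failure.
      for (const std::string& msg : error_msgs) {
        LOG(WARNING) << msg;
      }
    }
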
- // Returns true if the given oat file has the same image checksum as the image it is paired with.
- static bool VerifyOatImageChecksum(const OatFile* oat_file, const InstructionSet instruction_set);
- // Returns true if the oat file checksums match with the image and the offsets are such that it
- // could be loaded with it.
- static bool VerifyOatChecksums(const OatFile* oat_file, const InstructionSet instruction_set,
- std::string* error_msg);
- // Returns true if oat file contains the dex file with the given location and checksum.
- static bool VerifyOatAndDexFileChecksums(const OatFile* oat_file,
- const char* dex_location,
- uint32_t dex_location_checksum,
- InstructionSet instruction_set,
- std::string* error_msg);
-
// Allocate an instance of a java.lang.Object.
mirror::Object* AllocObject(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -612,73 +604,9 @@
const uint32_t* dex_location_checksum)
LOCKS_EXCLUDED(dex_lock_);
- // Will open the oat file directly without relocating, even if we could/should do relocation.
- const OatFile* FindOatFileFromOatLocation(const std::string& oat_location,
- std::string* error_msg)
- LOCKS_EXCLUDED(dex_lock_);
-
const OatFile* FindOpenedOatFileFromOatLocation(const std::string& oat_location)
LOCKS_EXCLUDED(dex_lock_);
- const OatFile* OpenOatFileFromDexLocation(const std::string& dex_location,
- InstructionSet isa,
- bool* already_opened,
- bool* obsolete_file_cleanup_failed,
- std::vector<std::string>* error_msg)
- LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
-
- const OatFile* GetInterpretedOnlyOat(const std::string& oat_path,
- InstructionSet isa,
- std::string* error_msg);
-
- const OatFile* PatchAndRetrieveOat(const std::string& input, const std::string& output,
- const std::string& image_location, InstructionSet isa,
- std::string* error_msg)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
-
- bool CheckOatFile(const Runtime* runtime, const OatFile* oat_file, InstructionSet isa,
- bool* checksum_verified, std::string* error_msg);
-
- // Note: will not register the oat file.
- const OatFile* FindOatFileInOatLocationForDexFile(const char* dex_location,
- uint32_t dex_location_checksum,
- const char* oat_location,
- std::string* error_msg)
- LOCKS_EXCLUDED(dex_lock_);
-
- // Creates the oat file from the dex_location to the oat_location. Needs a file descriptor for
- // the file to be written, which is assumed to be under a lock.
- const OatFile* CreateOatFileForDexLocation(const char* dex_location,
- int fd, const char* oat_location,
- std::vector<std::string>* error_msgs)
- LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
-
- // Finds an OatFile that contains a DexFile for the given a DexFile location.
- //
- // Note 1: this will not check open oat files, which are assumed to be stale when this is run.
- // Note 2: Does not register the oat file. It is the caller's job to register if the file is to
- // be kept.
- const OatFile* FindOatFileContainingDexFileFromDexLocation(const char* dex_location,
- const uint32_t* dex_location_checksum,
- InstructionSet isa,
- std::vector<std::string>* error_msgs,
- bool* obsolete_file_cleanup_failed)
- LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
-
- // Verifies:
- // - that the oat file contains the dex file (with a matching checksum, which may be null if the
- // file was pre-opted)
- // - the checksums of the oat file (against the image space)
- // - the checksum of the dex file against dex_location_checksum
- // - that the dex file can be opened
- // Returns true iff all verification succeed.
- //
- // The dex_location is the dex location as stored in the oat file header.
- // (see DexFile::GetDexCanonicalLocation for a description of location conventions)
- bool VerifyOatWithDexFile(const OatFile* oat_file, const char* dex_location,
- const uint32_t* dex_location_checksum,
- std::string* error_msg);
-
mirror::ArtMethod* CreateProxyConstructor(Thread* self, Handle<mirror::Class> klass,
mirror::Class* proxy_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -803,7 +731,6 @@
friend class ImageWriter; // for GetClassRoots
friend class ImageDumper; // for FindOpenedOatFileFromOatLocation
- friend class ElfPatcher; // for FindOpenedOatFileForDexFile & FindOpenedOatFileFromOatLocation
friend class JniCompilerTest; // for GetRuntimeQuickGenericJniStub
friend class NoDex2OatTest; // for FindOpenedOatFileForDexFile
friend class NoPatchoatTest; // for FindOpenedOatFileForDexFile
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 64e129c..1789ab1 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -48,7 +48,7 @@
Thread* self = Thread::Current();
EXPECT_TRUE(class_linker_->FindSystemClass(self, descriptor.c_str()) == nullptr);
EXPECT_TRUE(self->IsExceptionPending());
- mirror::Object* exception = self->GetException(nullptr);
+ mirror::Object* exception = self->GetException();
self->ClearException();
mirror::Class* exception_class =
class_linker_->FindSystemClass(self, "Ljava/lang/NoClassDefFoundError;");
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index b7ffd60..e0d62d7 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -34,8 +34,10 @@
#include "gc_root-inl.h"
#include "gc/heap.h"
#include "gtest/gtest.h"
+#include "interpreter/unstarted_runtime.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
+#include "mem_map.h"
#include "noop_compiler_callbacks.h"
#include "os.h"
#include "runtime-inl.h"
@@ -107,6 +109,8 @@
CHECK_EQ(0, unlink_result);
}
+static bool unstarted_initialized_ = false;
+
CommonRuntimeTest::CommonRuntimeTest() {}
CommonRuntimeTest::~CommonRuntimeTest() {
// Ensure the dex files are cleaned up before the runtime.
@@ -194,6 +198,7 @@
std::unique_ptr<const DexFile> CommonRuntimeTest::LoadExpectSingleDexFile(const char* location) {
std::vector<std::unique_ptr<const DexFile>> dex_files;
std::string error_msg;
+ MemMap::Init();
if (!DexFile::Open(location, location, &error_msg, &dex_files)) {
LOG(FATAL) << "Could not open .dex file '" << location << "': " << error_msg << "\n";
UNREACHABLE();
@@ -225,13 +230,23 @@
options.push_back(std::make_pair("compilercallbacks", callbacks_.get()));
SetUpRuntimeOptions(&options);
+ PreRuntimeCreate();
if (!Runtime::Create(options, false)) {
LOG(FATAL) << "Failed to create runtime";
return;
}
+ PostRuntimeCreate();
runtime_.reset(Runtime::Current());
class_linker_ = runtime_->GetClassLinker();
class_linker_->FixupDexCaches(runtime_->GetResolutionMethod());
+
+ // Initialize maps for unstarted runtime. This needs to be here, as running clinits needs this
+ // set up.
+ if (!unstarted_initialized_) {
+ interpreter::UnstartedRuntimeInitialize();
+ unstarted_initialized_ = true;
+ }
+
class_linker_->RunRootClinits();
boot_class_path_ = class_linker_->GetBootClassPath();
java_lang_dex_file_ = boot_class_path_[0];
@@ -248,6 +263,8 @@
// pool is created by the runtime.
runtime_->GetHeap()->CreateThreadPool();
runtime_->GetHeap()->VerifyHeap(); // Check for heap corruption before the test
+  // Reduce timing-dependent flakiness in OOME behavior (e.g. StubTest.AllocObject).
+ runtime_->GetHeap()->SetMinIntervalHomogeneousSpaceCompactionByOom(0U);
// Get the boot class path from the runtime so it can be used in tests.
boot_class_path_ = class_linker_->GetBootClassPath();
@@ -335,7 +352,7 @@
#define ART_TARGET_NATIVETEST_DIR_STRING ""
#endif
-std::vector<std::unique_ptr<const DexFile>> CommonRuntimeTest::OpenTestDexFiles(const char* name) {
+std::string CommonRuntimeTest::GetTestDexFileName(const char* name) {
CHECK(name != nullptr);
std::string filename;
if (IsHost()) {
@@ -347,6 +364,11 @@
filename += "art-gtest-";
filename += name;
filename += ".jar";
+ return filename;
+}
+
+std::vector<std::unique_ptr<const DexFile>> CommonRuntimeTest::OpenTestDexFiles(const char* name) {
+ std::string filename = GetTestDexFileName(name);
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
bool success = DexFile::Open(filename.c_str(), filename.c_str(), &error_msg, &dex_files);
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 9efea84..cce8485 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -101,11 +101,19 @@
virtual void TearDown();
+ // Called before the runtime is created.
+ virtual void PreRuntimeCreate() {}
+
+ // Called after the runtime is created.
+ virtual void PostRuntimeCreate() {}
+
// Gets the path of the specified dex file for host or target.
static std::string GetDexFileName(const std::string& jar_prefix);
std::string GetTestAndroidRoot();
+ std::string GetTestDexFileName(const char* name);
+
std::vector<std::unique_ptr<const DexFile>> OpenTestDexFiles(const char* name)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
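
The PreRuntimeCreate/PostRuntimeCreate hooks above bracket Runtime::Create during SetUp. A minimal sketch of a fixture using them, assuming hypothetical names and bodies:

    class MyRuntimeTest : public CommonRuntimeTest {
     protected:
      void PreRuntimeCreate() OVERRIDE {
        // Runs before Runtime::Create; adjust process-wide state here.
      }
      void PostRuntimeCreate() OVERRIDE {
        // Runs once the runtime exists, before the rest of SetUp finishes.
      }
    };
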
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index f5b4354..36de221 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -43,7 +43,7 @@
}
}
-static void ThrowException(const ThrowLocation* throw_location, const char* exception_descriptor,
+static void ThrowException(const char* exception_descriptor,
mirror::Class* referrer, const char* fmt, va_list* args = NULL)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::ostringstream msg;
@@ -56,16 +56,10 @@
}
AddReferrerLocation(msg, referrer);
Thread* self = Thread::Current();
- if (throw_location == NULL) {
- ThrowLocation computed_throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewException(computed_throw_location, exception_descriptor, msg.str().c_str());
- } else {
- self->ThrowNewException(*throw_location, exception_descriptor, msg.str().c_str());
- }
+ self->ThrowNewException(exception_descriptor, msg.str().c_str());
}
-static void ThrowWrappedException(const ThrowLocation* throw_location,
- const char* exception_descriptor,
+static void ThrowWrappedException(const char* exception_descriptor,
mirror::Class* referrer, const char* fmt, va_list* args = NULL)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::ostringstream msg;
@@ -78,18 +72,13 @@
}
AddReferrerLocation(msg, referrer);
Thread* self = Thread::Current();
- if (throw_location == NULL) {
- ThrowLocation computed_throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewWrappedException(computed_throw_location, exception_descriptor, msg.str().c_str());
- } else {
- self->ThrowNewWrappedException(*throw_location, exception_descriptor, msg.str().c_str());
- }
+ self->ThrowNewWrappedException(exception_descriptor, msg.str().c_str());
}
// AbstractMethodError
void ThrowAbstractMethodError(mirror::ArtMethod* method) {
- ThrowException(NULL, "Ljava/lang/AbstractMethodError;", NULL,
+ ThrowException("Ljava/lang/AbstractMethodError;", NULL,
StringPrintf("abstract method \"%s\"",
PrettyMethod(method).c_str()).c_str());
}
@@ -97,20 +86,20 @@
// ArithmeticException
void ThrowArithmeticExceptionDivideByZero() {
- ThrowException(NULL, "Ljava/lang/ArithmeticException;", NULL, "divide by zero");
+ ThrowException("Ljava/lang/ArithmeticException;", NULL, "divide by zero");
}
// ArrayIndexOutOfBoundsException
void ThrowArrayIndexOutOfBoundsException(int index, int length) {
- ThrowException(NULL, "Ljava/lang/ArrayIndexOutOfBoundsException;", NULL,
+ ThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", NULL,
StringPrintf("length=%d; index=%d", length, index).c_str());
}
// ArrayStoreException
void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array_class) {
- ThrowException(NULL, "Ljava/lang/ArrayStoreException;", NULL,
+ ThrowException("Ljava/lang/ArrayStoreException;", NULL,
StringPrintf("%s cannot be stored in an array of type %s",
PrettyDescriptor(element_class).c_str(),
PrettyDescriptor(array_class).c_str()).c_str());
@@ -119,14 +108,14 @@
// ClassCastException
void ThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type) {
- ThrowException(NULL, "Ljava/lang/ClassCastException;", NULL,
+ ThrowException("Ljava/lang/ClassCastException;", NULL,
StringPrintf("%s cannot be cast to %s",
PrettyDescriptor(src_type).c_str(),
PrettyDescriptor(dest_type).c_str()).c_str());
}
-void ThrowClassCastException(const ThrowLocation* throw_location, const char* msg) {
- ThrowException(throw_location, "Ljava/lang/ClassCastException;", NULL, msg);
+void ThrowClassCastException(const char* msg) {
+ ThrowException("Ljava/lang/ClassCastException;", NULL, msg);
}
// ClassCircularityError
@@ -134,7 +123,7 @@
void ThrowClassCircularityError(mirror::Class* c) {
std::ostringstream msg;
msg << PrettyDescriptor(c);
- ThrowException(NULL, "Ljava/lang/ClassCircularityError;", c, msg.str().c_str());
+ ThrowException("Ljava/lang/ClassCircularityError;", c, msg.str().c_str());
}
// ClassFormatError
@@ -142,7 +131,7 @@
void ThrowClassFormatError(mirror::Class* referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowException(NULL, "Ljava/lang/ClassFormatError;", referrer, fmt, &args);
+ ThrowException("Ljava/lang/ClassFormatError;", referrer, fmt, &args);
va_end(args);
}
// IllegalAccessError
@@ -151,7 +140,7 @@
std::ostringstream msg;
msg << "Illegal class access: '" << PrettyDescriptor(referrer) << "' attempting to access '"
<< PrettyDescriptor(accessed) << "'";
- ThrowException(NULL, "Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
+ ThrowException("Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
}
void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed,
@@ -161,21 +150,21 @@
msg << "Illegal class access ('" << PrettyDescriptor(referrer) << "' attempting to access '"
<< PrettyDescriptor(accessed) << "') in attempt to invoke " << type
<< " method " << PrettyMethod(called).c_str();
- ThrowException(NULL, "Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
+ ThrowException("Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
}
void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, mirror::ArtMethod* accessed) {
std::ostringstream msg;
msg << "Method '" << PrettyMethod(accessed) << "' is inaccessible to class '"
<< PrettyDescriptor(referrer) << "'";
- ThrowException(NULL, "Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
+ ThrowException("Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
}
void ThrowIllegalAccessErrorField(mirror::Class* referrer, mirror::ArtField* accessed) {
std::ostringstream msg;
msg << "Field '" << PrettyField(accessed, false) << "' is inaccessible to class '"
<< PrettyDescriptor(referrer) << "'";
- ThrowException(NULL, "Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
+ ThrowException("Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
}
void ThrowIllegalAccessErrorFinalField(mirror::ArtMethod* referrer,
@@ -183,7 +172,7 @@
std::ostringstream msg;
msg << "Final field '" << PrettyField(accessed, false) << "' cannot be written to by method '"
<< PrettyMethod(referrer) << "'";
- ThrowException(NULL, "Ljava/lang/IllegalAccessError;",
+ ThrowException("Ljava/lang/IllegalAccessError;",
referrer != NULL ? referrer->GetClass() : NULL,
msg.str().c_str());
}
@@ -191,20 +180,20 @@
void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowException(NULL, "Ljava/lang/IllegalAccessError;", referrer, fmt, &args);
+ ThrowException("Ljava/lang/IllegalAccessError;", referrer, fmt, &args);
va_end(args);
}
// IllegalAccessException
-void ThrowIllegalAccessException(const ThrowLocation* throw_location, const char* msg) {
- ThrowException(throw_location, "Ljava/lang/IllegalAccessException;", NULL, msg);
+void ThrowIllegalAccessException(const char* msg) {
+ ThrowException("Ljava/lang/IllegalAccessException;", NULL, msg);
}
// IllegalArgumentException
-void ThrowIllegalArgumentException(const ThrowLocation* throw_location, const char* msg) {
- ThrowException(throw_location, "Ljava/lang/IllegalArgumentException;", NULL, msg);
+void ThrowIllegalArgumentException(const char* msg) {
+ ThrowException("Ljava/lang/IllegalArgumentException;", NULL, msg);
}
@@ -216,7 +205,7 @@
std::ostringstream msg;
msg << "The method '" << PrettyMethod(method) << "' was expected to be of type "
<< expected_type << " but instead was found to be of type " << found_type;
- ThrowException(NULL, "Ljava/lang/IncompatibleClassChangeError;",
+ ThrowException("Ljava/lang/IncompatibleClassChangeError;",
referrer != NULL ? referrer->GetClass() : NULL,
msg.str().c_str());
}
@@ -232,7 +221,7 @@
<< "' does not implement interface '"
<< PrettyDescriptor(interface_method->GetDeclaringClass())
<< "' in call to '" << PrettyMethod(interface_method) << "'";
- ThrowException(NULL, "Ljava/lang/IncompatibleClassChangeError;",
+ ThrowException("Ljava/lang/IncompatibleClassChangeError;",
referrer != NULL ? referrer->GetClass() : NULL,
msg.str().c_str());
}
@@ -243,14 +232,14 @@
msg << "Expected '" << PrettyField(resolved_field) << "' to be a "
<< (is_static ? "static" : "instance") << " field" << " rather than a "
<< (is_static ? "instance" : "static") << " field";
- ThrowException(NULL, "Ljava/lang/IncompatibleClassChangeError;", referrer->GetClass(),
+ ThrowException("Ljava/lang/IncompatibleClassChangeError;", referrer->GetClass(),
msg.str().c_str());
}
void ThrowIncompatibleClassChangeError(mirror::Class* referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowException(NULL, "Ljava/lang/IncompatibleClassChangeError;", referrer, fmt, &args);
+ ThrowException("Ljava/lang/IncompatibleClassChangeError;", referrer, fmt, &args);
va_end(args);
}
@@ -259,14 +248,14 @@
void ThrowIOException(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowException(NULL, "Ljava/io/IOException;", NULL, fmt, &args);
+ ThrowException("Ljava/io/IOException;", NULL, fmt, &args);
va_end(args);
}
void ThrowWrappedIOException(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowWrappedException(NULL, "Ljava/io/IOException;", NULL, fmt, &args);
+ ThrowWrappedException("Ljava/io/IOException;", NULL, fmt, &args);
va_end(args);
}
@@ -275,19 +264,19 @@
void ThrowLinkageError(mirror::Class* referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowException(NULL, "Ljava/lang/LinkageError;", referrer, fmt, &args);
+ ThrowException("Ljava/lang/LinkageError;", referrer, fmt, &args);
va_end(args);
}
// NegativeArraySizeException
void ThrowNegativeArraySizeException(int size) {
- ThrowException(NULL, "Ljava/lang/NegativeArraySizeException;", NULL,
+ ThrowException("Ljava/lang/NegativeArraySizeException;", NULL,
StringPrintf("%d", size).c_str());
}
void ThrowNegativeArraySizeException(const char* msg) {
- ThrowException(NULL, "Ljava/lang/NegativeArraySizeException;", NULL, msg);
+ ThrowException("Ljava/lang/NegativeArraySizeException;", NULL, msg);
}
// NoSuchFieldError
@@ -299,7 +288,7 @@
std::string temp;
msg << "No " << scope << "field " << name << " of type " << type
<< " in class " << c->GetDescriptor(&temp) << " or its superclasses";
- ThrowException(NULL, "Ljava/lang/NoSuchFieldError;", c, msg.str().c_str());
+ ThrowException("Ljava/lang/NoSuchFieldError;", c, msg.str().c_str());
}
// NoSuchMethodError
@@ -310,97 +299,91 @@
std::string temp;
msg << "No " << type << " method " << name << signature
<< " in class " << c->GetDescriptor(&temp) << " or its super classes";
- ThrowException(NULL, "Ljava/lang/NoSuchMethodError;", c, msg.str().c_str());
+ ThrowException("Ljava/lang/NoSuchMethodError;", c, msg.str().c_str());
}
void ThrowNoSuchMethodError(uint32_t method_idx) {
- Thread* self = Thread::Current();
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- mirror::DexCache* dex_cache = throw_location.GetMethod()->GetDeclaringClass()->GetDexCache();
+ mirror::ArtMethod* method = Thread::Current()->GetCurrentMethod(nullptr);
+ mirror::DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache();
const DexFile& dex_file = *dex_cache->GetDexFile();
std::ostringstream msg;
msg << "No method '" << PrettyMethod(method_idx, dex_file, true) << "'";
- ThrowException(&throw_location, "Ljava/lang/NoSuchMethodError;",
- throw_location.GetMethod()->GetDeclaringClass(), msg.str().c_str());
+ ThrowException("Ljava/lang/NoSuchMethodError;",
+ method->GetDeclaringClass(), msg.str().c_str());
}
// NullPointerException
-void ThrowNullPointerExceptionForFieldAccess(const ThrowLocation& throw_location,
- mirror::ArtField* field, bool is_read) {
+void ThrowNullPointerExceptionForFieldAccess(mirror::ArtField* field, bool is_read) {
std::ostringstream msg;
msg << "Attempt to " << (is_read ? "read from" : "write to")
<< " field '" << PrettyField(field, true) << "' on a null object reference";
- ThrowException(&throw_location, "Ljava/lang/NullPointerException;", NULL, msg.str().c_str());
+ ThrowException("Ljava/lang/NullPointerException;", NULL, msg.str().c_str());
}
-static void ThrowNullPointerExceptionForMethodAccessImpl(const ThrowLocation& throw_location,
- uint32_t method_idx,
+static void ThrowNullPointerExceptionForMethodAccessImpl(uint32_t method_idx,
const DexFile& dex_file,
InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::ostringstream msg;
msg << "Attempt to invoke " << type << " method '"
<< PrettyMethod(method_idx, dex_file, true) << "' on a null object reference";
- ThrowException(&throw_location, "Ljava/lang/NullPointerException;", NULL, msg.str().c_str());
+ ThrowException("Ljava/lang/NullPointerException;", NULL, msg.str().c_str());
}
-void ThrowNullPointerExceptionForMethodAccess(const ThrowLocation& throw_location,
- uint32_t method_idx,
+void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx,
InvokeType type) {
- mirror::DexCache* dex_cache = throw_location.GetMethod()->GetDeclaringClass()->GetDexCache();
+ mirror::DexCache* dex_cache =
+ Thread::Current()->GetCurrentMethod(nullptr)->GetDeclaringClass()->GetDexCache();
const DexFile& dex_file = *dex_cache->GetDexFile();
- ThrowNullPointerExceptionForMethodAccessImpl(throw_location, method_idx,
- dex_file, type);
+ ThrowNullPointerExceptionForMethodAccessImpl(method_idx, dex_file, type);
}
-void ThrowNullPointerExceptionForMethodAccess(const ThrowLocation& throw_location,
- mirror::ArtMethod* method,
+void ThrowNullPointerExceptionForMethodAccess(mirror::ArtMethod* method,
InvokeType type) {
mirror::DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache();
const DexFile& dex_file = *dex_cache->GetDexFile();
- ThrowNullPointerExceptionForMethodAccessImpl(throw_location, method->GetDexMethodIndex(),
+ ThrowNullPointerExceptionForMethodAccessImpl(method->GetDexMethodIndex(),
dex_file, type);
}
-void ThrowNullPointerExceptionFromDexPC(const ThrowLocation& throw_location) {
- const DexFile::CodeItem* code = throw_location.GetMethod()->GetCodeItem();
- uint32_t throw_dex_pc = throw_location.GetDexPc();
+void ThrowNullPointerExceptionFromDexPC() {
+ uint32_t throw_dex_pc;
+ mirror::ArtMethod* method = Thread::Current()->GetCurrentMethod(&throw_dex_pc);
+ const DexFile::CodeItem* code = method->GetCodeItem();
CHECK_LT(throw_dex_pc, code->insns_size_in_code_units_);
const Instruction* instr = Instruction::At(&code->insns_[throw_dex_pc]);
switch (instr->Opcode()) {
case Instruction::INVOKE_DIRECT:
- ThrowNullPointerExceptionForMethodAccess(throw_location, instr->VRegB_35c(), kDirect);
+ ThrowNullPointerExceptionForMethodAccess(instr->VRegB_35c(), kDirect);
break;
case Instruction::INVOKE_DIRECT_RANGE:
- ThrowNullPointerExceptionForMethodAccess(throw_location, instr->VRegB_3rc(), kDirect);
+ ThrowNullPointerExceptionForMethodAccess(instr->VRegB_3rc(), kDirect);
break;
case Instruction::INVOKE_VIRTUAL:
- ThrowNullPointerExceptionForMethodAccess(throw_location, instr->VRegB_35c(), kVirtual);
+ ThrowNullPointerExceptionForMethodAccess(instr->VRegB_35c(), kVirtual);
break;
case Instruction::INVOKE_VIRTUAL_RANGE:
- ThrowNullPointerExceptionForMethodAccess(throw_location, instr->VRegB_3rc(), kVirtual);
+ ThrowNullPointerExceptionForMethodAccess(instr->VRegB_3rc(), kVirtual);
break;
case Instruction::INVOKE_INTERFACE:
- ThrowNullPointerExceptionForMethodAccess(throw_location, instr->VRegB_35c(), kInterface);
+ ThrowNullPointerExceptionForMethodAccess(instr->VRegB_35c(), kInterface);
break;
case Instruction::INVOKE_INTERFACE_RANGE:
- ThrowNullPointerExceptionForMethodAccess(throw_location, instr->VRegB_3rc(), kInterface);
+ ThrowNullPointerExceptionForMethodAccess(instr->VRegB_3rc(), kInterface);
break;
case Instruction::INVOKE_VIRTUAL_QUICK:
case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
// Since we replaced the method index, we ask the verifier to tell us which
// method is invoked at this location.
- mirror::ArtMethod* method =
- verifier::MethodVerifier::FindInvokedMethodAtDexPc(throw_location.GetMethod(),
- throw_location.GetDexPc());
- if (method != NULL) {
+ mirror::ArtMethod* invoked_method =
+ verifier::MethodVerifier::FindInvokedMethodAtDexPc(method, throw_dex_pc);
+ if (invoked_method != NULL) {
// NPE with precise message.
- ThrowNullPointerExceptionForMethodAccess(throw_location, method, kVirtual);
+ ThrowNullPointerExceptionForMethodAccess(invoked_method, kVirtual);
} else {
// NPE with imprecise message.
- ThrowNullPointerException(&throw_location,
- "Attempt to invoke a virtual method on a null object reference");
+ ThrowNullPointerException("Attempt to invoke a virtual method on a null object reference");
}
break;
}
@@ -412,9 +395,8 @@
case Instruction::IGET_CHAR:
case Instruction::IGET_SHORT: {
mirror::ArtField* field =
- Runtime::Current()->GetClassLinker()->ResolveField(instr->VRegC_22c(),
- throw_location.GetMethod(), false);
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, true /* read */);
+ Runtime::Current()->GetClassLinker()->ResolveField(instr->VRegC_22c(), method, false);
+ ThrowNullPointerExceptionForFieldAccess(field, true /* read */);
break;
}
case Instruction::IGET_QUICK:
@@ -427,15 +409,13 @@
// Since we replaced the field index, we ask the verifier to tell us which
// field is accessed at this location.
mirror::ArtField* field =
- verifier::MethodVerifier::FindAccessedFieldAtDexPc(throw_location.GetMethod(),
- throw_location.GetDexPc());
+ verifier::MethodVerifier::FindAccessedFieldAtDexPc(method, throw_dex_pc);
if (field != NULL) {
// NPE with precise message.
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, true /* read */);
+ ThrowNullPointerExceptionForFieldAccess(field, true /* read */);
} else {
// NPE with imprecise message.
- ThrowNullPointerException(&throw_location,
- "Attempt to read from a field on a null object reference");
+ ThrowNullPointerException("Attempt to read from a field on a null object reference");
}
break;
}
@@ -447,9 +427,8 @@
case Instruction::IPUT_CHAR:
case Instruction::IPUT_SHORT: {
mirror::ArtField* field =
- Runtime::Current()->GetClassLinker()->ResolveField(instr->VRegC_22c(),
- throw_location.GetMethod(), false);
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, false /* write */);
+ Runtime::Current()->GetClassLinker()->ResolveField(instr->VRegC_22c(), method, false);
+ ThrowNullPointerExceptionForFieldAccess(field, false /* write */);
break;
}
case Instruction::IPUT_QUICK:
@@ -462,15 +441,13 @@
// Since we replaced the field index, we ask the verifier to tell us which
// field is accessed at this location.
mirror::ArtField* field =
- verifier::MethodVerifier::FindAccessedFieldAtDexPc(throw_location.GetMethod(),
- throw_location.GetDexPc());
+ verifier::MethodVerifier::FindAccessedFieldAtDexPc(method, throw_dex_pc);
if (field != NULL) {
// NPE with precise message.
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, false /* write */);
+ ThrowNullPointerExceptionForFieldAccess(field, false /* write */);
} else {
// NPE with imprecise message.
- ThrowNullPointerException(&throw_location,
- "Attempt to write to a field on a null object reference");
+ ThrowNullPointerException("Attempt to write to a field on a null object reference");
}
break;
}
@@ -481,7 +458,7 @@
case Instruction::AGET_BYTE:
case Instruction::AGET_CHAR:
case Instruction::AGET_SHORT:
- ThrowException(&throw_location, "Ljava/lang/NullPointerException;", NULL,
+ ThrowException("Ljava/lang/NullPointerException;", NULL,
"Attempt to read from null array");
break;
case Instruction::APUT:
@@ -491,28 +468,28 @@
case Instruction::APUT_BYTE:
case Instruction::APUT_CHAR:
case Instruction::APUT_SHORT:
- ThrowException(&throw_location, "Ljava/lang/NullPointerException;", NULL,
+ ThrowException("Ljava/lang/NullPointerException;", NULL,
"Attempt to write to null array");
break;
case Instruction::ARRAY_LENGTH:
- ThrowException(&throw_location, "Ljava/lang/NullPointerException;", NULL,
+ ThrowException("Ljava/lang/NullPointerException;", NULL,
"Attempt to get length of null array");
break;
default: {
// TODO: We should have covered all the cases where we expect an NPE above; this
// message/logging is here so we can improve any cases we've missed in the future.
- const DexFile& dex_file =
- *throw_location.GetMethod()->GetDeclaringClass()->GetDexCache()->GetDexFile();
- ThrowException(&throw_location, "Ljava/lang/NullPointerException;", NULL,
+ const DexFile* dex_file =
+ method->GetDeclaringClass()->GetDexCache()->GetDexFile();
+ ThrowException("Ljava/lang/NullPointerException;", NULL,
StringPrintf("Null pointer exception during instruction '%s'",
- instr->DumpString(&dex_file).c_str()).c_str());
+ instr->DumpString(dex_file).c_str()).c_str());
break;
}
}
}
-void ThrowNullPointerException(const ThrowLocation* throw_location, const char* msg) {
- ThrowException(throw_location, "Ljava/lang/NullPointerException;", NULL, msg);
+void ThrowNullPointerException(const char* msg) {
+ ThrowException("Ljava/lang/NullPointerException;", NULL, msg);
}
// RuntimeException
@@ -520,7 +497,7 @@
void ThrowRuntimeException(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowException(NULL, "Ljava/lang/RuntimeException;", NULL, fmt, &args);
+ ThrowException("Ljava/lang/RuntimeException;", NULL, fmt, &args);
va_end(args);
}
@@ -529,7 +506,7 @@
void ThrowVerifyError(mirror::Class* referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowException(NULL, "Ljava/lang/VerifyError;", referrer, fmt, &args);
+ ThrowException("Ljava/lang/VerifyError;", referrer, fmt, &args);
va_end(args);
}
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index ebedae0..9e749e3 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -29,7 +29,6 @@
} // namespace mirror
class Signature;
class StringPiece;
-class ThrowLocation;
// AbstractMethodError
@@ -60,7 +59,7 @@
void ThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowClassCastException(const ThrowLocation* throw_location, const char* msg)
+void ThrowClassCastException(const char* msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// ClassFormatError
@@ -94,12 +93,12 @@
// IllegalAccessException
-void ThrowIllegalAccessException(const ThrowLocation* throw_location, const char* msg)
+void ThrowIllegalAccessException(const char* msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// IllegalArgumentException
-void ThrowIllegalArgumentException(const ThrowLocation* throw_location, const char* msg)
+void ThrowIllegalArgumentException(const char* msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// IncompatibleClassChangeError
@@ -161,25 +160,22 @@
// NullPointerException
-void ThrowNullPointerExceptionForFieldAccess(const ThrowLocation& throw_location,
- mirror::ArtField* field,
+void ThrowNullPointerExceptionForFieldAccess(mirror::ArtField* field,
bool is_read)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowNullPointerExceptionForMethodAccess(const ThrowLocation& throw_location,
- uint32_t method_idx,
+void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx,
InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowNullPointerExceptionForMethodAccess(const ThrowLocation& throw_location,
- mirror::ArtMethod* method,
+void ThrowNullPointerExceptionForMethodAccess(mirror::ArtMethod* method,
InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowNullPointerExceptionFromDexPC(const ThrowLocation& throw_location)
+void ThrowNullPointerExceptionFromDexPC()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowNullPointerException(const ThrowLocation* throw_location, const char* msg)
+void ThrowNullPointerException(const char* msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// RuntimeException
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 246125b..9f2a09b 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -47,7 +47,6 @@
#include "ScopedPrimitiveArray.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
-#include "throw_location.h"
#include "utf.h"
#include "verifier/method_verifier-inl.h"
#include "well_known_classes.h"
@@ -280,11 +279,9 @@
Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
}
- void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, const ThrowLocation& throw_location,
- mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
- mirror::Throwable* exception_object)
+ void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, mirror::Throwable* exception_object)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Dbg::PostException(throw_location, catch_method, catch_dex_pc, exception_object);
+ Dbg::PostException(exception_object);
}
// We only care about how many backward branches were executed in the Jit.
@@ -349,26 +346,9 @@
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
void DebugInvokeReq::VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info) {
- if (receiver != nullptr) {
- callback(&receiver, arg, root_info);
- }
- if (thread != nullptr) {
- callback(&thread, arg, root_info);
- }
- if (klass != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&klass), arg, root_info);
- }
- if (method != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&method), arg, root_info);
- }
-}
-
-void DebugInvokeReq::Clear() {
- invoke_needed = false;
- receiver = nullptr;
- thread = nullptr;
- klass = nullptr;
- method = nullptr;
+ receiver.VisitRootIfNonNull(callback, arg, root_info); // null for static method call.
+ klass.VisitRoot(callback, arg, root_info);
+ method.VisitRoot(callback, arg, root_info);
}
void SingleStepControl::VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info) {
@@ -2785,19 +2765,110 @@
gJdwpState->PostFieldEvent(&location, f, this_object, field_value, true);
}
-void Dbg::PostException(const ThrowLocation& throw_location,
- mirror::ArtMethod* catch_method,
- uint32_t catch_dex_pc, mirror::Throwable* exception_object) {
+/**
+ * Finds the location where this exception will be caught. We search until we find a catch
+ * handler or reach the top frame; in the latter case the exception is considered uncaught.
+ */
+class CatchLocationFinder : public StackVisitor {
+ public:
+ CatchLocationFinder(Thread* self, const Handle<mirror::Throwable>& exception, Context* context)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : StackVisitor(self, context),
+ self_(self),
+ exception_(exception),
+ handle_scope_(self),
+ this_at_throw_(handle_scope_.NewHandle<mirror::Object>(nullptr)),
+ catch_method_(handle_scope_.NewHandle<mirror::ArtMethod>(nullptr)),
+ throw_method_(handle_scope_.NewHandle<mirror::ArtMethod>(nullptr)),
+ catch_dex_pc_(DexFile::kDexNoIndex),
+ throw_dex_pc_(DexFile::kDexNoIndex) {
+ }
+
+ bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* method = GetMethod();
+ DCHECK(method != nullptr);
+ if (method->IsRuntimeMethod()) {
+ // Ignore callee save method.
+ DCHECK(method->IsCalleeSaveMethod());
+ return true;
+ }
+
+ uint32_t dex_pc = GetDexPc();
+ if (throw_method_.Get() == nullptr) {
+ // First Java method found. It is either the method that threw the exception,
+ // or the Java native method that is reporting an exception thrown by
+ // native code.
+ this_at_throw_.Assign(GetThisObject());
+ throw_method_.Assign(method);
+ throw_dex_pc_ = dex_pc;
+ }
+
+ if (dex_pc != DexFile::kDexNoIndex) {
+ StackHandleScope<2> hs(self_);
+ uint32_t found_dex_pc;
+ Handle<mirror::Class> exception_class(hs.NewHandle(exception_->GetClass()));
+ Handle<mirror::ArtMethod> h_method(hs.NewHandle(method));
+ bool unused_clear_exception;
+ found_dex_pc = mirror::ArtMethod::FindCatchBlock(
+ h_method, exception_class, dex_pc, &unused_clear_exception);
+ if (found_dex_pc != DexFile::kDexNoIndex) {
+ catch_method_.Assign(method);
+ catch_dex_pc_ = found_dex_pc;
+ return false; // End stack walk.
+ }
+ }
+ return true; // Continue stack walk.
+ }
+
+ mirror::ArtMethod* GetCatchMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return catch_method_.Get();
+ }
+
+ mirror::ArtMethod* GetThrowMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return throw_method_.Get();
+ }
+
+ mirror::Object* GetThisAtThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return this_at_throw_.Get();
+ }
+
+ uint32_t GetCatchDexPc() const {
+ return catch_dex_pc_;
+ }
+
+ uint32_t GetThrowDexPc() const {
+ return throw_dex_pc_;
+ }
+
+ private:
+ Thread* const self_;
+ const Handle<mirror::Throwable>& exception_;
+ StackHandleScope<3> handle_scope_;
+ MutableHandle<mirror::Object> this_at_throw_;
+ MutableHandle<mirror::ArtMethod> catch_method_;
+ MutableHandle<mirror::ArtMethod> throw_method_;
+ uint32_t catch_dex_pc_;
+ uint32_t throw_dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(CatchLocationFinder);
+};
+
+void Dbg::PostException(mirror::Throwable* exception_object) {
if (!IsDebuggerActive()) {
return;
}
+ StackHandleScope<1> handle_scope(Thread::Current());
+ Handle<mirror::Throwable> h_exception(handle_scope.NewHandle(exception_object));
+ std::unique_ptr<Context> context(Context::Create());
+ CatchLocationFinder clf(Thread::Current(), h_exception, context.get());
+ clf.WalkStack(/* include_transitions */ false);
JDWP::EventLocation exception_throw_location;
- SetEventLocation(&exception_throw_location, throw_location.GetMethod(), throw_location.GetDexPc());
+ SetEventLocation(&exception_throw_location, clf.GetThrowMethod(), clf.GetThrowDexPc());
JDWP::EventLocation exception_catch_location;
- SetEventLocation(&exception_catch_location, catch_method, catch_dex_pc);
+ SetEventLocation(&exception_catch_location, clf.GetCatchMethod(), clf.GetCatchDexPc());
- gJdwpState->PostException(&exception_throw_location, exception_object, &exception_catch_location,
- throw_location.GetThis());
+ gJdwpState->PostException(&exception_throw_location, h_exception.Get(), &exception_catch_location,
+ clf.GetThisAtThrow());
}
void Dbg::PostClassPrepare(mirror::Class* c) {
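
Note: the CatchLocationFinder above is the heart of the ThrowLocation removal in this file. Instead of threading throw/catch coordinates through every event, the debugger recomputes both by walking the stack when the exception is posted. Below is a minimal standalone sketch of the same walk; the Frame type and its handler_dex_pc field are hypothetical stand-ins for ART's StackVisitor and ArtMethod::FindCatchBlock, not real API.

    #include <cstdint>
    #include <vector>

    static const uint32_t kNoIndex = 0xffffffffu;

    // Hypothetical stand-in for an ART stack frame; handler_dex_pc plays the
    // role of ArtMethod::FindCatchBlock's result for this frame.
    struct Frame {
      uint32_t dex_pc;
      uint32_t handler_dex_pc;  // kNoIndex if this frame has no matching handler
      bool runtime_method;      // callee-save frames are skipped
    };

    struct CatchLocation {
      const Frame* throw_frame = nullptr;  // first Java frame: the throw site
      const Frame* catch_frame = nullptr;  // stays null if the exception is uncaught
      uint32_t catch_dex_pc = kNoIndex;
    };

    // Walk from the innermost frame outward, as CatchLocationFinder::VisitFrame does.
    CatchLocation FindCatchLocation(const std::vector<Frame>& stack) {
      CatchLocation loc;
      for (const Frame& f : stack) {
        if (f.runtime_method) {
          continue;  // ignore callee-save methods
        }
        if (loc.throw_frame == nullptr) {
          loc.throw_frame = &f;  // first Java method found: the throw location
        }
        if (f.dex_pc != kNoIndex && f.handler_dex_pc != kNoIndex) {
          loc.catch_frame = &f;  // found a handler: end the stack walk
          loc.catch_dex_pc = f.handler_dex_pc;
          break;
        }
      }
      return loc;
    }
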
@@ -3428,10 +3499,14 @@
};
// Allocate single step.
- SingleStepControl* single_step_control = new SingleStepControl(step_size, step_depth,
- visitor.stack_depth,
- visitor.method);
- CHECK(single_step_control != nullptr) << "Failed to allocate SingleStepControl";
+ SingleStepControl* single_step_control =
+ new (std::nothrow) SingleStepControl(step_size, step_depth,
+ visitor.stack_depth, visitor.method);
+ if (single_step_control == nullptr) {
+ LOG(ERROR) << "Failed to allocate SingleStepControl";
+ return JDWP::ERR_OUT_OF_MEMORY;
+ }
+
mirror::ArtMethod* m = single_step_control->GetMethod();
const int32_t line_number = visitor.line_number;
if (!m->IsNative()) {
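
Note: the hunk above swaps an aborting CHECK for new (std::nothrow) plus an explicit JDWP error, so an allocation failure is reported to the debugger rather than killing the runtime. A minimal sketch of the pattern follows; the struct and error codes are illustrative, not ART's.

    #include <new>
    #include <cstdio>

    enum Error { ERR_NONE = 0, ERR_OUT_OF_MEMORY = 1 };

    struct SingleStepState { int depth; };

    // new (std::nothrow) returns nullptr on failure instead of throwing
    // std::bad_alloc, letting the caller surface a protocol-level error.
    Error AllocateSingleStep(int depth, SingleStepState** out) {
      SingleStepState* state = new (std::nothrow) SingleStepState{depth};
      if (state == nullptr) {
        std::fprintf(stderr, "Failed to allocate SingleStepState\n");
        return ERR_OUT_OF_MEMORY;
      }
      *out = state;
      return ERR_NONE;
    }
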
@@ -3508,7 +3583,7 @@
ThreadList* thread_list = Runtime::Current()->GetThreadList();
Thread* targetThread = nullptr;
- DebugInvokeReq* req = nullptr;
+ std::unique_ptr<DebugInvokeReq> req;
Thread* self = Thread::Current();
{
ScopedObjectAccessUnchecked soa(self);
@@ -3519,8 +3594,13 @@
LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id;
return error;
}
- req = targetThread->GetInvokeReq();
- if (!req->ready) {
+ if (targetThread->GetInvokeReq() != nullptr) {
+ // Thread is already invoking a method on behalf of the debugger.
+ LOG(ERROR) << "InvokeMethod request for thread already invoking a method: " << *targetThread;
+ return JDWP::ERR_ALREADY_INVOKING;
+ }
+ if (!targetThread->IsReadyForDebugInvoke()) {
+ // Thread is not suspended by an event so it cannot invoke a method.
LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread;
return JDWP::ERR_INVALID_THREAD;
}
@@ -3554,11 +3634,10 @@
return JDWP::ERR_INVALID_OBJECT;
}
- mirror::Object* thread = gRegistry->Get<mirror::Object*>(thread_id, &error);
+ gRegistry->Get<mirror::Object*>(thread_id, &error);
if (error != JDWP::ERR_NONE) {
return JDWP::ERR_INVALID_OBJECT;
}
- // TODO: check that 'thread' is actually a java.lang.Thread!
mirror::Class* c = DecodeClass(class_id, &error);
if (c == nullptr) {
@@ -3616,14 +3695,17 @@
}
}
- req->receiver = receiver;
- req->thread = thread;
- req->klass = c;
- req->method = m;
- req->arg_count = arg_count;
- req->arg_values = arg_values;
- req->options = options;
- req->invoke_needed = true;
+ // Allocates a DebugInvokeReq.
+ req.reset(new (std::nothrow) DebugInvokeReq(receiver, c, m, options, arg_values, arg_count));
+ if (req.get() == nullptr) {
+ LOG(ERROR) << "Failed to allocate DebugInvokeReq";
+ return JDWP::ERR_OUT_OF_MEMORY;
+ }
+
+ // Attach the DebugInvokeReq to the target thread so it executes the method when
+ // it is resumed. Once the invocation completes, the thread will detach the request
+ // and signal us before suspending itself.
+ targetThread->SetDebugInvokeReq(req.get());
}
// The fact that we've released the thread list lock is a bit risky --- if the thread goes
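
Note: the request lifecycle changes here. The JDWP thread now owns the DebugInvokeReq through a unique_ptr, attaches it to the suspended target, and waits for the target to detach it. A rough sketch of that handoff using standard synchronization primitives; all names are hypothetical and the actual suspend/resume machinery is elided.

    #include <condition_variable>
    #include <memory>
    #include <mutex>

    // Hypothetical invoke request: the JDWP thread fills it in, the target
    // thread executes it on resume and clears the pointer when done.
    struct InvokeReq {
      std::mutex lock;
      std::condition_variable cond;
      long result = 0;
    };

    struct TargetThread {
      InvokeReq* pending_req = nullptr;  // at most one request at a time
    };

    // JDWP side: attach the request and block until the target detaches it.
    long InvokeOnTarget(TargetThread& target, std::unique_ptr<InvokeReq> req) {
      InvokeReq* raw = req.get();
      target.pending_req = raw;  // the target picks this up when resumed
      // ... resume the target thread here ...
      std::unique_lock<std::mutex> lk(raw->lock);
      raw->cond.wait(lk, [&] { return target.pending_req == nullptr; });
      return raw->result;  // the request is still owned by the unique_ptr
    }

    // Target side: execute, detach, and signal the waiting JDWP thread.
    void CompleteInvoke(TargetThread& target, long result) {
      InvokeReq* req = target.pending_req;
      {
        std::lock_guard<std::mutex> lk(req->lock);
        req->result = result;
        target.pending_req = nullptr;  // detach before signalling
      }
      req->cond.notify_one();
    }
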
@@ -3657,7 +3739,7 @@
gJdwpState->ReleaseJdwpTokenForCommand();
// Wait for the request to finish executing.
- while (req->invoke_needed) {
+ while (targetThread->GetInvokeReq() != nullptr) {
req->cond.Wait(self);
}
}
@@ -3690,11 +3772,7 @@
// Copy the result.
*pResultTag = req->result_tag;
- if (IsPrimitiveTag(req->result_tag)) {
- *pResultValue = req->result_value.GetJ();
- } else {
- *pResultValue = gRegistry->Add(req->result_value.GetL());
- }
+ *pResultValue = req->result_value;
*pExceptionId = req->exception;
return req->error;
}
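
Note: result_value becomes a plain uint64_t holding either raw primitive bits or an already-registered ObjectId, so the JDWP thread copies it without any further registry work. A small sketch of that encoding convention; the types here are illustrative only.

    #include <cstdint>

    enum Tag : uint8_t { JT_VOID, JT_LONG, JT_OBJECT };

    // Hypothetical registry handle; in ART this is a JDWP ObjectId.
    using ObjectId = uint64_t;

    struct InvokeResult {
      Tag tag;
      uint64_t value;  // raw primitive bits, or an ObjectId when tag == JT_OBJECT
    };

    // Encoding happens on the target thread: objects are registered eagerly,
    // so the value is already GC-safe by the time the JDWP thread reads it.
    InvokeResult EncodePrimitive(int64_t j) {
      return {JT_LONG, static_cast<uint64_t>(j)};
    }
    InvokeResult EncodeObject(ObjectId id) {
      return {JT_OBJECT, id};
    }

    // Decoding on the JDWP thread needs no registry call in either case.
    uint64_t Decode(const InvokeResult& r) { return r.value; }
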
@@ -3705,71 +3783,58 @@
// We can be called while an exception is pending. We need
// to preserve that across the method invocation.
StackHandleScope<4> hs(soa.Self());
- auto old_throw_this_object = hs.NewHandle<mirror::Object>(nullptr);
- auto old_throw_method = hs.NewHandle<mirror::ArtMethod>(nullptr);
- auto old_exception = hs.NewHandle<mirror::Throwable>(nullptr);
- uint32_t old_throw_dex_pc;
- {
- ThrowLocation old_throw_location;
- mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location);
- old_throw_this_object.Assign(old_throw_location.GetThis());
- old_throw_method.Assign(old_throw_location.GetMethod());
- old_exception.Assign(old_exception_obj);
- old_throw_dex_pc = old_throw_location.GetDexPc();
- soa.Self()->ClearException();
- }
+ auto old_exception = hs.NewHandle<mirror::Throwable>(soa.Self()->GetException());
+ soa.Self()->ClearException();
// Translate the method through the vtable, unless the debugger wants to suppress it.
- MutableHandle<mirror::ArtMethod> m(hs.NewHandle(pReq->method));
- if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver != nullptr) {
- mirror::ArtMethod* actual_method = pReq->klass->FindVirtualMethodForVirtualOrInterface(m.Get());
+ MutableHandle<mirror::ArtMethod> m(hs.NewHandle(pReq->method.Read()));
+ if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver.Read() != nullptr) {
+ mirror::ArtMethod* actual_method = pReq->klass.Read()->FindVirtualMethodForVirtualOrInterface(m.Get());
if (actual_method != m.Get()) {
- VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.Get()) << " to " << PrettyMethod(actual_method);
+ VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.Get())
+ << " to " << PrettyMethod(actual_method);
m.Assign(actual_method);
}
}
VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m.Get())
- << " receiver=" << pReq->receiver
+ << " receiver=" << pReq->receiver.Read()
<< " arg_count=" << pReq->arg_count;
CHECK(m.Get() != nullptr);
CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
- pReq->result_value = InvokeWithJValues(soa, pReq->receiver, soa.EncodeMethod(m.Get()),
- reinterpret_cast<jvalue*>(pReq->arg_values));
+ JValue result = InvokeWithJValues(soa, pReq->receiver.Read(), soa.EncodeMethod(m.Get()),
+ reinterpret_cast<jvalue*>(pReq->arg_values));
- mirror::Throwable* exception = soa.Self()->GetException(nullptr);
- soa.Self()->ClearException();
- pReq->exception = gRegistry->Add(exception);
pReq->result_tag = BasicTagFromDescriptor(m.Get()->GetShorty());
+ const bool is_object_result = (pReq->result_tag == JDWP::JT_OBJECT);
+ Handle<mirror::Object> object_result = hs.NewHandle(is_object_result ? result.GetL() : nullptr);
+ Handle<mirror::Throwable> exception = hs.NewHandle(soa.Self()->GetException());
+ soa.Self()->ClearException();
+ pReq->exception = gRegistry->Add(exception.Get());
if (pReq->exception != 0) {
- VLOG(jdwp) << " JDWP invocation returning with exception=" << exception
- << " " << exception->Dump();
- pReq->result_value.SetJ(0);
- } else if (pReq->result_tag == JDWP::JT_OBJECT) {
+ VLOG(jdwp) << " JDWP invocation returning with exception=" << exception.Get()
+ << " " << exception->Dump();
+ pReq->result_value = 0;
+ } else if (is_object_result) {
/* if no exception thrown, examine object result more closely */
- JDWP::JdwpTag new_tag = TagFromObject(soa, pReq->result_value.GetL());
+ JDWP::JdwpTag new_tag = TagFromObject(soa, object_result.Get());
if (new_tag != pReq->result_tag) {
VLOG(jdwp) << " JDWP promoted result from " << pReq->result_tag << " to " << new_tag;
pReq->result_tag = new_tag;
}
- /*
- * Register the object. We don't actually need an ObjectId yet,
- * but we do need to be sure that the GC won't move or discard the
- * object when we switch out of RUNNING. The ObjectId conversion
- * will add the object to the "do not touch" list.
- *
- * We can't use the "tracked allocation" mechanism here because
- * the object is going to be handed off to a different thread.
- */
- gRegistry->Add(pReq->result_value.GetL());
+ // Register the object in the registry and reference its ObjectId. This ensures
+ // GC safety and prevents accessing a stale reference if the object is moved.
+ pReq->result_value = gRegistry->Add(object_result.Get());
+ } else {
+ // Primitive result.
+ DCHECK(IsPrimitiveTag(pReq->result_tag));
+ pReq->result_value = result.GetJ();
}
if (old_exception.Get() != nullptr) {
- ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(),
- old_throw_dex_pc);
- soa.Self()->SetException(gc_safe_throw_location, old_exception.Get());
+ soa.Self()->SetException(old_exception.Get());
}
}
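
Note: with ThrowLocation gone, preserving a pending exception across the invocation reduces to save/clear/restore of the Throwable itself. The sketch below captures that shape as an RAII guard; the real code must additionally hold the exception in a StackHandleScope for GC safety, which this standalone version omits.

    // Hypothetical minimal thread state; art::Thread keeps the real one.
    struct Throwable;
    struct ThreadState {
      Throwable* exception = nullptr;
      Throwable* GetException() const { return exception; }
      void ClearException() { exception = nullptr; }
      void SetException(Throwable* t) { exception = t; }
    };

    // RAII guard: saves any pending exception, clears it for the scope,
    // and restores it on destruction - the shape ExecuteMethod now follows.
    class SavedException {
     public:
      explicit SavedException(ThreadState* self)
          : self_(self), saved_(self->GetException()) {
        self_->ClearException();
      }
      ~SavedException() {
        if (saved_ != nullptr) {
          self_->SetException(saved_);
        }
      }
     private:
      ThreadState* const self_;
      Throwable* const saved_;
    };
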
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 0c22148..01c9d5d 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -48,40 +48,33 @@
class ScopedObjectAccessUnchecked;
class StackVisitor;
class Thread;
-class ThrowLocation;
/*
* Invoke-during-breakpoint support.
*/
struct DebugInvokeReq {
- DebugInvokeReq()
- : ready(false), invoke_needed(false),
- receiver(NULL), thread(NULL), klass(NULL), method(NULL),
- arg_count(0), arg_values(NULL), options(0), error(JDWP::ERR_NONE),
- result_tag(JDWP::JT_VOID), exception(0),
+ DebugInvokeReq(mirror::Object* invoke_receiver, mirror::Class* invoke_class,
+ mirror::ArtMethod* invoke_method, uint32_t invoke_options,
+ uint64_t* args, uint32_t args_count)
+ : receiver(invoke_receiver), klass(invoke_class), method(invoke_method),
+ arg_count(args_count), arg_values(args), options(invoke_options),
+ error(JDWP::ERR_NONE), result_tag(JDWP::JT_VOID), result_value(0), exception(0),
lock("a DebugInvokeReq lock", kBreakpointInvokeLock),
cond("a DebugInvokeReq condition variable", lock) {
}
- /* boolean; only set when we're in the tail end of an event handler */
- bool ready;
-
- /* boolean; set if the JDWP thread wants this thread to do work */
- bool invoke_needed;
-
/* request */
- mirror::Object* receiver; /* not used for ClassType.InvokeMethod */
- mirror::Object* thread;
- mirror::Class* klass;
- mirror::ArtMethod* method;
- uint32_t arg_count;
- uint64_t* arg_values; /* will be NULL if arg_count_ == 0 */
- uint32_t options;
+ GcRoot<mirror::Object> receiver; // not used for ClassType.InvokeMethod
+ GcRoot<mirror::Class> klass;
+ GcRoot<mirror::ArtMethod> method;
+ const uint32_t arg_count;
+ uint64_t* const arg_values; // will be NULL if arg_count == 0
+ const uint32_t options;
/* result */
JDWP::JdwpError error;
JDWP::JdwpTag result_tag;
- JValue result_value;
+ uint64_t result_value; // either a primitive value or an ObjectId
JDWP::ObjectId exception;
/* condition variable to wait on while the method executes */
@@ -91,8 +84,6 @@
void VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void Clear();
-
private:
DISALLOW_COPY_AND_ASSIGN(DebugInvokeReq);
};
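
Note: the raw mirror pointers in DebugInvokeReq become GcRoot<> so a moving collector can relocate them through VisitRoots, and the scalar fields turn const since a request is immutable once built. A hypothetical Root wrapper sketching that visiting pattern (not ART's GcRoot implementation):

    #include <functional>

    struct Object;
    using RootVisitor = std::function<void(Object**)>;

    // Hypothetical stand-in for art::GcRoot<T>: the GC visits the slot and
    // may rewrite the pointer when the referenced object moves.
    template <typename T>
    class Root {
     public:
      explicit Root(T* ref) : ref_(ref) {}
      T* Read() const { return ref_; }
      void VisitRoot(const RootVisitor& v) {
        v(reinterpret_cast<Object**>(&ref_));
      }
      void VisitRootIfNonNull(const RootVisitor& v) {
        if (ref_ != nullptr) VisitRoot(v);
      }
     private:
      T* ref_;
    };

    struct Method;
    struct Request {
      Root<Object> receiver;  // may be null for a static call
      Root<Method> method;

      void VisitRoots(const RootVisitor& v) {
        receiver.VisitRootIfNonNull(v);  // null for static method call
        method.VisitRoot(v);
      }
    };
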
@@ -529,8 +520,7 @@
mirror::Object* this_object, mirror::ArtField* f,
const JValue* field_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void PostException(const ThrowLocation& throw_location, mirror::ArtMethod* catch_method,
- uint32_t catch_dex_pc, mirror::Throwable* exception)
+ static void PostException(mirror::Throwable* exception)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void PostThreadStart(Thread* t)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -582,6 +572,8 @@
LOCKS_EXCLUDED(Locks::thread_list_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Invoke support for commands ClassType.InvokeMethod, ClassType.NewInstance and
+ // ObjectReference.InvokeMethod.
static JDWP::JdwpError InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId object_id,
JDWP::RefTypeId class_id, JDWP::MethodId method_id,
uint32_t arg_count, uint64_t* arg_values,
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 9d84e4a..8a13d34 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -53,9 +53,7 @@
}
if (kAccessCheck) {
if (UNLIKELY(!klass->IsInstantiable())) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewException(throw_location, "Ljava/lang/InstantiationError;",
- PrettyDescriptor(klass).c_str());
+ self->ThrowNewException("Ljava/lang/InstantiationError;", PrettyDescriptor(klass).c_str());
*slow_path = true;
return nullptr; // Failure
}
@@ -294,9 +292,7 @@
} else {
if (UNLIKELY(resolved_field->IsPrimitiveType() != is_primitive ||
resolved_field->FieldSize() != expected_size)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- DCHECK(throw_location.GetMethod() == referrer);
- self->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;",
+ self->ThrowNewExceptionF("Ljava/lang/NoSuchFieldError;",
"Attempted read of %zd-bit %s on field '%s'",
expected_size * (32 / sizeof(int32_t)),
is_primitive ? "primitive" : "non-primitive",
@@ -367,9 +363,7 @@
} else if (UNLIKELY(*this_object == nullptr && type != kStatic)) {
// Maintain interpreter-like semantics where NullPointerException is thrown
// after potential NoSuchMethodError from class linker.
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- DCHECK_EQ(*referrer, throw_location.GetMethod());
- ThrowNullPointerExceptionForMethodAccess(throw_location, method_idx, type);
+ ThrowNullPointerExceptionForMethodAccess(method_idx, type);
return nullptr; // Failure.
} else if (access_check) {
// Incompatible class change should have been handled in resolve method.
@@ -613,9 +607,8 @@
inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) {
// Save any pending exception over monitor exit call.
mirror::Throwable* saved_exception = NULL;
- ThrowLocation saved_throw_location;
if (UNLIKELY(self->IsExceptionPending())) {
- saved_exception = self->GetException(&saved_throw_location);
+ saved_exception = self->GetException();
self->ClearException();
}
// Decode locked object and unlock, before popping local references.
@@ -624,11 +617,11 @@
LOG(FATAL) << "Synchronized JNI code returning with an exception:\n"
<< saved_exception->Dump()
<< "\nEncountered second exception during implicit MonitorExit:\n"
- << self->GetException(NULL)->Dump();
+ << self->GetException()->Dump();
}
// Restore pending exception.
if (saved_exception != NULL) {
- self->SetException(saved_throw_location, saved_exception);
+ self->SetException(saved_exception);
}
}
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 5ea9f70..70e2851 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -55,10 +55,8 @@
ThrowRuntimeException("Bad filled array request for type %s",
PrettyDescriptor(klass).c_str());
} else {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- DCHECK(throw_location.GetMethod() == referrer);
self->ThrowNewExceptionF(
- throw_location, "Ljava/lang/InternalError;",
+ "Ljava/lang/InternalError;",
"Found type %s; filled-new-array not implemented for anything but 'int'",
PrettyDescriptor(klass).c_str());
}
@@ -187,8 +185,7 @@
error_msg = "Could not create stack trace.";
}
// Throw the exception.
- self->SetException(self->GetCurrentLocationForThrow(),
- reinterpret_cast<mirror::Throwable*>(self->DecodeJObject(exc.get())));
+ self->SetException(reinterpret_cast<mirror::Throwable*>(self->DecodeJObject(exc.get())));
} else {
// Could not allocate a string object.
error_msg = "Couldn't throw new StackOverflowError because JNI NewStringUTF failed.";
@@ -282,18 +279,8 @@
// This can cause thread suspension.
mirror::Class* result_type = h_interface_method->GetReturnType();
mirror::Object* result_ref = soa.Decode<mirror::Object*>(result);
- mirror::Object* rcvr = soa.Decode<mirror::Object*>(rcvr_jobj);
- mirror::ArtMethod* proxy_method;
- if (h_interface_method->GetDeclaringClass()->IsInterface()) {
- proxy_method = rcvr->GetClass()->FindVirtualMethodForInterface(h_interface_method.Get());
- } else {
- // Proxy dispatch to a method defined in Object.
- DCHECK(h_interface_method->GetDeclaringClass()->IsObjectClass());
- proxy_method = h_interface_method.Get();
- }
- ThrowLocation throw_location(rcvr, proxy_method, -1);
JValue result_unboxed;
- if (!UnboxPrimitiveForResult(throw_location, result_ref, result_type, &result_unboxed)) {
+ if (!UnboxPrimitiveForResult(result_ref, result_type, &result_unboxed)) {
DCHECK(soa.Self()->IsExceptionPending());
return zero;
}
@@ -302,7 +289,7 @@
} else {
// In the case of checked exceptions that aren't declared, the exception must be wrapped by
// a UndeclaredThrowableException.
- mirror::Throwable* exception = soa.Self()->GetException(NULL);
+ mirror::Throwable* exception = soa.Self()->GetException();
if (exception->IsCheckedException()) {
mirror::Object* rcvr = soa.Decode<mirror::Object*>(rcvr_jobj);
mirror::Class* proxy_class = rcvr->GetClass();
@@ -328,9 +315,7 @@
declares_exception = declared_exception->IsAssignableFrom(exception_class);
}
if (!declares_exception) {
- ThrowLocation throw_location(rcvr, proxy_method, -1);
- soa.Self()->ThrowNewWrappedException(throw_location,
- "Ljava/lang/reflect/UndeclaredThrowableException;",
+ soa.Self()->ThrowNewWrappedException("Ljava/lang/reflect/UndeclaredThrowableException;",
NULL);
}
}
@@ -341,16 +326,14 @@
bool FillArrayData(mirror::Object* obj, const Instruction::ArrayDataPayload* payload) {
DCHECK_EQ(payload->ident, static_cast<uint16_t>(Instruction::kArrayDataSignature));
if (UNLIKELY(obj == nullptr)) {
- ThrowNullPointerException(nullptr, "null array in FILL_ARRAY_DATA");
+ ThrowNullPointerException("null array in FILL_ARRAY_DATA");
return false;
}
mirror::Array* array = obj->AsArray();
DCHECK(!array->IsObjectArray());
if (UNLIKELY(static_cast<int32_t>(payload->element_count) > array->GetLength())) {
Thread* self = Thread::Current();
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewExceptionF(throw_location,
- "Ljava/lang/ArrayIndexOutOfBoundsException;",
+ self->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
"failed FILL_ARRAY_DATA; length=%d, index=%d",
array->GetLength(), payload->element_count);
return false;
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index 14ab320..d88d262 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -29,7 +29,7 @@
extern "C" void artDeoptimize(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- self->SetException(ThrowLocation(), Thread::GetDeoptimizationException());
+ self->SetException(Thread::GetDeoptimizationException());
self->QuickDeliverException();
}
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 7326fcf..22bf939 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -155,8 +155,7 @@
sizeof(int8_t));
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
+ ThrowNullPointerExceptionForFieldAccess(field, true);
} else {
return field->GetByte(obj);
}
@@ -177,8 +176,7 @@
sizeof(int8_t));
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
+ ThrowNullPointerExceptionForFieldAccess(field, true);
} else {
return field->GetBoolean(obj);
}
@@ -198,8 +196,7 @@
sizeof(int16_t));
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
+ ThrowNullPointerExceptionForFieldAccess(field, true);
} else {
return field->GetShort(obj);
}
@@ -220,8 +217,7 @@
sizeof(int16_t));
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
+ ThrowNullPointerExceptionForFieldAccess(field, true);
} else {
return field->GetChar(obj);
}
@@ -242,8 +238,7 @@
sizeof(int32_t));
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
+ ThrowNullPointerExceptionForFieldAccess(field, true);
} else {
return field->Get32(obj);
}
@@ -264,8 +259,7 @@
sizeof(int64_t));
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
+ ThrowNullPointerExceptionForFieldAccess(field, true);
} else {
return field->Get64(obj);
}
@@ -287,8 +281,7 @@
sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
+ ThrowNullPointerExceptionForFieldAccess(field, true);
} else {
return field->GetObj(obj);
}
@@ -448,8 +441,7 @@
}
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, false);
+ ThrowNullPointerExceptionForFieldAccess(field, false);
} else {
Primitive::Type type = field->GetTypeAsPrimitiveType();
// Compiled code can't use transactional mode.
@@ -489,8 +481,7 @@
}
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, false);
+ ThrowNullPointerExceptionForFieldAccess(field, false);
} else {
Primitive::Type type = field->GetTypeAsPrimitiveType();
// Compiled code can't use transactional mode.
@@ -525,8 +516,7 @@
}
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, false);
+ ThrowNullPointerExceptionForFieldAccess(field, false);
} else {
// Compiled code can't use transactional mode.
field->Set32<false>(obj, new_value);
@@ -551,8 +541,7 @@
sizeof(int64_t));
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, false);
+ ThrowNullPointerExceptionForFieldAccess(field, false);
} else {
// Compiled code can't use transactional mode.
field->Set64<false>(obj, new_value);
@@ -578,8 +567,7 @@
sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, false);
+ ThrowNullPointerExceptionForFieldAccess(field, false);
} else {
// Compiled code can't use transactional mode.
field->SetObj<false>(obj, new_value);
diff --git a/runtime/entrypoints/quick/quick_lock_entrypoints.cc b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
index 8ceac97..4423c08 100644
--- a/runtime/entrypoints/quick/quick_lock_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
@@ -25,9 +25,7 @@
NO_THREAD_SAFETY_ANALYSIS /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ {
ScopedQuickEntrypointChecks sqec(self);
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location(self->GetCurrentLocationForThrow());
- ThrowNullPointerException(&throw_location,
- "Null reference used for synchronization (monitor-enter)");
+ ThrowNullPointerException("Null reference used for synchronization (monitor-enter)");
return -1; // Failure.
} else {
if (kIsDebugBuild) {
@@ -47,9 +45,7 @@
NO_THREAD_SAFETY_ANALYSIS /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ {
ScopedQuickEntrypointChecks sqec(self);
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location(self->GetCurrentLocationForThrow());
- ThrowNullPointerException(&throw_location,
- "Null reference used for synchronization (monitor-exit)");
+ ThrowNullPointerException("Null reference used for synchronization (monitor-exit)");
return -1; // Failure.
} else {
// MonitorExit may throw exception.
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 25df40b..70317bb 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -41,12 +41,10 @@
* exception_ in thread and delivering the exception.
*/
ScopedQuickEntrypointChecks sqec(self);
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
if (exception == nullptr) {
- self->ThrowNewException(throw_location, "Ljava/lang/NullPointerException;",
- "throw with null exception");
+ self->ThrowNewException("Ljava/lang/NullPointerException;", "throw with null exception");
} else {
- self->SetException(throw_location, exception);
+ self->SetException(exception);
}
self->QuickDeliverException();
}
@@ -56,8 +54,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->NoteSignalBeingHandled();
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionFromDexPC(throw_location);
+ ThrowNullPointerExceptionFromDexPC();
self->NoteSignalHandlerDone();
self->QuickDeliverException();
}
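
Note: this file shows the recurring shape of the ThrowLocation removal. Each throw entrypoint drops the explicit location argument because the thread can derive the location from its own stack when the exception is actually delivered. A toy illustration of why every call site shrinks; MiniThread and its placeholder location are assumptions, not ART API.

    #include <cstdio>
    #include <string>

    // Hypothetical minimal thread: location data is recomputed on demand
    // (in ART, by walking the stack) instead of being threaded through callers.
    struct MiniThread {
      std::string current_location() const {
        return "Main.run:42";  // placeholder for a real stack walk
      }
      void ThrowNewException(const char* descriptor, const char* msg) {
        // The throw location no longer appears in the signature; derive it here.
        std::printf("throw %s (\"%s\") at %s\n",
                    descriptor, msg, current_location().c_str());
      }
    };

    int main() {
      MiniThread self;
      // Call sites shrink to a single line, as in the entrypoint hunks above.
      self.ThrowNewException("Ljava/lang/NullPointerException;", "null array");
    }
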
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 00251ff..70ee042 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -154,8 +154,6 @@
// | F7 | f_arg7
// | F6 | f_arg6
// | F5 | f_arg5
- // | F6 | f_arg6
- // | F5 | f_arg5
// | F4 | f_arg4
// | F3 | f_arg3
// | F2 | f_arg2
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 9173357..0fdfcb3 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -93,8 +93,7 @@
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, opeer, jpeer, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, jpeer, stack_begin, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_begin, stack_size, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_size, throw_location, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, throw_location, stack_trace_sample, sizeof(ThrowLocation));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_size, stack_trace_sample, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_trace_sample, wait_next, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, wait_next, monitor_enter_object, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, monitor_enter_object, top_handle_scope, sizeof(void*));
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index 87ce166..7780935 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -48,9 +48,9 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
auto* klass = GetObjectArrayClass(self, space);
const size_t size = ComputeArraySize(self, klass, component_count, 2);
- size_t bytes_allocated = 0;
+ size_t bytes_allocated = 0, bytes_tl_bulk_allocated;
auto* obj = down_cast<mirror::ObjectArray<mirror::Object>*>(
- space->Alloc(self, size, &bytes_allocated, nullptr));
+ space->Alloc(self, size, &bytes_allocated, nullptr, &bytes_tl_bulk_allocated));
if (obj != nullptr) {
obj->SetClass(klass);
obj->SetLength(static_cast<int32_t>(component_count));
@@ -77,9 +77,10 @@
// copy of the class in the same space that we are allocating in.
DCHECK(java_lang_object_array_ != nullptr);
const size_t class_size = java_lang_object_array_->GetClassSize();
- size_t bytes_allocated = 0;
+ size_t bytes_allocated = 0, bytes_tl_bulk_allocated;
auto* klass = down_cast<mirror::Class*>(space->Alloc(self, class_size, &bytes_allocated,
- nullptr));
+ nullptr,
+ &bytes_tl_bulk_allocated));
DCHECK(klass != nullptr);
memcpy(klass, java_lang_object_array_, class_size);
Runtime::Current()->GetHeap()->GetCardTable()->MarkCard(klass);
diff --git a/runtime/gc/allocator/rosalloc-inl.h b/runtime/gc/allocator/rosalloc-inl.h
index f6c9d3c..bba92a1 100644
--- a/runtime/gc/allocator/rosalloc-inl.h
+++ b/runtime/gc/allocator/rosalloc-inl.h
@@ -28,15 +28,19 @@
}
template<bool kThreadSafe>
-inline ALWAYS_INLINE void* RosAlloc::Alloc(Thread* self, size_t size, size_t* bytes_allocated) {
+inline ALWAYS_INLINE void* RosAlloc::Alloc(Thread* self, size_t size, size_t* bytes_allocated,
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
if (UNLIKELY(size > kLargeSizeThreshold)) {
- return AllocLargeObject(self, size, bytes_allocated);
+ return AllocLargeObject(self, size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
void* m;
if (kThreadSafe) {
- m = AllocFromRun(self, size, bytes_allocated);
+ m = AllocFromRun(self, size, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
} else {
- m = AllocFromRunThreadUnsafe(self, size, bytes_allocated);
+ m = AllocFromRunThreadUnsafe(self, size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
// Check if the returned memory is really all zero.
if (ShouldCheckZeroMemory() && m != nullptr) {
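
Note: RosAlloc::Alloc now reports three sizes: the bytes charged for the object, the usable size, and bytes_tl_bulk_allocated, which charges a whole thread-local refill up front. A sketch of that out-parameter contract with made-up thresholds; malloc stands in for the page map and run machinery.

    #include <cstddef>
    #include <cstdlib>

    // Illustrative threshold; RosAlloc derives these from its bracket tables.
    static const size_t kLargeThreshold = 2048;

    // Sketch of the three-out-param contract: bytes_allocated is charged per
    // object, bytes_tl_bulk_allocated per bulk (thread-local) refill.
    void* Alloc(size_t size, size_t* bytes_allocated, size_t* usable_size,
                size_t* bytes_tl_bulk_allocated) {
      if (size > kLargeThreshold) {
        size_t rounded = (size + 4095) & ~size_t{4095};  // page-align large objects
        *bytes_allocated = rounded;
        *usable_size = rounded;
        *bytes_tl_bulk_allocated = rounded;  // no bulk component for large objects
        return std::malloc(rounded);
      }
      size_t bracket = 16;  // stand-in for SizeToIndexAndBracketSize
      while (bracket < size) bracket *= 2;
      *bytes_allocated = bracket;
      *usable_size = bracket;
      // The real allocator reports 0 here when the slot was pre-counted by an
      // earlier bulk charge; this sketch always charges per slot.
      *bytes_tl_bulk_allocated = bracket;
      return std::malloc(bracket);
    }
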
@@ -48,6 +52,115 @@
return m;
}
+inline bool RosAlloc::Run::IsFull() {
+ const size_t num_vec = NumberOfBitmapVectors();
+ for (size_t v = 0; v < num_vec; ++v) {
+ if (~alloc_bit_map_[v] != 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+inline bool RosAlloc::CanAllocFromThreadLocalRun(Thread* self, size_t size) {
+ if (UNLIKELY(!IsSizeForThreadLocal(size))) {
+ return false;
+ }
+ size_t bracket_size;
+ size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
+ DCHECK_EQ(idx, SizeToIndex(size));
+ DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
+ DCHECK_EQ(bracket_size, bracketSizes[idx]);
+ DCHECK_LE(size, bracket_size);
+ DCHECK(size > 512 || bracket_size - size < 16);
+ DCHECK_LT(idx, kNumThreadLocalSizeBrackets);
+ Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
+ if (kIsDebugBuild) {
+ // Need the lock to prevent race conditions.
+ MutexLock mu(self, *size_bracket_locks_[idx]);
+ CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
+ CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
+ }
+ DCHECK(thread_local_run != nullptr);
+ DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_);
+ return !thread_local_run->IsFull();
+}
+
+inline void* RosAlloc::AllocFromThreadLocalRun(Thread* self, size_t size,
+ size_t* bytes_allocated) {
+ DCHECK(bytes_allocated != nullptr);
+ if (UNLIKELY(!IsSizeForThreadLocal(size))) {
+ return nullptr;
+ }
+ size_t bracket_size;
+ size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
+ Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
+ if (kIsDebugBuild) {
+ // Need the lock to prevent race conditions.
+ MutexLock mu(self, *size_bracket_locks_[idx]);
+ CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
+ CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
+ }
+ DCHECK(thread_local_run != nullptr);
+ DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_);
+ void* slot_addr = thread_local_run->AllocSlot();
+ if (LIKELY(slot_addr != nullptr)) {
+ *bytes_allocated = bracket_size;
+ }
+ return slot_addr;
+}
+
+inline size_t RosAlloc::MaxBytesBulkAllocatedFor(size_t size) {
+ if (UNLIKELY(!IsSizeForThreadLocal(size))) {
+ return size;
+ }
+ size_t bracket_size;
+ size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
+ return numOfSlots[idx] * bracket_size;
+}
+
+inline void* RosAlloc::Run::AllocSlot() {
+ const size_t idx = size_bracket_idx_;
+ while (true) {
+ if (kIsDebugBuild) {
+ // Make sure that no slots leaked, the bitmap should be full for all previous vectors.
+ for (size_t i = 0; i < first_search_vec_idx_; ++i) {
+ CHECK_EQ(~alloc_bit_map_[i], 0U);
+ }
+ }
+ uint32_t* const alloc_bitmap_ptr = &alloc_bit_map_[first_search_vec_idx_];
+ uint32_t ffz1 = __builtin_ffs(~*alloc_bitmap_ptr);
+ if (LIKELY(ffz1 != 0)) {
+ const uint32_t ffz = ffz1 - 1;
+ const uint32_t slot_idx = ffz +
+ first_search_vec_idx_ * sizeof(*alloc_bitmap_ptr) * kBitsPerByte;
+ const uint32_t mask = 1U << ffz;
+ DCHECK_LT(slot_idx, numOfSlots[idx]) << "out of range";
+ // Found an empty slot. Set the bit.
+ DCHECK_EQ(*alloc_bitmap_ptr & mask, 0U);
+ *alloc_bitmap_ptr |= mask;
+ DCHECK_NE(*alloc_bitmap_ptr & mask, 0U);
+ uint8_t* slot_addr = reinterpret_cast<uint8_t*>(this) +
+ headerSizes[idx] + slot_idx * bracketSizes[idx];
+ if (kTraceRosAlloc) {
+ LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex
+ << reinterpret_cast<intptr_t>(slot_addr)
+ << ", bracket_size=" << std::dec << bracketSizes[idx]
+ << ", slot_idx=" << slot_idx;
+ }
+ return slot_addr;
+ }
+ const size_t num_words = RoundUp(numOfSlots[idx], 32) / 32;
+ if (first_search_vec_idx_ + 1 >= num_words) {
+ DCHECK(IsFull());
+ // Already at the last word, return null.
+ return nullptr;
+ }
+ // Increase the index to the next word and try again.
+ ++first_search_vec_idx_;
+ }
+}
+
} // namespace allocator
} // namespace gc
} // namespace art
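
Note: the newly inlined AllocSlot finds a free slot by scanning 32-bit bitmap words with __builtin_ffs on the inverted word; first_search_vec_idx_ caches the first word that may still hold a zero bit, so fully-allocated words are never rescanned. A self-contained sketch of the same scan; kSlots stands in for numOfSlots[idx], and where real runs pre-set the padding bits of the last word, this version bound-checks instead.

    #include <cstddef>
    #include <cstdint>

    // Minimal bitmap allocator in the style of RosAlloc::Run::AllocSlot:
    // a set bit means "slot in use".
    static const size_t kSlots = 96;
    static const size_t kWords = (kSlots + 31) / 32;

    struct Run {
      uint32_t bitmap[kWords] = {};
      size_t first_search_word = 0;  // all words before this are known full

      // Returns the allocated slot index, or SIZE_MAX if the run is full.
      size_t AllocSlot() {
        while (true) {
          uint32_t word = bitmap[first_search_word];
          int ffz1 = __builtin_ffs(~word);  // 1-based index of first zero bit
          if (ffz1 != 0) {
            uint32_t bit = static_cast<uint32_t>(ffz1 - 1);
            size_t slot = first_search_word * 32 + bit;
            if (slot >= kSlots) {
              return SIZE_MAX;  // zero bit lies past the last valid slot
            }
            bitmap[first_search_word] |= 1u << bit;  // mark the slot in use
            return slot;
          }
          if (first_search_word + 1 >= kWords) {
            return SIZE_MAX;  // already at the last word: the run is full
          }
          ++first_search_word;  // this word is full; never rescan it
        }
      }
    };
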
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index f51093a..f64a4ff 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -454,7 +454,10 @@
return byte_size;
}
-void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) {
+void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated,
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
+ DCHECK(bytes_allocated != nullptr);
+ DCHECK(usable_size != nullptr);
DCHECK_GT(size, kLargeSizeThreshold);
size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
void* r;
@@ -470,6 +473,8 @@
}
const size_t total_bytes = num_pages * kPageSize;
*bytes_allocated = total_bytes;
+ *usable_size = total_bytes;
+ *bytes_tl_bulk_allocated = total_bytes;
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
<< "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
@@ -622,7 +627,12 @@
return slot_addr;
}
-void* RosAlloc::AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated) {
+void* RosAlloc::AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated,
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
+ DCHECK(bytes_allocated != nullptr);
+ DCHECK(usable_size != nullptr);
+ DCHECK(bytes_tl_bulk_allocated != nullptr);
DCHECK_LE(size, kLargeSizeThreshold);
size_t bracket_size;
size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
@@ -634,14 +644,19 @@
Locks::mutator_lock_->AssertExclusiveHeld(self);
void* slot_addr = AllocFromCurrentRunUnlocked(self, idx);
if (LIKELY(slot_addr != nullptr)) {
- DCHECK(bytes_allocated != nullptr);
*bytes_allocated = bracket_size;
- // Caller verifies that it is all 0.
+ *usable_size = bracket_size;
+ *bytes_tl_bulk_allocated = bracket_size;
}
+ // Caller verifies that it is all 0.
return slot_addr;
}
-void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated) {
+void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated,
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
+ DCHECK(bytes_allocated != nullptr);
+ DCHECK(usable_size != nullptr);
+ DCHECK(bytes_tl_bulk_allocated != nullptr);
DCHECK_LE(size, kLargeSizeThreshold);
size_t bracket_size;
size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
@@ -712,31 +727,43 @@
self->SetRosAllocRun(idx, thread_local_run);
DCHECK(!thread_local_run->IsFull());
}
-
DCHECK(thread_local_run != nullptr);
DCHECK(!thread_local_run->IsFull());
DCHECK(thread_local_run->IsThreadLocal());
+ // Account for all the free slots in the new or refreshed thread local run.
+ *bytes_tl_bulk_allocated = thread_local_run->NumberOfFreeSlots() * bracket_size;
slot_addr = thread_local_run->AllocSlot();
// Must succeed now with a new run.
DCHECK(slot_addr != nullptr);
+ } else {
+ // The slot is already counted. Leave it as is.
+ *bytes_tl_bulk_allocated = 0;
}
+ DCHECK(slot_addr != nullptr);
if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
+ LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex
+ << reinterpret_cast<intptr_t>(slot_addr)
<< "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
<< "(" << std::dec << (bracket_size) << ")";
}
+ *bytes_allocated = bracket_size;
+ *usable_size = bracket_size;
} else {
// Use the (shared) current run.
MutexLock mu(self, *size_bracket_locks_[idx]);
slot_addr = AllocFromCurrentRunUnlocked(self, idx);
if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
+ LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex
+ << reinterpret_cast<intptr_t>(slot_addr)
<< "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
<< "(" << std::dec << (bracket_size) << ")";
}
+ if (LIKELY(slot_addr != nullptr)) {
+ *bytes_allocated = bracket_size;
+ *usable_size = bracket_size;
+ *bytes_tl_bulk_allocated = bracket_size;
+ }
}
- DCHECK(bytes_allocated != nullptr);
- *bytes_allocated = bracket_size;
// Caller verifies that it is all 0.
return slot_addr;
}
@@ -852,44 +879,6 @@
return stream.str();
}
-inline void* RosAlloc::Run::AllocSlot() {
- const size_t idx = size_bracket_idx_;
- while (true) {
- if (kIsDebugBuild) {
- // Make sure that no slots leaked, the bitmap should be full for all previous vectors.
- for (size_t i = 0; i < first_search_vec_idx_; ++i) {
- CHECK_EQ(~alloc_bit_map_[i], 0U);
- }
- }
- uint32_t* const alloc_bitmap_ptr = &alloc_bit_map_[first_search_vec_idx_];
- uint32_t ffz1 = __builtin_ffs(~*alloc_bitmap_ptr);
- if (LIKELY(ffz1 != 0)) {
- const uint32_t ffz = ffz1 - 1;
- const uint32_t slot_idx = ffz + first_search_vec_idx_ * sizeof(*alloc_bitmap_ptr) * kBitsPerByte;
- const uint32_t mask = 1U << ffz;
- DCHECK_LT(slot_idx, numOfSlots[idx]) << "out of range";
- // Found an empty slot. Set the bit.
- DCHECK_EQ(*alloc_bitmap_ptr & mask, 0U);
- *alloc_bitmap_ptr |= mask;
- DCHECK_NE(*alloc_bitmap_ptr & mask, 0U);
- uint8_t* slot_addr = reinterpret_cast<uint8_t*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
- if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
- << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
- }
- return slot_addr;
- }
- const size_t num_words = RoundUp(numOfSlots[idx], 32) / 32;
- if (first_search_vec_idx_ + 1 >= num_words) {
- DCHECK(IsFull());
- // Already at the last word, return null.
- return nullptr;
- }
- // Increase the index to the next word and try again.
- ++first_search_vec_idx_;
- }
-}
-
void RosAlloc::Run::FreeSlot(void* ptr) {
DCHECK(!IsThreadLocal());
const uint8_t idx = size_bracket_idx_;
@@ -920,6 +909,25 @@
}
}
+size_t RosAlloc::Run::NumberOfFreeSlots() {
+ size_t num_alloc_slots = 0;
+ const size_t idx = size_bracket_idx_;
+ const size_t num_slots = numOfSlots[idx];
+ const size_t num_vec = RoundUp(num_slots, 32) / 32;
+ DCHECK_NE(num_vec, 0U);
+ for (size_t v = 0; v < num_vec - 1; v++) {
+ num_alloc_slots += POPCOUNT(alloc_bit_map_[v]);
+ }
+ // Don't count the invalid bits in the last vector.
+ uint32_t last_vec_masked = alloc_bit_map_[num_vec - 1] &
+ ~GetBitmapLastVectorMask(num_slots, num_vec);
+ num_alloc_slots += POPCOUNT(last_vec_masked);
+ size_t num_free_slots = num_slots - num_alloc_slots;
+ DCHECK_LE(num_alloc_slots, num_slots);
+ DCHECK_LE(num_free_slots, num_slots);
+ return num_free_slots;
+}
+
inline bool RosAlloc::Run::MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out) {
DCHECK(IsThreadLocal());
// Free slots in the alloc bit map based on the thread local free bit map.
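
Note: NumberOfFreeSlots counts allocated slots with POPCOUNT, masking off the padding bits of the last bitmap vector so they are not mistaken for allocations. A standalone sketch of the computation; it masks to the valid bits directly, where the real code clears the bits of GetBitmapLastVectorMask.

    #include <cstddef>
    #include <cstdint>

    // Counting free slots the way Run::NumberOfFreeSlots does. Assumes
    // num_slots > 0; set bits mean "slot in use".
    size_t NumberOfFreeSlots(const uint32_t* bitmap, size_t num_slots) {
      size_t num_words = (num_slots + 31) / 32;
      size_t allocated = 0;
      for (size_t w = 0; w + 1 < num_words; ++w) {
        allocated += static_cast<size_t>(__builtin_popcount(bitmap[w]));
      }
      // Bits at positions >= num_slots % 32 in the last word are padding.
      uint32_t valid_bits =
          num_slots % 32 == 0 ? 32u : static_cast<uint32_t>(num_slots % 32);
      uint32_t mask =
          valid_bits == 32 ? 0xffffffffu : ((1u << valid_bits) - 1u);
      allocated +=
          static_cast<size_t>(__builtin_popcount(bitmap[num_words - 1] & mask));
      return num_slots - allocated;
    }
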
@@ -1055,16 +1063,6 @@
return alloc_bit_map_[num_vec - 1] == GetBitmapLastVectorMask(num_slots, num_vec);
}
-inline bool RosAlloc::Run::IsFull() {
- const size_t num_vec = NumberOfBitmapVectors();
- for (size_t v = 0; v < num_vec; ++v) {
- if (~alloc_bit_map_[v] != 0) {
- return false;
- }
- }
- return true;
-}
-
inline bool RosAlloc::Run::IsBulkFreeBitmapClean() {
const size_t num_vec = NumberOfBitmapVectors();
for (size_t v = 0; v < num_vec; v++) {
@@ -1654,10 +1652,11 @@
}
}
-void RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
+size_t RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
Thread* self = Thread::Current();
// Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
ReaderMutexLock wmu(self, bulk_free_lock_);
+ size_t free_bytes = 0U;
for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
MutexLock mu(self, *size_bracket_locks_[idx]);
Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
@@ -1665,9 +1664,12 @@
// Invalid means already revoked.
DCHECK(thread_local_run->IsThreadLocal());
if (thread_local_run != dedicated_full_run_) {
+ // Note the thread local run may not be full here.
thread->SetRosAllocRun(idx, dedicated_full_run_);
DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
- // Note the thread local run may not be full here.
+ // Count the number of free slots left.
+ size_t num_free_slots = thread_local_run->NumberOfFreeSlots();
+ free_bytes += num_free_slots * bracketSizes[idx];
bool dont_care;
thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&dont_care);
thread_local_run->SetIsThreadLocal(false);
@@ -1677,6 +1679,7 @@
RevokeRun(self, idx, thread_local_run);
}
}
+ return free_bytes;
}
void RosAlloc::RevokeRun(Thread* self, size_t idx, Run* run) {
@@ -1719,16 +1722,18 @@
}
}
-void RosAlloc::RevokeAllThreadLocalRuns() {
+size_t RosAlloc::RevokeAllThreadLocalRuns() {
// This is called when a mutator thread won't allocate such as at
// the Zygote creation time or during the GC pause.
MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
+ size_t free_bytes = 0U;
for (Thread* thread : thread_list) {
- RevokeThreadLocalRuns(thread);
+ free_bytes += RevokeThreadLocalRuns(thread);
}
RevokeThreadUnsafeCurrentRuns();
+ return free_bytes;
}
void RosAlloc::AssertThreadLocalRunsAreRevoked(Thread* thread) {
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 3269e10..d1e7ad9 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -230,8 +230,10 @@
static uint32_t GetBitmapLastVectorMask(size_t num_slots, size_t num_vec);
// Returns true if all the slots in the run are not in use.
bool IsAllFree();
+ // Returns the number of free slots.
+ size_t NumberOfFreeSlots();
// Returns true if all the slots in the run are in use.
- bool IsFull();
+ ALWAYS_INLINE bool IsFull();
// Returns true if the bulk free bit map is clean.
bool IsBulkFreeBitmapClean();
// Returns true if the thread local free bit map is clean.
@@ -309,6 +311,15 @@
DCHECK(bracketSizes[idx] == size);
return idx;
}
+ // Returns true if the given allocation size is for a thread local allocation.
+ static bool IsSizeForThreadLocal(size_t size) {
+ DCHECK_GT(kNumThreadLocalSizeBrackets, 0U);
+ size_t max_thread_local_bracket_idx = kNumThreadLocalSizeBrackets - 1;
+ bool is_size_for_thread_local = size <= bracketSizes[max_thread_local_bracket_idx];
+ DCHECK(size > kLargeSizeThreshold ||
+ (is_size_for_thread_local == (SizeToIndex(size) < kNumThreadLocalSizeBrackets)));
+ return is_size_for_thread_local;
+ }
// Rounds up the size up the nearest bracket size.
static size_t RoundToBracketSize(size_t size) {
DCHECK(size <= kLargeSizeThreshold);
@@ -504,11 +515,13 @@
size_t FreePages(Thread* self, void* ptr, bool already_zero) EXCLUSIVE_LOCKS_REQUIRED(lock_);
// Allocate/free a run slot.
- void* AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated)
+ void* AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated)
LOCKS_EXCLUDED(lock_);
// Allocate/free a run slot without acquiring locks.
// TODO: EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- void* AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated)
+ void* AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated,
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
LOCKS_EXCLUDED(lock_);
void* AllocFromCurrentRunUnlocked(Thread* self, size_t idx);
@@ -527,7 +540,9 @@
size_t FreeInternal(Thread* self, void* ptr) LOCKS_EXCLUDED(lock_);
// Allocates large objects.
- void* AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) LOCKS_EXCLUDED(lock_);
+ void* AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated,
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ LOCKS_EXCLUDED(lock_);
// Revoke a run by adding it to non_full_runs_ or freeing the pages.
void RevokeRun(Thread* self, size_t idx, Run* run);
@@ -551,13 +566,26 @@
// If kThreadUnsafe is true then the allocator may avoid acquiring some locks as an optimization.
// If used, this may cause race conditions if multiple threads are allocating at the same time.
template<bool kThreadSafe = true>
- void* Alloc(Thread* self, size_t size, size_t* bytes_allocated)
+ void* Alloc(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated)
LOCKS_EXCLUDED(lock_);
size_t Free(Thread* self, void* ptr)
LOCKS_EXCLUDED(bulk_free_lock_);
size_t BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
LOCKS_EXCLUDED(bulk_free_lock_);
+ // Returns true if the given allocation request can be allocated in
+ // an existing thread local run without allocating a new run.
+ ALWAYS_INLINE bool CanAllocFromThreadLocalRun(Thread* self, size_t size);
+ // Allocate the given allocation request in an existing thread local
+ // run without allocating a new run.
+ ALWAYS_INLINE void* AllocFromThreadLocalRun(Thread* self, size_t size, size_t* bytes_allocated);
+
+ // Returns the maximum number of bytes that could be allocated in bulk for the
+ // given size, that is, the maximum value of the bytes_tl_bulk_allocated out
+ // param returned by RosAlloc::Alloc().
+ ALWAYS_INLINE size_t MaxBytesBulkAllocatedFor(size_t size);
+
// Returns the size of the allocated slot for a given allocated memory chunk.
size_t UsableSize(const void* ptr);
// Returns the size of the allocated slot for a given size.
@@ -586,9 +614,13 @@
void SetFootprintLimit(size_t bytes) LOCKS_EXCLUDED(lock_);
// Releases the thread-local runs assigned to the given thread back to the common set of runs.
- void RevokeThreadLocalRuns(Thread* thread);
+ // Returns the total bytes of free slots in the revoked thread local runs. This is to be
+ // subtracted from Heap::num_bytes_allocated_ to cancel out the ahead-of-time counting.
+ size_t RevokeThreadLocalRuns(Thread* thread);
// Releases the thread-local runs assigned to all the threads back to the common set of runs.
- void RevokeAllThreadLocalRuns() LOCKS_EXCLUDED(Locks::thread_list_lock_);
+ // Returns the total bytes of free slots in the revoked thread local runs. This is to be
+ // subtracted from Heap::num_bytes_allocated_ to cancel out the ahead-of-time counting.
+ size_t RevokeAllThreadLocalRuns() LOCKS_EXCLUDED(Locks::thread_list_lock_);
// Assert the thread local runs of a thread are revoked.
void AssertThreadLocalRunsAreRevoked(Thread* thread);
// Assert all the thread local runs are revoked.
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index dd45eca..db7a4ef 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1259,8 +1259,9 @@
size_t region_space_bytes_allocated = 0U;
size_t non_moving_space_bytes_allocated = 0U;
size_t bytes_allocated = 0U;
+ size_t dummy;
mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
- region_space_alloc_size, &region_space_bytes_allocated, nullptr);
+ region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
bytes_allocated = region_space_bytes_allocated;
if (to_ref != nullptr) {
DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
@@ -1286,7 +1287,7 @@
}
fall_back_to_non_moving = true;
to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
- &non_moving_space_bytes_allocated, nullptr);
+ &non_moving_space_bytes_allocated, nullptr, &dummy);
CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
bytes_allocated = non_moving_space_bytes_allocated;
// Mark it in the mark bitmap.
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 8be18be..eafcc45 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -48,6 +48,7 @@
gc_cause_ = gc_cause;
freed_ = ObjectBytePair();
freed_los_ = ObjectBytePair();
+ freed_bytes_revoke_ = 0;
}
uint64_t Iteration::GetEstimatedThroughput() const {
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index b809469..ed5207a 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -75,6 +75,12 @@
uint64_t GetFreedLargeObjects() const {
return freed_los_.objects;
}
+ uint64_t GetFreedRevokeBytes() const {
+ return freed_bytes_revoke_;
+ }
+ void SetFreedRevoke(uint64_t freed) {
+ freed_bytes_revoke_ = freed;
+ }
void Reset(GcCause gc_cause, bool clear_soft_references);
// Returns the estimated throughput of the iteration.
uint64_t GetEstimatedThroughput() const;
@@ -99,6 +105,7 @@
TimingLogger timings_;
ObjectBytePair freed_;
ObjectBytePair freed_los_;
+ uint64_t freed_bytes_revoke_; // see Heap::num_bytes_freed_revoke_.
std::vector<uint64_t> pause_times_;
friend class GarbageCollector;
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 8aac484..ee4e752 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -292,6 +292,7 @@
Runtime::Current()->AllowNewSystemWeaks();
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ GetHeap()->RecordFreeRevoke();
// Reclaim unmarked objects.
Sweep(false);
// Swap the live and mark bitmaps for each space which we modified space. This is an
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index c1ba5e3..b3d59f2 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -242,6 +242,7 @@
// Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
// before they are properly counted.
RevokeAllThreadLocalBuffers();
+ GetHeap()->RecordFreeRevoke(); // This is for the non-moving rosalloc space used by GSS.
// Record freed memory.
const int64_t from_bytes = from_space_->GetBytesAllocated();
const int64_t to_bytes = bytes_moved_;
@@ -489,17 +490,18 @@
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
const size_t object_size = obj->SizeOf();
- size_t bytes_allocated;
+ size_t bytes_allocated, dummy;
mirror::Object* forward_address = nullptr;
if (generational_ && reinterpret_cast<uint8_t*>(obj) < last_gc_to_space_end_) {
// If it's allocated before the last GC (older), move
// (pseudo-promote) it to the main free list space (as sort
// of an old generation.)
forward_address = promo_dest_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
- nullptr);
+ nullptr, &dummy);
if (UNLIKELY(forward_address == nullptr)) {
// If out of space, fall back to the to-space.
- forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
+ forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr,
+ &dummy);
// No logic for marking the bitmap, so it must be null.
DCHECK(to_space_live_bitmap_ == nullptr);
} else {
@@ -544,7 +546,8 @@
}
} else {
// If it's allocated after the last GC (younger), copy it to the to-space.
- forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
+ forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr,
+ &dummy);
if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
to_space_live_bitmap_->Set(forward_address);
}
@@ -552,7 +555,7 @@
// If it's still null, attempt to use the fallback space.
if (UNLIKELY(forward_address == nullptr)) {
forward_address = fallback_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
- nullptr);
+ nullptr, &dummy);
CHECK(forward_address != nullptr) << "Out of memory in the to-space and fallback space.";
accounting::ContinuousSpaceBitmap* bitmap = fallback_space_->GetLiveBitmap();
if (bitmap != nullptr) {
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index b8c2452..b770096 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -64,6 +64,7 @@
// fragmentation.
}
AllocationTimer alloc_timer(this, &obj);
+ // Bytes allocated for the (individual) object.
size_t bytes_allocated;
size_t usable_size;
size_t new_num_bytes_allocated = 0;
@@ -86,13 +87,29 @@
usable_size = bytes_allocated;
pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
+ } else if (!kInstrumented && allocator == kAllocatorTypeRosAlloc &&
+ (obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) &&
+ LIKELY(obj != nullptr)) {
+ DCHECK(!running_on_valgrind_);
+ obj->SetClass(klass);
+ if (kUseBakerOrBrooksReadBarrier) {
+ if (kUseBrooksReadBarrier) {
+ obj->SetReadBarrierPointer(obj);
+ }
+ obj->AssertReadBarrierPointer();
+ }
+ usable_size = bytes_allocated;
+ pre_fence_visitor(obj, usable_size);
+ QuasiAtomic::ThreadFenceForConstructor();
} else {
+ // Bytes allocated, taking bulk thread-local buffer allocations into account.
+ size_t bytes_tl_bulk_allocated = 0;
obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
- &usable_size);
+ &usable_size, &bytes_tl_bulk_allocated);
if (UNLIKELY(obj == nullptr)) {
bool is_current_allocator = allocator == GetCurrentAllocator();
obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &usable_size,
- &klass);
+ &bytes_tl_bulk_allocated, &klass);
if (obj == nullptr) {
bool after_is_current_allocator = allocator == GetCurrentAllocator();
// If there is a pending exception, fail the allocation right away since the next one
@@ -126,9 +143,9 @@
WriteBarrierField(obj, mirror::Object::ClassOffset(), klass);
}
pre_fence_visitor(obj, usable_size);
- new_num_bytes_allocated =
- static_cast<size_t>(num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated))
- + bytes_allocated;
+ new_num_bytes_allocated = static_cast<size_t>(
+ num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_tl_bulk_allocated))
+ + bytes_tl_bulk_allocated;
}
if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
CHECK_LE(obj->SizeOf(), usable_size);
@@ -196,8 +213,10 @@
template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator_type,
size_t alloc_size, size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
if (allocator_type != kAllocatorTypeTLAB && allocator_type != kAllocatorTypeRegionTLAB &&
+ allocator_type != kAllocatorTypeRosAlloc &&
UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
return nullptr;
}
@@ -210,35 +229,56 @@
if (LIKELY(ret != nullptr)) {
*bytes_allocated = alloc_size;
*usable_size = alloc_size;
+ *bytes_tl_bulk_allocated = alloc_size;
}
break;
}
case kAllocatorTypeRosAlloc: {
if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
// If running on valgrind, we should be using the instrumented path.
- ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
+ size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedFor(alloc_size);
+ if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
+ max_bytes_tl_bulk_allocated))) {
+ return nullptr;
+ }
+ ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
} else {
DCHECK(!running_on_valgrind_);
- ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size);
+ size_t max_bytes_tl_bulk_allocated =
+ rosalloc_space_->MaxBytesBulkAllocatedForNonvirtual(alloc_size);
+ if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
+ max_bytes_tl_bulk_allocated))) {
+ return nullptr;
+ }
+ if (!kInstrumented) {
+ DCHECK(!rosalloc_space_->CanAllocThreadLocal(self, alloc_size));
+ }
+ ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
break;
}
case kAllocatorTypeDlMalloc: {
if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
// If running on valgrind, we should be using the instrumented path.
- ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
+ ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
} else {
DCHECK(!running_on_valgrind_);
- ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size);
+ ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
break;
}
case kAllocatorTypeNonMoving: {
- ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
+ ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
break;
}
case kAllocatorTypeLOS: {
- ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
+ ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
// Note that the bump pointer spaces aren't necessarily next to
// the other continuous spaces like the non-moving alloc space or
// the zygote space.
@@ -257,20 +297,22 @@
if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
return nullptr;
}
- *bytes_allocated = new_tlab_size;
+ *bytes_tl_bulk_allocated = new_tlab_size;
} else {
- *bytes_allocated = 0;
+ *bytes_tl_bulk_allocated = 0;
}
// The allocation can't fail.
ret = self->AllocTlab(alloc_size);
DCHECK(ret != nullptr);
+ *bytes_allocated = alloc_size;
*usable_size = alloc_size;
break;
}
case kAllocatorTypeRegion: {
DCHECK(region_space_ != nullptr);
alloc_size = RoundUp(alloc_size, space::RegionSpace::kAlignment);
- ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size);
+ ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
break;
}
case kAllocatorTypeRegionTLAB: {
@@ -283,15 +325,17 @@
// Try to allocate a tlab.
if (!region_space_->AllocNewTlab(self)) {
// Failed to allocate a tlab. Try non-tlab.
- ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size);
+ ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
return ret;
}
- *bytes_allocated = space::RegionSpace::kRegionSize;
+ *bytes_tl_bulk_allocated = space::RegionSpace::kRegionSize;
// Fall-through.
} else {
// Check OOME for a non-tlab allocation.
if (!IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size)) {
- ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size);
+ ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
return ret;
} else {
// Neither tlab or non-tlab works. Give up.
@@ -301,18 +345,20 @@
} else {
// Large. Check OOME.
if (LIKELY(!IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
- ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size);
+ ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
return ret;
} else {
return nullptr;
}
}
} else {
- *bytes_allocated = 0;
+ *bytes_tl_bulk_allocated = 0; // Allocated in an existing buffer.
}
// The allocation can't fail.
ret = self->AllocTlab(alloc_size);
DCHECK(ret != nullptr);
+ *bytes_allocated = alloc_size;
*usable_size = alloc_size;
break;
}
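
In the rewritten TryToAllocate() above, bytes_tl_bulk_allocated takes one of three values, and num_bytes_allocated_ is advanced by that bulk value rather than by the per-object size. A self-contained sketch of the contract (hypothetical names, not the ART implementation):

    #include <cassert>
    #include <cstddef>

    // Sketch of the three-way contract for bytes_tl_bulk_allocated:
    //   1) non-TLAB allocation:            bulk == bytes_allocated
    //   2) allocation forcing a new TLAB:  bulk == the new TLAB size
    //   3) allocation in an existing TLAB: bulk == 0
    void TlabAlloc(size_t alloc_size, size_t tlab_remaining,
                   size_t new_tlab_size, size_t* bytes_allocated,
                   size_t* bytes_tl_bulk_allocated) {
      if (alloc_size > tlab_remaining) {
        *bytes_tl_bulk_allocated = new_tlab_size;  // case 2: charge the buffer now
      } else {
        *bytes_tl_bulk_allocated = 0;              // case 3: already charged in bulk
      }
      *bytes_allocated = alloc_size;               // always the object's own size
    }

    int main() {
      size_t ba = 0, bulk = 0, counter = 0;
      TlabAlloc(64, 0, 16384, &ba, &bulk);      // forces a fresh TLAB
      counter += bulk;                           // grows by 16384, not 64
      TlabAlloc(64, 16320, 16384, &ba, &bulk);  // fits the existing TLAB
      counter += bulk;                           // unchanged
      assert(ba == 64 && counter == 16384);
      return 0;
    }
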
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 9343622..9421db5 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -156,6 +156,7 @@
total_objects_freed_ever_(0),
num_bytes_allocated_(0),
native_bytes_allocated_(0),
+ num_bytes_freed_revoke_(0),
verify_missing_card_marks_(false),
verify_system_weaks_(false),
verify_pre_gc_heap_(verify_pre_gc_heap),
@@ -438,20 +439,31 @@
// Create our garbage collectors.
for (size_t i = 0; i < 2; ++i) {
const bool concurrent = i != 0;
- garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
- garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
- garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
+ if ((MayUseCollector(kCollectorTypeCMS) && concurrent) ||
+ (MayUseCollector(kCollectorTypeMS) && !concurrent)) {
+ garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
+ garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
+ garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
+ }
}
if (kMovingCollector) {
- // TODO: Clean this up.
- const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
- semi_space_collector_ = new collector::SemiSpace(this, generational,
- generational ? "generational" : "");
- garbage_collectors_.push_back(semi_space_collector_);
- concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
- garbage_collectors_.push_back(concurrent_copying_collector_);
- mark_compact_collector_ = new collector::MarkCompact(this);
- garbage_collectors_.push_back(mark_compact_collector_);
+ if (MayUseCollector(kCollectorTypeSS) || MayUseCollector(kCollectorTypeGSS) ||
+ MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
+ use_homogeneous_space_compaction_for_oom_) {
+ // TODO: Clean this up.
+ const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
+ semi_space_collector_ = new collector::SemiSpace(this, generational,
+ generational ? "generational" : "");
+ garbage_collectors_.push_back(semi_space_collector_);
+ }
+ if (MayUseCollector(kCollectorTypeCC)) {
+ concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
+ garbage_collectors_.push_back(concurrent_copying_collector_);
+ }
+ if (MayUseCollector(kCollectorTypeMC)) {
+ mark_compact_collector_ = new collector::MarkCompact(this);
+ garbage_collectors_.push_back(mark_compact_collector_);
+ }
}
if (GetImageSpace() != nullptr && non_moving_space_ != nullptr &&
(is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
@@ -487,6 +499,10 @@
return nullptr;
}
+bool Heap::MayUseCollector(CollectorType type) const {
+ return foreground_collector_type_ == type || background_collector_type_ == type;
+}
+
space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
size_t growth_limit, size_t capacity,
const char* name, bool can_move_objects) {
@@ -1329,6 +1345,19 @@
}
}
+void Heap::RecordFreeRevoke() {
+ // Subtract num_bytes_freed_revoke_ from num_bytes_allocated_ to cancel out the
+ // ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers.
+ // If a revoke races with this call, it is fine if num_bytes_freed_revoke_ is not
+ // reset all the way to zero; the remainder will be subtracted at the next GC.
+ size_t bytes_freed = num_bytes_freed_revoke_.LoadSequentiallyConsistent();
+ CHECK_GE(num_bytes_freed_revoke_.FetchAndSubSequentiallyConsistent(bytes_freed),
+ bytes_freed) << "num_bytes_freed_revoke_ underflow";
+ CHECK_GE(num_bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes_freed),
+ bytes_freed) << "num_bytes_allocated_ underflow";
+ GetCurrentGcIteration()->SetFreedRevoke(bytes_freed);
+}
+
space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
for (const auto& space : continuous_spaces_) {
if (space->AsContinuousSpace()->IsRosAllocSpace()) {
@@ -1343,6 +1372,7 @@
mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
size_t alloc_size, size_t* bytes_allocated,
size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated,
mirror::Class** klass) {
bool was_default_allocator = allocator == GetCurrentAllocator();
// Make sure there is no pending exception since we may need to throw an OOME.
@@ -1362,7 +1392,7 @@
}
// A GC was in progress and we blocked, retry allocation now that memory has been freed.
mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
- usable_size);
+ usable_size, bytes_tl_bulk_allocated);
if (ptr != nullptr) {
return ptr;
}
@@ -1376,7 +1406,7 @@
}
if (gc_ran) {
mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
- usable_size);
+ usable_size, bytes_tl_bulk_allocated);
if (ptr != nullptr) {
return ptr;
}
@@ -1396,7 +1426,7 @@
if (plan_gc_ran) {
// Did we free sufficient memory for the allocation to succeed?
mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
- usable_size);
+ usable_size, bytes_tl_bulk_allocated);
if (ptr != nullptr) {
return ptr;
}
@@ -1405,7 +1435,7 @@
// Allocations have failed after GCs; this is an exceptional state.
// Try harder, growing the heap if necessary.
mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
- usable_size);
+ usable_size, bytes_tl_bulk_allocated);
if (ptr != nullptr) {
return ptr;
}
@@ -1422,7 +1452,8 @@
if (was_default_allocator && allocator != GetCurrentAllocator()) {
return nullptr;
}
- ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
+ ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
if (ptr == nullptr) {
const uint64_t current_time = NanoTime();
switch (allocator) {
@@ -1438,7 +1469,7 @@
case HomogeneousSpaceCompactResult::kSuccess:
// If the allocation succeeded, we delayed an oom.
ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
- usable_size);
+ usable_size, bytes_tl_bulk_allocated);
if (ptr != nullptr) {
count_delayed_oom_++;
}
@@ -1483,7 +1514,7 @@
} else {
LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
- usable_size);
+ usable_size, bytes_tl_bulk_allocated);
}
}
break;
@@ -1969,8 +2000,8 @@
if (it == bins_.end()) {
// No available space in the bins, place it in the target space instead (grows the zygote
// space).
- size_t bytes_allocated;
- forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
+ size_t bytes_allocated, dummy;
+ forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr, &dummy);
if (to_space_live_bitmap_ != nullptr) {
to_space_live_bitmap_->Set(forward_address);
} else {
@@ -2033,8 +2064,6 @@
non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
const bool same_space = non_moving_space_ == main_space_;
if (kCompactZygote) {
- // Can't compact if the non moving space is the same as the main space.
- DCHECK(semi_space_collector_ != nullptr);
// Temporarily disable rosalloc verification because the zygote
// compaction will mess up the rosalloc internal metadata.
ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
@@ -2053,6 +2082,8 @@
}
} else {
CHECK(main_space_ != nullptr);
+ CHECK_NE(main_space_, non_moving_space_)
+ << "Does not make sense to compact within the same space";
// Copy from the main space.
zygote_collector.SetFromSpace(main_space_);
reset_main_space = true;
@@ -3069,7 +3100,8 @@
SetIdealFootprint(target_size);
if (IsGcConcurrent()) {
const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
- current_gc_iteration_.GetFreedLargeObjectBytes();
+ current_gc_iteration_.GetFreedLargeObjectBytes() +
+ current_gc_iteration_.GetFreedRevokeBytes();
// Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
// how many bytes were allocated during the GC we need to add freed_bytes back on.
CHECK_GE(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
@@ -3275,31 +3307,43 @@
void Heap::RevokeThreadLocalBuffers(Thread* thread) {
if (rosalloc_space_ != nullptr) {
- rosalloc_space_->RevokeThreadLocalBuffers(thread);
+ size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
+ if (freed_bytes_revoke > 0U) {
+ num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
+ CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
+ }
}
if (bump_pointer_space_ != nullptr) {
- bump_pointer_space_->RevokeThreadLocalBuffers(thread);
+ CHECK_EQ(bump_pointer_space_->RevokeThreadLocalBuffers(thread), 0U);
}
if (region_space_ != nullptr) {
- region_space_->RevokeThreadLocalBuffers(thread);
+ CHECK_EQ(region_space_->RevokeThreadLocalBuffers(thread), 0U);
}
}
void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
if (rosalloc_space_ != nullptr) {
- rosalloc_space_->RevokeThreadLocalBuffers(thread);
+ size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
+ if (freed_bytes_revoke > 0U) {
+ num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
+ CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
+ }
}
}
void Heap::RevokeAllThreadLocalBuffers() {
if (rosalloc_space_ != nullptr) {
- rosalloc_space_->RevokeAllThreadLocalBuffers();
+ size_t freed_bytes_revoke = rosalloc_space_->RevokeAllThreadLocalBuffers();
+ if (freed_bytes_revoke > 0U) {
+ num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
+ CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
+ }
}
if (bump_pointer_space_ != nullptr) {
- bump_pointer_space_->RevokeAllThreadLocalBuffers();
+ CHECK_EQ(bump_pointer_space_->RevokeAllThreadLocalBuffers(), 0U);
}
if (region_space_ != nullptr) {
- region_space_->RevokeAllThreadLocalBuffers();
+ CHECK_EQ(region_space_->RevokeAllThreadLocalBuffers(), 0U);
}
}
@@ -3340,6 +3384,8 @@
// Just finished a GC, attempt to run finalizers.
RunFinalization(env);
CHECK(!env->ExceptionCheck());
+ // Native bytes allocated may have been updated by finalization; refresh the value.
+ new_native_bytes_allocated = native_bytes_allocated_.LoadRelaxed();
}
// If we still are over the watermark, attempt a GC for alloc and run finalizers.
if (new_native_bytes_allocated > growth_limit_) {
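
RecordFreeRevoke() drains num_bytes_freed_revoke_ into num_bytes_allocated_ with a snapshot-then-subtract sequence that tolerates concurrent revokes, as its comment above notes. A self-contained sketch of the same protocol (std::atomic stands in for ART's Atomic<> wrappers; names and sizes are illustrative):

    #include <atomic>
    #include <cassert>
    #include <cstddef>

    std::atomic<size_t> num_bytes_allocated{0};
    std::atomic<size_t> num_bytes_freed_revoke{0};

    void OnRevoke(size_t unused_tail_bytes) {
      // Revokes only accumulate; the subtraction is deferred to GC time.
      num_bytes_freed_revoke.fetch_add(unused_tail_bytes);
    }

    void RecordFreeRevokeSketch() {
      // Snapshot, then subtract the snapshot from both counters. A revoke
      // racing in between merely leaves a remainder for the next GC.
      const size_t bytes_freed = num_bytes_freed_revoke.load();
      num_bytes_freed_revoke.fetch_sub(bytes_freed);
      num_bytes_allocated.fetch_sub(bytes_freed);
    }

    int main() {
      num_bytes_allocated = 8192;  // a whole run was bulk-counted
      OnRevoke(4096);              // half of it was never handed out
      RecordFreeRevokeSketch();
      assert(num_bytes_allocated == 4096);
      assert(num_bytes_freed_revoke == 0);
      return 0;
    }
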
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index b2478e6..959ff18 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -390,6 +390,9 @@
// free-list backed space.
void RecordFree(uint64_t freed_objects, int64_t freed_bytes);
+ // Record the bytes freed by thread-local buffer revoke.
+ void RecordFreeRevoke();
+
// Must be called if a field of an Object in the heap changes, and before any GC safe-point.
// The call is not needed if NULL is stored in the field.
ALWAYS_INLINE void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/,
@@ -661,6 +664,14 @@
// Request asynchronous GC.
void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(pending_task_lock_);
+ // Whether or not we may use the given garbage collector type; used so that we only create the
+ // collectors we need.
+ bool MayUseCollector(CollectorType type) const;
+
+ // Used by tests to reduce timing-dependent flakiness in OOME behavior.
+ void SetMinIntervalHomogeneousSpaceCompactionByOom(uint64_t interval) {
+ min_interval_homogeneous_space_compaction_by_oom_ = interval;
+ }
+
private:
class ConcurrentGCTask;
class CollectorTransitionTask;
@@ -721,6 +732,7 @@
// an initial allocation attempt failed.
mirror::Object* AllocateInternalWithGc(Thread* self, AllocatorType allocator, size_t num_bytes,
size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated,
mirror::Class** klass)
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -739,7 +751,8 @@
template <const bool kInstrumented, const bool kGrow>
ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self, AllocatorType allocator_type,
size_t alloc_size, size_t* bytes_allocated,
- size_t* usable_size)
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
@@ -995,6 +1008,13 @@
// Bytes which are allocated and managed by native code but still need to be accounted for.
Atomic<size_t> native_bytes_allocated_;
+ // Number of bytes freed by thread local buffer revokes. This will
+ // cancel out the ahead-of-time bulk counting of bytes allocated in
+ // rosalloc thread-local buffers. It is temporarily accumulated
+ // here to be subtracted from num_bytes_allocated_ later at the next
+ // GC.
+ Atomic<size_t> num_bytes_freed_revoke_;
+
// Info related to the current or previous GC iteration.
collector::Iteration current_gc_iteration_;
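
MayUseCollector() is a simple membership test over the configured foreground and background collector types, but it is what lets the heap constructor above skip instantiating collectors that can never run. A trivial sketch of the predicate (enum values hypothetical):

    #include <cassert>

    enum class CollectorType { kMS, kCMS, kSS, kGSS, kCC, kMC };

    struct HeapConfig {
      CollectorType foreground;
      CollectorType background;
      // A collector is instantiated only if it can ever be selected, i.e.
      // it matches the foreground or background type.
      bool MayUseCollector(CollectorType type) const {
        return foreground == type || background == type;
      }
    };

    int main() {
      const HeapConfig cfg{CollectorType::kCMS, CollectorType::kSS};
      assert(cfg.MayUseCollector(CollectorType::kCMS));
      assert(!cfg.MayUseCollector(CollectorType::kMC));  // never constructed
      return 0;
    }
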
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index 9f1f953..14a93d1 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -24,7 +24,8 @@
namespace space {
inline mirror::Object* BumpPointerSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
num_bytes = RoundUp(num_bytes, kAlignment);
mirror::Object* ret = AllocNonvirtual(num_bytes);
if (LIKELY(ret != nullptr)) {
@@ -32,13 +33,15 @@
if (usable_size != nullptr) {
*usable_size = num_bytes;
}
+ *bytes_tl_bulk_allocated = num_bytes;
}
return ret;
}
inline mirror::Object* BumpPointerSpace::AllocThreadUnsafe(Thread* self, size_t num_bytes,
size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
Locks::mutator_lock_->AssertExclusiveHeld(self);
num_bytes = RoundUp(num_bytes, kAlignment);
uint8_t* end = end_.LoadRelaxed();
@@ -54,6 +57,7 @@
if (UNLIKELY(usable_size != nullptr)) {
*usable_size = num_bytes;
}
+ *bytes_tl_bulk_allocated = num_bytes;
return obj;
}
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index fbfc449..1303d77 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -93,12 +93,13 @@
return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
}
-void BumpPointerSpace::RevokeThreadLocalBuffers(Thread* thread) {
+size_t BumpPointerSpace::RevokeThreadLocalBuffers(Thread* thread) {
MutexLock mu(Thread::Current(), block_lock_);
RevokeThreadLocalBuffersLocked(thread);
+ return 0U;
}
-void BumpPointerSpace::RevokeAllThreadLocalBuffers() {
+size_t BumpPointerSpace::RevokeAllThreadLocalBuffers() {
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
MutexLock mu2(self, *Locks::thread_list_lock_);
@@ -107,6 +108,7 @@
for (Thread* thread : thread_list) {
RevokeThreadLocalBuffers(thread);
}
+ return 0U;
}
void BumpPointerSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 089ede4..c496a42 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -47,10 +47,10 @@
// Allocate num_bytes, returns nullptr if the space is full.
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size)
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Object* AllocNonvirtual(size_t num_bytes);
@@ -103,9 +103,9 @@
void Dump(std::ostream& os) const;
- void RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(block_lock_);
- void RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
- Locks::thread_list_lock_);
+ size_t RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(block_lock_);
+ size_t RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
+ Locks::thread_list_lock_);
void AssertThreadLocalBuffersAreRevoked(Thread* thread) LOCKS_EXCLUDED(block_lock_);
void AssertAllThreadLocalBuffersAreRevoked() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
Locks::thread_list_lock_);
diff --git a/runtime/gc/space/dlmalloc_space-inl.h b/runtime/gc/space/dlmalloc_space-inl.h
index 4c8a35e..9eace89 100644
--- a/runtime/gc/space/dlmalloc_space-inl.h
+++ b/runtime/gc/space/dlmalloc_space-inl.h
@@ -27,11 +27,13 @@
inline mirror::Object* DlMallocSpace::AllocNonvirtual(Thread* self, size_t num_bytes,
size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
mirror::Object* obj;
{
MutexLock mu(self, lock_);
- obj = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size);
+ obj = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
if (LIKELY(obj != NULL)) {
// Zero freshly allocated memory, done while not holding the space's lock.
@@ -49,9 +51,11 @@
return size + kChunkOverhead;
}
-inline mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(Thread* /*self*/, size_t num_bytes,
- size_t* bytes_allocated,
- size_t* usable_size) {
+inline mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(
+ Thread* /*self*/, size_t num_bytes,
+ size_t* bytes_allocated,
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
mirror::Object* result = reinterpret_cast<mirror::Object*>(mspace_malloc(mspace_, num_bytes));
if (LIKELY(result != NULL)) {
if (kDebugSpaces) {
@@ -61,6 +65,7 @@
size_t allocation_size = AllocationSizeNonvirtual(result, usable_size);
DCHECK(bytes_allocated != NULL);
*bytes_allocated = allocation_size;
+ *bytes_tl_bulk_allocated = allocation_size;
}
return result;
}
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index b8a9dd6..225861d 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -123,7 +123,8 @@
}
mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
- size_t* bytes_allocated, size_t* usable_size) {
+ size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
mirror::Object* result;
{
MutexLock mu(self, lock_);
@@ -131,7 +132,8 @@
size_t max_allowed = Capacity();
mspace_set_footprint_limit(mspace_, max_allowed);
// Try the allocation.
- result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size);
+ result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
// Shrink back down as small as possible.
size_t footprint = mspace_footprint(mspace_);
mspace_set_footprint_limit(mspace_, footprint);
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 6ce138c..1f80f1f 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -48,11 +48,15 @@
// Virtual to allow ValgrindMallocSpace to intercept.
virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE LOCKS_EXCLUDED(lock_);
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated)
+ OVERRIDE LOCKS_EXCLUDED(lock_);
// Virtual to allow ValgrindMallocSpace to intercept.
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE LOCKS_EXCLUDED(lock_) {
- return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size);
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ OVERRIDE LOCKS_EXCLUDED(lock_) {
+ return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
// Virtual to allow ValgrindMallocSpace to intercept.
virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
@@ -67,15 +71,22 @@
LOCKS_EXCLUDED(lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // DlMallocSpaces don't have thread local state.
- void RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
+ return num_bytes;
}
- void RevokeAllThreadLocalBuffers() OVERRIDE {
+
+ // DlMallocSpaces don't have thread local state.
+ size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ return 0U;
+ }
+ size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+ return 0U;
}
// Faster non-virtual allocation path.
mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) LOCKS_EXCLUDED(lock_);
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ LOCKS_EXCLUDED(lock_);
// Faster non-virtual allocation size path.
size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size);
@@ -134,7 +145,8 @@
private:
mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size)
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 7523de5..5c8e4b9 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -38,10 +38,11 @@
}
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE {
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ OVERRIDE {
mirror::Object* obj =
LargeObjectMapSpace::Alloc(self, num_bytes + kValgrindRedZoneBytes * 2, bytes_allocated,
- usable_size);
+ usable_size, bytes_tl_bulk_allocated);
mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
reinterpret_cast<uintptr_t>(obj) + kValgrindRedZoneBytes);
VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<void*>(obj), kValgrindRedZoneBytes);
@@ -108,7 +109,8 @@
}
mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
- size_t* bytes_allocated, size_t* usable_size) {
+ size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
std::string error_msg;
MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
PROT_READ | PROT_WRITE, true, false, &error_msg);
@@ -131,6 +133,8 @@
if (usable_size != nullptr) {
*usable_size = allocation_size;
}
+ DCHECK(bytes_tl_bulk_allocated != nullptr);
+ *bytes_tl_bulk_allocated = allocation_size;
num_bytes_allocated_ += allocation_size;
total_bytes_allocated_ += allocation_size;
++num_objects_allocated_;
@@ -413,7 +417,7 @@
}
mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
MutexLock mu(self, lock_);
const size_t allocation_size = RoundUp(num_bytes, kAlignment);
AllocationInfo temp_info;
@@ -451,6 +455,8 @@
if (usable_size != nullptr) {
*usable_size = allocation_size;
}
+ DCHECK(bytes_tl_bulk_allocated != nullptr);
+ *bytes_tl_bulk_allocated = allocation_size;
// Need to do these inside of the lock.
++num_objects_allocated_;
++total_objects_allocated_;
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 847f575..d1f9386 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -62,9 +62,11 @@
}
size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
// LargeObjectSpaces don't have thread local state.
- void RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ return 0U;
}
- void RevokeAllThreadLocalBuffers() OVERRIDE {
+ size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+ return 0U;
}
bool IsAllocSpace() const OVERRIDE {
return true;
@@ -124,7 +126,7 @@
// Return the storage space required by obj.
size_t AllocationSize(mirror::Object* obj, size_t* usable_size);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size);
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated);
size_t Free(Thread* self, mirror::Object* ptr);
void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
// TODO: disabling thread safety analysis as this may be called when we already hold lock_.
@@ -153,7 +155,7 @@
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
EXCLUSIVE_LOCKS_REQUIRED(lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
size_t Free(Thread* self, mirror::Object* obj) OVERRIDE;
void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
void Dump(std::ostream& os) const;
diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc
index e17bad8..a261663 100644
--- a/runtime/gc/space/large_object_space_test.cc
+++ b/runtime/gc/space/large_object_space_test.cc
@@ -49,11 +49,13 @@
while (requests.size() < num_allocations) {
size_t request_size = test_rand(&rand_seed) % max_allocation_size;
size_t allocation_size = 0;
+ size_t bytes_tl_bulk_allocated;
mirror::Object* obj = los->Alloc(Thread::Current(), request_size, &allocation_size,
- nullptr);
+ nullptr, &bytes_tl_bulk_allocated);
ASSERT_TRUE(obj != nullptr);
ASSERT_EQ(allocation_size, los->AllocationSize(obj, nullptr));
ASSERT_GE(allocation_size, request_size);
+ ASSERT_EQ(allocation_size, bytes_tl_bulk_allocated);
// Fill in our magic value.
uint8_t magic = (request_size & 0xFF) | 1;
memset(obj, magic, request_size);
@@ -83,9 +85,10 @@
// Test that dump doesn't crash.
los->Dump(LOG(INFO));
- size_t bytes_allocated = 0;
+ size_t bytes_allocated = 0, bytes_tl_bulk_allocated;
// Checks that the coalescing works.
- mirror::Object* obj = los->Alloc(Thread::Current(), 100 * MB, &bytes_allocated, nullptr);
+ mirror::Object* obj = los->Alloc(Thread::Current(), 100 * MB, &bytes_allocated, nullptr,
+ &bytes_tl_bulk_allocated);
EXPECT_TRUE(obj != nullptr);
los->Free(Thread::Current(), obj);
@@ -102,8 +105,9 @@
void Run(Thread* self) {
for (size_t i = 0; i < iterations_ ; ++i) {
- size_t alloc_size;
- mirror::Object* ptr = los_->Alloc(self, size_, &alloc_size, nullptr);
+ size_t alloc_size, bytes_tl_bulk_allocated;
+ mirror::Object* ptr = los_->Alloc(self, size_, &alloc_size, nullptr,
+ &bytes_tl_bulk_allocated);
NanoSleep((id_ + 3) * 1000); // (3+id) microseconds
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 06239e5..bbf1bbb 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -55,10 +55,11 @@
// Allocate num_bytes allowing the underlying space to grow.
virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes,
- size_t* bytes_allocated, size_t* usable_size) = 0;
+ size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) = 0;
// Allocate num_bytes without allowing the underlying space to grow.
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) = 0;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) = 0;
// Return the storage space required by obj. If usable_size isn't nullptr then it is set to the
// amount of the storage space that may be used by obj.
virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
@@ -67,6 +68,11 @@
virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ // Returns the maximum number of bytes that could be allocated for the given
+ // size in bulk, that is, the maximum value for the
+ // bytes_tl_bulk_allocated out param returned by MallocSpace::Alloc().
+ virtual size_t MaxBytesBulkAllocatedFor(size_t num_bytes) = 0;
+
#ifndef NDEBUG
virtual void CheckMoreCoreForPrecondition() {} // to be overridden in the debug build.
#else
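
MaxBytesBulkAllocatedFor() lets the out-of-memory check in Heap::TryToAllocate() be made against the worst-case bulk charge rather than the request size. A sketch of the two typical shapes of that bound (the run size below is an assumption for illustration, not rosalloc's actual bracket geometry):

    #include <cassert>
    #include <cstddef>

    constexpr size_t kRunSize = 4096;  // assumed thread-local run size

    // A plain free-list space never bulk-allocates: the bound is the request.
    size_t FreeListMaxBulk(size_t num_bytes) { return num_bytes; }

    // A rosalloc-style space may charge an entire fresh thread-local run
    // for a small request, so the worst case is the whole run.
    size_t RosAllocStyleMaxBulk(size_t num_bytes) {
      return num_bytes <= kRunSize ? kRunSize : num_bytes;
    }

    int main() {
      assert(FreeListMaxBulk(100) == 100);
      assert(RosAllocStyleMaxBulk(100) == kRunSize);  // whole run charged
      assert(RosAllocStyleMaxBulk(3 * kRunSize) == 3 * kRunSize);
      return 0;
    }
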
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index a4ed718..1cdf69d 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -24,30 +24,36 @@
namespace space {
inline mirror::Object* RegionSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
num_bytes = RoundUp(num_bytes, kAlignment);
- return AllocNonvirtual<false>(num_bytes, bytes_allocated, usable_size);
+ return AllocNonvirtual<false>(num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
inline mirror::Object* RegionSpace::AllocThreadUnsafe(Thread* self, size_t num_bytes,
size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
Locks::mutator_lock_->AssertExclusiveHeld(self);
- return Alloc(self, num_bytes, bytes_allocated, usable_size);
+ return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
}
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
DCHECK(IsAligned<kAlignment>(num_bytes));
mirror::Object* obj;
if (LIKELY(num_bytes <= kRegionSize)) {
// Non-large object.
if (!kForEvac) {
- obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size);
+ obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
} else {
DCHECK(evac_region_ != nullptr);
- obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size);
+ obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
if (LIKELY(obj != nullptr)) {
return obj;
@@ -55,9 +61,11 @@
MutexLock mu(Thread::Current(), region_lock_);
// Retry with current region since another thread may have updated it.
if (!kForEvac) {
- obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size);
+ obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
} else {
- obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size);
+ obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
if (LIKELY(obj != nullptr)) {
return obj;
@@ -73,7 +81,7 @@
r->Unfree(time_);
r->SetNewlyAllocated();
++num_non_free_regions_;
- obj = r->Alloc(num_bytes, bytes_allocated, usable_size);
+ obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
CHECK(obj != nullptr);
current_region_ = r;
return obj;
@@ -85,7 +93,7 @@
if (r->IsFree()) {
r->Unfree(time_);
++num_non_free_regions_;
- obj = r->Alloc(num_bytes, bytes_allocated, usable_size);
+ obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
CHECK(obj != nullptr);
evac_region_ = r;
return obj;
@@ -94,7 +102,8 @@
}
} else {
// Large object.
- obj = AllocLarge<kForEvac>(num_bytes, bytes_allocated, usable_size);
+ obj = AllocLarge<kForEvac>(num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
if (LIKELY(obj != nullptr)) {
return obj;
}
@@ -103,7 +112,8 @@
}
inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
DCHECK(IsAllocated() && IsInToSpace());
DCHECK(IsAligned<kAlignment>(num_bytes));
Atomic<uint8_t*>* atomic_top = reinterpret_cast<Atomic<uint8_t*>*>(&top_);
@@ -124,6 +134,7 @@
if (usable_size != nullptr) {
*usable_size = num_bytes;
}
+ *bytes_tl_bulk_allocated = num_bytes;
return reinterpret_cast<mirror::Object*>(old_top);
}
@@ -253,7 +264,8 @@
template<bool kForEvac>
mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
DCHECK(IsAligned<kAlignment>(num_bytes));
DCHECK_GT(num_bytes, kRegionSize);
size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
@@ -300,6 +312,7 @@
if (usable_size != nullptr) {
*usable_size = num_regs * kRegionSize;
}
+ *bytes_tl_bulk_allocated = num_bytes;
return reinterpret_cast<mirror::Object*>(first_reg->Begin());
} else {
// right points to the non-free region. Start with the one after it.
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 8bb73d6..814ab6c 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -76,7 +76,7 @@
current_region_ = &full_region_;
evac_region_ = nullptr;
size_t ignored;
- DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr) == nullptr);
+ DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr, &ignored) == nullptr);
}
size_t RegionSpace::FromSpaceSize() {
@@ -356,9 +356,10 @@
return false;
}
-void RegionSpace::RevokeThreadLocalBuffers(Thread* thread) {
+size_t RegionSpace::RevokeThreadLocalBuffers(Thread* thread) {
MutexLock mu(Thread::Current(), region_lock_);
RevokeThreadLocalBuffersLocked(thread);
+ return 0U;
}
void RegionSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
@@ -377,7 +378,7 @@
thread->SetTlab(nullptr, nullptr);
}
-void RegionSpace::RevokeAllThreadLocalBuffers() {
+size_t RegionSpace::RevokeAllThreadLocalBuffers() {
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
MutexLock mu2(self, *Locks::thread_list_lock_);
@@ -385,6 +386,7 @@
for (Thread* thread : thread_list) {
RevokeThreadLocalBuffers(thread);
}
+ return 0U;
}
void RegionSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 4160547..b88ce24 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -42,18 +42,20 @@
// Allocate num_bytes, returns nullptr if the space is full.
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size)
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
// The main allocation routine.
template<bool kForEvac>
ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size);
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated);
// Allocate/free large objects (objects that are larger than the region size.)
template<bool kForEvac>
- mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size);
+ mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated);
void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated);
// Return the storage space required by obj.
@@ -87,10 +89,10 @@
void DumpRegions(std::ostream& os);
void DumpNonFreeRegions(std::ostream& os);
- void RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(region_lock_);
+ size_t RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(region_lock_);
void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(region_lock_);
- void RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
- Locks::thread_list_lock_);
+ size_t RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
+ Locks::thread_list_lock_);
void AssertThreadLocalBuffersAreRevoked(Thread* thread) LOCKS_EXCLUDED(region_lock_);
void AssertAllThreadLocalBuffersAreRevoked() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
Locks::thread_list_lock_);
@@ -269,7 +271,8 @@
}
ALWAYS_INLINE mirror::Object* Alloc(size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size);
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated);
bool IsFree() const {
bool is_free = state_ == RegionState::kRegionStateFree;
diff --git a/runtime/gc/space/rosalloc_space-inl.h b/runtime/gc/space/rosalloc_space-inl.h
index 5d6642d..9d582a3 100644
--- a/runtime/gc/space/rosalloc_space-inl.h
+++ b/runtime/gc/space/rosalloc_space-inl.h
@@ -26,13 +26,19 @@
namespace gc {
namespace space {
+template<bool kMaybeRunningOnValgrind>
inline size_t RosAllocSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
// obj is a valid object. Use its class in the header to get the size.
// Don't use verification since the object may be dead if we are sweeping.
size_t size = obj->SizeOf<kVerifyNone>();
- bool running_on_valgrind = RUNNING_ON_VALGRIND != 0;
- if (running_on_valgrind) {
- size += 2 * kDefaultValgrindRedZoneBytes;
+ bool running_on_valgrind = false;
+ if (kMaybeRunningOnValgrind) {
+ running_on_valgrind = RUNNING_ON_VALGRIND != 0;
+ if (running_on_valgrind) {
+ size += 2 * kDefaultValgrindRedZoneBytes;
+ }
+ } else {
+ DCHECK_EQ(RUNNING_ON_VALGRIND, 0U);
}
size_t size_by_size = rosalloc_->UsableSize(size);
if (kIsDebugBuild) {
@@ -55,28 +61,50 @@
template<bool kThreadSafe>
inline mirror::Object* RosAllocSpace::AllocCommon(Thread* self, size_t num_bytes,
- size_t* bytes_allocated, size_t* usable_size) {
- size_t rosalloc_size = 0;
+ size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
+ size_t rosalloc_bytes_allocated = 0;
+ size_t rosalloc_usable_size = 0;
+ size_t rosalloc_bytes_tl_bulk_allocated = 0;
if (!kThreadSafe) {
Locks::mutator_lock_->AssertExclusiveHeld(self);
}
mirror::Object* result = reinterpret_cast<mirror::Object*>(
- rosalloc_->Alloc<kThreadSafe>(self, num_bytes, &rosalloc_size));
+ rosalloc_->Alloc<kThreadSafe>(self, num_bytes, &rosalloc_bytes_allocated,
+ &rosalloc_usable_size,
+ &rosalloc_bytes_tl_bulk_allocated));
if (LIKELY(result != NULL)) {
if (kDebugSpaces) {
CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
<< ") not in bounds of allocation space " << *this;
}
DCHECK(bytes_allocated != NULL);
- *bytes_allocated = rosalloc_size;
- DCHECK_EQ(rosalloc_size, rosalloc_->UsableSize(result));
+ *bytes_allocated = rosalloc_bytes_allocated;
+ DCHECK_EQ(rosalloc_usable_size, rosalloc_->UsableSize(result));
if (usable_size != nullptr) {
- *usable_size = rosalloc_size;
+ *usable_size = rosalloc_usable_size;
}
+ DCHECK(bytes_tl_bulk_allocated != NULL);
+ *bytes_tl_bulk_allocated = rosalloc_bytes_tl_bulk_allocated;
}
return result;
}
+inline bool RosAllocSpace::CanAllocThreadLocal(Thread* self, size_t num_bytes) {
+ return rosalloc_->CanAllocFromThreadLocalRun(self, num_bytes);
+}
+
+inline mirror::Object* RosAllocSpace::AllocThreadLocal(Thread* self, size_t num_bytes,
+ size_t* bytes_allocated) {
+ DCHECK(bytes_allocated != nullptr);
+ return reinterpret_cast<mirror::Object*>(
+ rosalloc_->AllocFromThreadLocalRun(self, num_bytes, bytes_allocated));
+}
+
+inline size_t RosAllocSpace::MaxBytesBulkAllocatedForNonvirtual(size_t num_bytes) {
+ return rosalloc_->MaxBytesBulkAllocatedFor(num_bytes);
+}
+
} // namespace space
} // namespace gc
} // namespace art
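
The CanAllocThreadLocal()/AllocThreadLocal() pair added above backs the new uninstrumented fast path in heap-inl.h: a request that fits the current thread-local run is served without any bulk charge, because the run was already counted when it was handed out. A hedged sketch of that split (all names hypothetical, not the rosalloc implementation):

    #include <cstddef>
    #include <cstdio>

    struct ThreadLocalRun {
      size_t remaining;

      bool CanAllocThreadLocal(size_t num_bytes) const {
        return num_bytes <= remaining;
      }
      void* AllocThreadLocal(size_t num_bytes, size_t* bytes_allocated) {
        if (!CanAllocThreadLocal(num_bytes)) {
          return nullptr;  // caller falls back to the slow path
        }
        remaining -= num_bytes;
        *bytes_allocated = num_bytes;
        static char backing[4096];
        return backing;  // placeholder address; a real run hands out slots
      }
    };

    int main() {
      ThreadLocalRun run{256};
      size_t bytes_allocated = 0;
      if (run.AllocThreadLocal(64, &bytes_allocated) != nullptr) {
        std::printf("fast path: %zu bytes, no bulk charge\n", bytes_allocated);
      }
      return 0;
    }
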
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index ced25a4..f140021 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -154,7 +154,8 @@
}
mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
- size_t* bytes_allocated, size_t* usable_size) {
+ size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
mirror::Object* result;
{
MutexLock mu(self, lock_);
@@ -162,7 +163,8 @@
size_t max_allowed = Capacity();
rosalloc_->SetFootprintLimit(max_allowed);
// Try the allocation.
- result = AllocCommon(self, num_bytes, bytes_allocated, usable_size);
+ result = AllocCommon(self, num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
// Shrink back down as small as possible.
size_t footprint = rosalloc_->Footprint();
rosalloc_->SetFootprintLimit(footprint);
@@ -209,7 +211,7 @@
__builtin_prefetch(reinterpret_cast<char*>(ptrs[i + kPrefetchLookAhead]));
}
if (kVerifyFreedBytes) {
- verify_bytes += AllocationSizeNonvirtual(ptrs[i], nullptr);
+ verify_bytes += AllocationSizeNonvirtual<true>(ptrs[i], nullptr);
}
}
@@ -338,12 +340,12 @@
}
}
-void RosAllocSpace::RevokeThreadLocalBuffers(Thread* thread) {
- rosalloc_->RevokeThreadLocalRuns(thread);
+size_t RosAllocSpace::RevokeThreadLocalBuffers(Thread* thread) {
+ return rosalloc_->RevokeThreadLocalRuns(thread);
}
-void RosAllocSpace::RevokeAllThreadLocalBuffers() {
- rosalloc_->RevokeAllThreadLocalRuns();
+size_t RosAllocSpace::RevokeAllThreadLocalBuffers() {
+ return rosalloc_->RevokeAllThreadLocalRuns();
}
void RosAllocSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index c856e95..36268f7 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -47,18 +47,21 @@
bool low_memory_mode, bool can_move_objects);
mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE LOCKS_EXCLUDED(lock_);
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ OVERRIDE LOCKS_EXCLUDED(lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE {
- return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size);
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE {
+ return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size)
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return AllocNonvirtualThreadUnsafe(self, num_bytes, bytes_allocated, usable_size);
+ return AllocNonvirtualThreadUnsafe(self, num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
- return AllocationSizeNonvirtual(obj, usable_size);
+ return AllocationSizeNonvirtual<true>(obj, usable_size);
}
size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -66,17 +69,33 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
// RosAlloc zeroes memory internally.
- return AllocCommon(self, num_bytes, bytes_allocated, usable_size);
+ return AllocCommon(self, num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
mirror::Object* AllocNonvirtualThreadUnsafe(Thread* self, size_t num_bytes,
- size_t* bytes_allocated, size_t* usable_size) {
+ size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
// RosAlloc zeroes memory internally. Pass in false for thread unsafe.
- return AllocCommon<false>(self, num_bytes, bytes_allocated, usable_size);
+ return AllocCommon<false>(self, num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
+ // Returns true if the given allocation request can be allocated in
+ // an existing thread local run without allocating a new run.
+ ALWAYS_INLINE bool CanAllocThreadLocal(Thread* self, size_t num_bytes);
+ // Allocate the given allocation request in an existing thread local
+ // run without allocating a new run.
+ ALWAYS_INLINE mirror::Object* AllocThreadLocal(Thread* self, size_t num_bytes,
+ size_t* bytes_allocated);
+ size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
+ return MaxBytesBulkAllocatedForNonvirtual(num_bytes);
+ }
+ ALWAYS_INLINE size_t MaxBytesBulkAllocatedForNonvirtual(size_t num_bytes);
+
// TODO: NO_THREAD_SAFETY_ANALYSIS because SizeOf() requires that mutator_lock is held.
+ template<bool kMaybeRunningOnValgrind>
size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
NO_THREAD_SAFETY_ANALYSIS;
@@ -99,8 +118,8 @@
uint64_t GetBytesAllocated() OVERRIDE;
uint64_t GetObjectsAllocated() OVERRIDE;
- void RevokeThreadLocalBuffers(Thread* thread);
- void RevokeAllThreadLocalBuffers();
+ size_t RevokeThreadLocalBuffers(Thread* thread);
+ size_t RevokeAllThreadLocalBuffers();
void AssertThreadLocalBuffersAreRevoked(Thread* thread);
void AssertAllThreadLocalBuffersAreRevoked();
@@ -134,7 +153,7 @@
private:
template<bool kThreadSafe = true>
mirror::Object* AllocCommon(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size);
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated);
void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
size_t maximum_size, bool low_memory_mode) OVERRIDE {
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index d24650b..f2378d9 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -203,14 +203,24 @@
// succeeds, the output parameter bytes_allocated will be set to the
// actually allocated bytes which is >= num_bytes.
// Alloc can be called from multiple threads at the same time and must be thread-safe.
+ //
+ // bytes_tl_bulk_allocated - bytes allocated in bulk ahead of time for a thread local allocation,
+ // if applicable. It can be
+ // 1) equal to bytes_allocated if it's not a thread local allocation,
+ // 2) greater than bytes_allocated if it's a thread local
+ // allocation that required a new buffer, or
+ // 3) zero if it's a thread local allocation in an existing
+ // buffer.
+ // This is the amount to be added to Heap::num_bytes_allocated_.
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) = 0;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) = 0;
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
virtual mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size)
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return Alloc(self, num_bytes, bytes_allocated, usable_size);
+ return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
}
// Return the storage space required by obj.
@@ -224,11 +234,15 @@
// Revoke any sort of thread-local buffers that are used to speed up allocations for the given
// thread, if the alloc space implementation uses any.
- virtual void RevokeThreadLocalBuffers(Thread* thread) = 0;
+ // Returns the total free bytes in the revoked thread local runs that are to be
+ // subtracted from Heap::num_bytes_allocated_, or zero if unnecessary.
+ virtual size_t RevokeThreadLocalBuffers(Thread* thread) = 0;
// Revoke any sort of thread-local buffers that are used to speed up allocations for all the
// threads, if the alloc space implementation uses any.
- virtual void RevokeAllThreadLocalBuffers() = 0;
+ // Returns the total free bytes in the revoked thread local runs that are to be
+ // subtracted from Heap::num_bytes_allocated_, or zero if unnecessary.
+ virtual size_t RevokeAllThreadLocalBuffers() = 0;
virtual void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) = 0;
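
To make the documented out-parameter contract concrete, here is a minimal sketch of a space-like allocator that fills in all three out-params (hypothetical code, not an ART space). Having no thread-local buffers, it always hits case 1 above, so bytes_tl_bulk_allocated equals bytes_allocated and its revoke hook has nothing to return:

    #include <cstddef>
    #include <cstdint>

    struct MiniBumpSpace {
      uint8_t* pos;
      uint8_t* end;

      void* Alloc(size_t num_bytes, size_t* bytes_allocated,
                  size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
        if (static_cast<size_t>(end - pos) < num_bytes) {
          return nullptr;  // space full
        }
        void* ret = pos;
        pos += num_bytes;
        *bytes_allocated = num_bytes;
        if (usable_size != nullptr) {
          *usable_size = num_bytes;
        }
        *bytes_tl_bulk_allocated = num_bytes;  // case 1: no bulk buffering
        return ret;
      }
      size_t RevokeThreadLocalBuffers() { return 0; }  // nothing bulk-counted
    };

    int main() {
      uint8_t buffer[256];
      MiniBumpSpace space{buffer, buffer + sizeof(buffer)};
      size_t ba = 0, us = 0, bulk = 0;
      void* obj = space.Alloc(32, &ba, &us, &bulk);
      return (obj != nullptr && ba == 32 && bulk == 32) ? 0 : 1;
    }
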
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 09d10dd..3e9e9f7 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -61,11 +61,13 @@
}
mirror::Object* Alloc(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
- size_t* bytes_allocated, size_t* usable_size)
+ size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
- mirror::Object* obj = alloc_space->Alloc(self, bytes, bytes_allocated, usable_size);
+ mirror::Object* obj = alloc_space->Alloc(self, bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
if (obj != nullptr) {
InstallClass(obj, byte_array_class.Get(), bytes);
}
@@ -73,11 +75,13 @@
}
mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
- size_t* bytes_allocated, size_t* usable_size)
+ size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
- mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size);
+ mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
if (obj != nullptr) {
InstallClass(obj, byte_array_class.Get(), bytes);
}
@@ -182,34 +186,38 @@
ScopedObjectAccess soa(self);
// Succeeds, fits without adjusting the footprint limit.
- size_t ptr1_bytes_allocated, ptr1_usable_size;
+ size_t ptr1_bytes_allocated, ptr1_usable_size, ptr1_bytes_tl_bulk_allocated;
StackHandleScope<3> hs(soa.Self());
MutableHandle<mirror::Object> ptr1(
- hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size)));
+ hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size,
+ &ptr1_bytes_tl_bulk_allocated)));
EXPECT_TRUE(ptr1.Get() != nullptr);
EXPECT_LE(1U * MB, ptr1_bytes_allocated);
EXPECT_LE(1U * MB, ptr1_usable_size);
EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
+ EXPECT_EQ(ptr1_bytes_tl_bulk_allocated, ptr1_bytes_allocated);
// Fails, requires a higher footprint limit.
- mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr);
+ mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
EXPECT_TRUE(ptr2 == nullptr);
// Succeeds, adjusts the footprint.
- size_t ptr3_bytes_allocated, ptr3_usable_size;
+ size_t ptr3_bytes_allocated, ptr3_usable_size, ptr3_bytes_tl_bulk_allocated;
MutableHandle<mirror::Object> ptr3(
- hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size)));
+ hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size,
+ &ptr3_bytes_tl_bulk_allocated)));
EXPECT_TRUE(ptr3.Get() != nullptr);
EXPECT_LE(8U * MB, ptr3_bytes_allocated);
EXPECT_LE(8U * MB, ptr3_usable_size);
EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
+ EXPECT_EQ(ptr3_bytes_tl_bulk_allocated, ptr3_bytes_allocated);
// Fails, requires a higher footprint limit.
- mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy, nullptr);
+ mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy, nullptr, &dummy);
EXPECT_TRUE(ptr4 == nullptr);
// Also fails, requires a higher allowed footprint.
- mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy, nullptr);
+ mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy, nullptr, &dummy);
EXPECT_TRUE(ptr5 == nullptr);
// Release some memory.
@@ -219,13 +227,15 @@
EXPECT_LE(8U * MB, free3);
// Succeeds, now that memory has been freed.
- size_t ptr6_bytes_allocated, ptr6_usable_size;
+ size_t ptr6_bytes_allocated, ptr6_usable_size, ptr6_bytes_tl_bulk_allocated;
Handle<mirror::Object> ptr6(
- hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size)));
+ hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size,
+ &ptr6_bytes_tl_bulk_allocated)));
EXPECT_TRUE(ptr6.Get() != nullptr);
EXPECT_LE(9U * MB, ptr6_bytes_allocated);
EXPECT_LE(9U * MB, ptr6_usable_size);
EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);
+ EXPECT_EQ(ptr6_bytes_tl_bulk_allocated, ptr6_bytes_allocated);
// Final clean up.
size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
@@ -233,7 +243,7 @@
EXPECT_LE(1U * MB, free1);
// Make sure that the zygote space isn't directly at the start of the space.
- EXPECT_TRUE(space->Alloc(self, 1U * MB, &dummy, nullptr) != nullptr);
+ EXPECT_TRUE(space->Alloc(self, 1U * MB, &dummy, nullptr, &dummy) != nullptr);
gc::Heap* heap = Runtime::Current()->GetHeap();
space::Space* old_space = space;
@@ -250,22 +260,26 @@
AddSpace(space, false);
// Succeeds, fits without adjusting the footprint limit.
- ptr1.Assign(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size));
+ ptr1.Assign(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size,
+ &ptr1_bytes_tl_bulk_allocated));
EXPECT_TRUE(ptr1.Get() != nullptr);
EXPECT_LE(1U * MB, ptr1_bytes_allocated);
EXPECT_LE(1U * MB, ptr1_usable_size);
EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
+ EXPECT_EQ(ptr1_bytes_tl_bulk_allocated, ptr1_bytes_allocated);
// Fails, requires a higher footprint limit.
- ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr);
+ ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
EXPECT_TRUE(ptr2 == nullptr);
// Succeeds, adjusts the footprint.
- ptr3.Assign(AllocWithGrowth(space, self, 2 * MB, &ptr3_bytes_allocated, &ptr3_usable_size));
+ ptr3.Assign(AllocWithGrowth(space, self, 2 * MB, &ptr3_bytes_allocated, &ptr3_usable_size,
+ &ptr3_bytes_tl_bulk_allocated));
EXPECT_TRUE(ptr3.Get() != nullptr);
EXPECT_LE(2U * MB, ptr3_bytes_allocated);
EXPECT_LE(2U * MB, ptr3_usable_size);
EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
+ EXPECT_EQ(ptr3_bytes_tl_bulk_allocated, ptr3_bytes_allocated);
space->Free(self, ptr3.Assign(nullptr));
// Final clean up.
@@ -285,34 +299,38 @@
AddSpace(space);
// Succeeds, fits without adjusting the footprint limit.
- size_t ptr1_bytes_allocated, ptr1_usable_size;
+ size_t ptr1_bytes_allocated, ptr1_usable_size, ptr1_bytes_tl_bulk_allocated;
StackHandleScope<3> hs(soa.Self());
MutableHandle<mirror::Object> ptr1(
- hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size)));
+ hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size,
+ &ptr1_bytes_tl_bulk_allocated)));
EXPECT_TRUE(ptr1.Get() != nullptr);
EXPECT_LE(1U * MB, ptr1_bytes_allocated);
EXPECT_LE(1U * MB, ptr1_usable_size);
EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
+ EXPECT_EQ(ptr1_bytes_tl_bulk_allocated, ptr1_bytes_allocated);
// Fails, requires a higher footprint limit.
- mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr);
+ mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
EXPECT_TRUE(ptr2 == nullptr);
// Succeeds, adjusts the footprint.
- size_t ptr3_bytes_allocated, ptr3_usable_size;
+ size_t ptr3_bytes_allocated, ptr3_usable_size, ptr3_bytes_tl_bulk_allocated;
MutableHandle<mirror::Object> ptr3(
- hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size)));
+ hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size,
+ &ptr3_bytes_tl_bulk_allocated)));
EXPECT_TRUE(ptr3.Get() != nullptr);
EXPECT_LE(8U * MB, ptr3_bytes_allocated);
EXPECT_LE(8U * MB, ptr3_usable_size);
EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
+ EXPECT_EQ(ptr3_bytes_tl_bulk_allocated, ptr3_bytes_allocated);
// Fails, requires a higher footprint limit.
- mirror::Object* ptr4 = Alloc(space, self, 8 * MB, &dummy, nullptr);
+ mirror::Object* ptr4 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
EXPECT_TRUE(ptr4 == nullptr);
// Also fails, requires a higher allowed footprint.
- mirror::Object* ptr5 = AllocWithGrowth(space, self, 8 * MB, &dummy, nullptr);
+ mirror::Object* ptr5 = AllocWithGrowth(space, self, 8 * MB, &dummy, nullptr, &dummy);
EXPECT_TRUE(ptr5 == nullptr);
// Release some memory.
@@ -322,13 +340,15 @@
EXPECT_LE(8U * MB, free3);
// Succeeds, now that memory has been freed.
- size_t ptr6_bytes_allocated, ptr6_usable_size;
+ size_t ptr6_bytes_allocated, ptr6_usable_size, ptr6_bytes_tl_bulk_allocated;
Handle<mirror::Object> ptr6(
- hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size)));
+ hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size,
+ &ptr6_bytes_tl_bulk_allocated)));
EXPECT_TRUE(ptr6.Get() != nullptr);
EXPECT_LE(9U * MB, ptr6_bytes_allocated);
EXPECT_LE(9U * MB, ptr6_usable_size);
EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);
+ EXPECT_EQ(ptr6_bytes_tl_bulk_allocated, ptr6_bytes_allocated);
// Final clean up.
size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
@@ -348,14 +368,16 @@
// Succeeds, fits without adjusting the max allowed footprint.
mirror::Object* lots_of_objects[1024];
for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
- size_t allocation_size, usable_size;
+ size_t allocation_size, usable_size, bytes_tl_bulk_allocated;
size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
lots_of_objects[i] = Alloc(space, self, size_of_zero_length_byte_array, &allocation_size,
- &usable_size);
+ &usable_size, &bytes_tl_bulk_allocated);
EXPECT_TRUE(lots_of_objects[i] != nullptr);
size_t computed_usable_size;
EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
EXPECT_EQ(usable_size, computed_usable_size);
+ EXPECT_TRUE(bytes_tl_bulk_allocated == 0 ||
+ bytes_tl_bulk_allocated >= allocation_size);
}
// Release memory.
@@ -363,12 +385,15 @@
// Succeeds, fits by adjusting the max allowed footprint.
for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
- size_t allocation_size, usable_size;
- lots_of_objects[i] = AllocWithGrowth(space, self, 1024, &allocation_size, &usable_size);
+ size_t allocation_size, usable_size, bytes_tl_bulk_allocated;
+ lots_of_objects[i] = AllocWithGrowth(space, self, 1024, &allocation_size, &usable_size,
+ &bytes_tl_bulk_allocated);
EXPECT_TRUE(lots_of_objects[i] != nullptr);
size_t computed_usable_size;
EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
EXPECT_EQ(usable_size, computed_usable_size);
+ EXPECT_TRUE(bytes_tl_bulk_allocated == 0 ||
+ bytes_tl_bulk_allocated >= allocation_size);
}
// Release memory.
@@ -425,10 +450,13 @@
StackHandleScope<1> hs(soa.Self());
auto object(hs.NewHandle<mirror::Object>(nullptr));
size_t bytes_allocated = 0;
+ size_t bytes_tl_bulk_allocated;
if (round <= 1) {
- object.Assign(Alloc(space, self, alloc_size, &bytes_allocated, nullptr));
+ object.Assign(Alloc(space, self, alloc_size, &bytes_allocated, nullptr,
+ &bytes_tl_bulk_allocated));
} else {
- object.Assign(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr));
+ object.Assign(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr,
+ &bytes_tl_bulk_allocated));
}
footprint = space->GetFootprint();
EXPECT_GE(space->Size(), footprint); // invariant
@@ -441,6 +469,8 @@
} else {
EXPECT_GE(allocation_size, 8u);
}
+ EXPECT_TRUE(bytes_tl_bulk_allocated == 0 ||
+ bytes_tl_bulk_allocated >= allocation_size);
amount_allocated += allocation_size;
break;
}
@@ -518,11 +548,13 @@
auto large_object(hs.NewHandle<mirror::Object>(nullptr));
size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
size_t bytes_allocated = 0;
+ size_t bytes_tl_bulk_allocated;
if (round <= 1) {
- large_object.Assign(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr));
+ large_object.Assign(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr,
+ &bytes_tl_bulk_allocated));
} else {
large_object.Assign(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
- nullptr));
+ nullptr, &bytes_tl_bulk_allocated));
}
EXPECT_TRUE(large_object.Get() != nullptr);
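Throughout the test changes above, the new out-parameter obeys one invariant: it is zero when the allocation was carved out of an existing thread-local run, and otherwise it covers at least the object just allocated (for plain malloc spaces the EXPECT_EQ checks show it simply equals bytes_allocated). A standalone restatement of that check, with an illustrative helper name:

    // Mirrors the EXPECT_TRUE assertions in space_test.h.
    bool BytesTlBulkAllocatedIsSane(size_t bytes_tl_bulk_allocated,
                                    size_t allocation_size) {
      return bytes_tl_bulk_allocated == 0u ||
             bytes_tl_bulk_allocated >= allocation_size;
    }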
diff --git a/runtime/gc/space/valgrind_malloc_space-inl.h b/runtime/gc/space/valgrind_malloc_space-inl.h
index ae8e892..bc329e1 100644
--- a/runtime/gc/space/valgrind_malloc_space-inl.h
+++ b/runtime/gc/space/valgrind_malloc_space-inl.h
@@ -32,10 +32,15 @@
template <size_t kValgrindRedZoneBytes, bool kUseObjSizeForUsable>
inline mirror::Object* AdjustForValgrind(void* obj_with_rdz, size_t num_bytes,
size_t bytes_allocated, size_t usable_size,
- size_t* bytes_allocated_out, size_t* usable_size_out) {
+ size_t bytes_tl_bulk_allocated,
+ size_t* bytes_allocated_out, size_t* usable_size_out,
+ size_t* bytes_tl_bulk_allocated_out) {
if (bytes_allocated_out != nullptr) {
*bytes_allocated_out = bytes_allocated;
}
+ if (bytes_tl_bulk_allocated_out != nullptr) {
+ *bytes_tl_bulk_allocated_out = bytes_tl_bulk_allocated;
+ }
// This cuts over-provision and is a trade-off between testing the over-provisioning code paths
// vs checking overflows in the regular paths.
@@ -82,20 +87,25 @@
kValgrindRedZoneBytes,
kAdjustForRedzoneInAllocSize,
kUseObjSizeForUsable>::AllocWithGrowth(
- Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out) {
+ Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
+ size_t* bytes_tl_bulk_allocated_out) {
size_t bytes_allocated;
size_t usable_size;
+ size_t bytes_tl_bulk_allocated;
void* obj_with_rdz = S::AllocWithGrowth(self, num_bytes + 2 * kValgrindRedZoneBytes,
- &bytes_allocated, &usable_size);
+ &bytes_allocated, &usable_size,
+ &bytes_tl_bulk_allocated);
if (obj_with_rdz == nullptr) {
return nullptr;
}
- return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes,
- kUseObjSizeForUsable>(obj_with_rdz, num_bytes,
- bytes_allocated, usable_size,
- bytes_allocated_out,
- usable_size_out);
+ return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes, kUseObjSizeForUsable>(
+ obj_with_rdz, num_bytes,
+ bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated,
+ bytes_allocated_out,
+ usable_size_out,
+ bytes_tl_bulk_allocated_out);
}
template <typename S,
@@ -106,11 +116,13 @@
kValgrindRedZoneBytes,
kAdjustForRedzoneInAllocSize,
kUseObjSizeForUsable>::Alloc(
- Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out) {
+ Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
+ size_t* bytes_tl_bulk_allocated_out) {
size_t bytes_allocated;
size_t usable_size;
+ size_t bytes_tl_bulk_allocated;
void* obj_with_rdz = S::Alloc(self, num_bytes + 2 * kValgrindRedZoneBytes,
- &bytes_allocated, &usable_size);
+ &bytes_allocated, &usable_size, &bytes_tl_bulk_allocated);
if (obj_with_rdz == nullptr) {
return nullptr;
}
@@ -118,8 +130,10 @@
return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes,
kUseObjSizeForUsable>(obj_with_rdz, num_bytes,
bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated,
bytes_allocated_out,
- usable_size_out);
+ usable_size_out,
+ bytes_tl_bulk_allocated_out);
}
template <typename S,
@@ -130,20 +144,25 @@
kValgrindRedZoneBytes,
kAdjustForRedzoneInAllocSize,
kUseObjSizeForUsable>::AllocThreadUnsafe(
- Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out) {
+ Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
+ size_t* bytes_tl_bulk_allocated_out) {
size_t bytes_allocated;
size_t usable_size;
+ size_t bytes_tl_bulk_allocated;
void* obj_with_rdz = S::AllocThreadUnsafe(self, num_bytes + 2 * kValgrindRedZoneBytes,
- &bytes_allocated, &usable_size);
+ &bytes_allocated, &usable_size,
+ &bytes_tl_bulk_allocated);
if (obj_with_rdz == nullptr) {
return nullptr;
}
- return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes,
- kUseObjSizeForUsable>(obj_with_rdz, num_bytes,
- bytes_allocated, usable_size,
- bytes_allocated_out,
- usable_size_out);
+ return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes, kUseObjSizeForUsable>(
+ obj_with_rdz, num_bytes,
+ bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated,
+ bytes_allocated_out,
+ usable_size_out,
+ bytes_tl_bulk_allocated_out);
}
template <typename S,
@@ -226,6 +245,17 @@
mem_map->Size() - initial_size);
}
+template <typename S,
+ size_t kValgrindRedZoneBytes,
+ bool kAdjustForRedzoneInAllocSize,
+ bool kUseObjSizeForUsable>
+size_t ValgrindMallocSpace<S,
+ kValgrindRedZoneBytes,
+ kAdjustForRedzoneInAllocSize,
+ kUseObjSizeForUsable>::MaxBytesBulkAllocatedFor(size_t num_bytes) {
+ return S::MaxBytesBulkAllocatedFor(num_bytes + 2 * kValgrindRedZoneBytes);
+}
+
} // namespace space
} // namespace gc
} // namespace art
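AdjustForValgrind now threads bytes_tl_bulk_allocated through alongside the existing out-parameters, using the same nullptr-tolerant write as before. A minimal sketch of that pass-through pattern, with an illustrative name:

    // Callers may pass nullptr for out-parameters they don't need.
    inline void ForwardSize(size_t value, size_t* out) {
      if (out != nullptr) {
        *out = value;
      }
    }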
diff --git a/runtime/gc/space/valgrind_malloc_space.h b/runtime/gc/space/valgrind_malloc_space.h
index 707ea69..a6b010a 100644
--- a/runtime/gc/space/valgrind_malloc_space.h
+++ b/runtime/gc/space/valgrind_malloc_space.h
@@ -34,12 +34,13 @@
class ValgrindMallocSpace FINAL : public BaseMallocSpaceType {
public:
mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ OVERRIDE;
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
@@ -53,6 +54,8 @@
UNUSED(ptr);
}
+ size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE;
+
template <typename... Params>
explicit ValgrindMallocSpace(MemMap* mem_map, size_t initial_size, Params... params);
virtual ~ValgrindMallocSpace() {}
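The new MaxBytesBulkAllocatedFor override makes the redzone padding visible to bulk-size estimates: the wrapped space is asked about num_bytes plus the two per-object redzones. A hedged usage sketch; the wrapper function and a MaxBytesBulkAllocatedFor declaration on MallocSpace are assumptions:

    // Worst-case bulk bytes for one object of num_bytes; redzones are
    // included when the space is a ValgrindMallocSpace.
    size_t WorstCaseBulkBytes(gc::space::MallocSpace* space, size_t num_bytes) {
      return space->MaxBytesBulkAllocatedFor(num_bytes);
    }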
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index a868e68..9e882a8 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -77,7 +77,7 @@
<< ",name=\"" << GetName() << "\"]";
}
-mirror::Object* ZygoteSpace::Alloc(Thread*, size_t, size_t*, size_t*) {
+mirror::Object* ZygoteSpace::Alloc(Thread*, size_t, size_t*, size_t*, size_t*) {
UNIMPLEMENTED(FATAL);
UNREACHABLE();
}
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 0cf4bb1..934a234 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -46,7 +46,7 @@
}
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
@@ -55,9 +55,11 @@
size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
// ZygoteSpaces don't have thread local state.
- void RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ return 0U;
}
- void RevokeAllThreadLocalBuffers() OVERRIDE {
+ size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+ return 0U;
}
uint64_t GetBytesAllocated() {
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index d2e93bc..5a7b7e1 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -1008,17 +1008,26 @@
HprofBasicType t = SignatureToBasicTypeAndSize(f->GetTypeDescriptor(), &size);
__ AddStringId(LookupStringId(f->GetName()));
__ AddU1(t);
- switch (size) {
- case 1:
- __ AddU1(static_cast<uint8_t>(f->Get32(klass)));
+ switch (t) {
+ case hprof_basic_byte:
+ __ AddU1(f->GetByte(klass));
break;
- case 2:
- __ AddU2(static_cast<uint16_t>(f->Get32(klass)));
+ case hprof_basic_boolean:
+ __ AddU1(f->GetBoolean(klass));
break;
- case 4:
+ case hprof_basic_char:
+ __ AddU2(f->GetChar(klass));
+ break;
+ case hprof_basic_short:
+ __ AddU2(f->GetShort(klass));
+ break;
+ case hprof_basic_float:
+ case hprof_basic_int:
+ case hprof_basic_object:
__ AddU4(f->Get32(klass));
break;
- case 8:
+ case hprof_basic_double:
+ case hprof_basic_long:
__ AddU8(f->Get64(klass));
break;
default:
@@ -1099,16 +1108,29 @@
for (int i = 0; i < ifieldCount; ++i) {
mirror::ArtField* f = klass->GetInstanceField(i);
size_t size;
- SignatureToBasicTypeAndSize(f->GetTypeDescriptor(), &size);
- if (size == 1) {
- __ AddU1(f->Get32(obj));
- } else if (size == 2) {
- __ AddU2(f->Get32(obj));
- } else if (size == 4) {
+ auto t = SignatureToBasicTypeAndSize(f->GetTypeDescriptor(), &size);
+ switch (t) {
+ case hprof_basic_byte:
+ __ AddU1(f->GetByte(obj));
+ break;
+ case hprof_basic_boolean:
+ __ AddU1(f->GetBoolean(obj));
+ break;
+ case hprof_basic_char:
+ __ AddU2(f->GetChar(obj));
+ break;
+ case hprof_basic_short:
+ __ AddU2(f->GetShort(obj));
+ break;
+ case hprof_basic_float:
+ case hprof_basic_int:
+ case hprof_basic_object:
__ AddU4(f->Get32(obj));
- } else {
- CHECK_EQ(size, 8U);
+ break;
+ case hprof_basic_double:
+ case hprof_basic_long:
__ AddU8(f->Get64(obj));
+ break;
}
}
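Both hprof hunks above replace a size-based switch with a type-based one, so boolean/byte and char/short fields go through their correctly typed getters while the emitted record widths stay the same. The mapping they implement, as an illustrative helper:

    // Record width per hprof basic type, matching the AddU1/U2/U4/U8 calls.
    static size_t HprofBasicTypeWidth(HprofBasicType t) {
      switch (t) {
        case hprof_basic_boolean:
        case hprof_basic_byte:
          return 1;
        case hprof_basic_char:
        case hprof_basic_short:
          return 2;
        case hprof_basic_float:
        case hprof_basic_int:
        case hprof_basic_object:
          return 4;  // object IDs are written as 32-bit values here
        case hprof_basic_double:
        case hprof_basic_long:
          return 8;
      }
      return 0;  // unreachable for valid types
    }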
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index c94dab9..085062c 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -949,19 +949,16 @@
}
}
-void Instrumentation::ExceptionCaughtEvent(Thread* thread, const ThrowLocation& throw_location,
- mirror::ArtMethod* catch_method,
- uint32_t catch_dex_pc,
+void Instrumentation::ExceptionCaughtEvent(Thread* thread,
mirror::Throwable* exception_object) const {
if (HasExceptionCaughtListeners()) {
- DCHECK_EQ(thread->GetException(nullptr), exception_object);
+ DCHECK_EQ(thread->GetException(), exception_object);
thread->ClearException();
std::shared_ptr<std::list<InstrumentationListener*>> original(exception_caught_listeners_);
for (InstrumentationListener* listener : *original.get()) {
- listener->ExceptionCaught(thread, throw_location, catch_method, catch_dex_pc,
- exception_object);
+ listener->ExceptionCaught(thread, exception_object);
}
- thread->SetException(throw_location, exception_object);
+ thread->SetException(exception_object);
}
}
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index b667a40..8972f3a 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -38,7 +38,6 @@
} // namespace mirror
union JValue;
class Thread;
-class ThrowLocation;
namespace instrumentation {
@@ -90,9 +89,7 @@
uint32_t dex_pc, mirror::ArtField* field, const JValue& field_value) = 0;
// Call-back when an exception is caught.
- virtual void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
- mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
- mirror::Throwable* exception_object)
+ virtual void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
// Call-back for when we get a backward branch.
@@ -322,9 +319,7 @@
}
// Inform listeners that an exception was caught.
- void ExceptionCaughtEvent(Thread* thread, const ThrowLocation& throw_location,
- mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
- mirror::Throwable* exception_object) const
+ void ExceptionCaughtEvent(Thread* thread, mirror::Throwable* exception_object) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Called when an instrumented method is entered. The intended link register (lr) is saved so
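With ThrowLocation gone, ExceptionCaught listeners receive only the thread and the throwable. A hedged sketch of a listener under the new shape; the class and its body are illustrative:

    class TracingListener : public instrumentation::InstrumentationListener {
     public:
      void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
          SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE {
        // The catch method and catch dex pc are no longer delivered; a
        // listener that needs them must walk the stack itself.
        static_cast<void>(thread);  // unused in this sketch
        LOG(INFO) << "caught: " << exception_object->Dump();
      }
      // Remaining pure-virtual callbacks elided for brevity.
    };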
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 9d988e9..686b518 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -19,116 +19,13 @@
#include <limits>
#include "mirror/string-inl.h"
+#include "scoped_thread_state_change.h"
+#include "ScopedLocalRef.h"
+#include "unstarted_runtime.h"
namespace art {
namespace interpreter {
-// Hand select a number of methods to be run in a not yet started runtime without using JNI.
-static void UnstartedRuntimeJni(Thread* self, ArtMethod* method,
- Object* receiver, uint32_t* args, JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- std::string name(PrettyMethod(method));
- if (name == "java.lang.Object dalvik.system.VMRuntime.newUnpaddedArray(java.lang.Class, int)") {
- int32_t length = args[1];
- DCHECK_GE(length, 0);
- mirror::Class* element_class = reinterpret_cast<Object*>(args[0])->AsClass();
- Runtime* runtime = Runtime::Current();
- mirror::Class* array_class = runtime->GetClassLinker()->FindArrayClass(self, &element_class);
- DCHECK(array_class != nullptr);
- gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator();
- result->SetL(mirror::Array::Alloc<true, true>(self, array_class, length,
- array_class->GetComponentSizeShift(), allocator));
- } else if (name == "java.lang.ClassLoader dalvik.system.VMStack.getCallingClassLoader()") {
- result->SetL(NULL);
- } else if (name == "java.lang.Class dalvik.system.VMStack.getStackClass2()") {
- NthCallerVisitor visitor(self, 3);
- visitor.WalkStack();
- result->SetL(visitor.caller->GetDeclaringClass());
- } else if (name == "double java.lang.Math.log(double)") {
- JValue value;
- value.SetJ((static_cast<uint64_t>(args[1]) << 32) | args[0]);
- result->SetD(log(value.GetD()));
- } else if (name == "java.lang.String java.lang.Class.getNameNative()") {
- StackHandleScope<1> hs(self);
- result->SetL(mirror::Class::ComputeName(hs.NewHandle(receiver->AsClass())));
- } else if (name == "int java.lang.Float.floatToRawIntBits(float)") {
- result->SetI(args[0]);
- } else if (name == "float java.lang.Float.intBitsToFloat(int)") {
- result->SetI(args[0]);
- } else if (name == "double java.lang.Math.exp(double)") {
- JValue value;
- value.SetJ((static_cast<uint64_t>(args[1]) << 32) | args[0]);
- result->SetD(exp(value.GetD()));
- } else if (name == "java.lang.Object java.lang.Object.internalClone()") {
- result->SetL(receiver->Clone(self));
- } else if (name == "void java.lang.Object.notifyAll()") {
- receiver->NotifyAll(self);
- } else if (name == "int java.lang.String.compareTo(java.lang.String)") {
- String* rhs = reinterpret_cast<Object*>(args[0])->AsString();
- CHECK(rhs != NULL);
- result->SetI(receiver->AsString()->CompareTo(rhs));
- } else if (name == "java.lang.String java.lang.String.intern()") {
- result->SetL(receiver->AsString()->Intern());
- } else if (name == "int java.lang.String.fastIndexOf(int, int)") {
- result->SetI(receiver->AsString()->FastIndexOf(args[0], args[1]));
- } else if (name == "java.lang.Object java.lang.reflect.Array.createMultiArray(java.lang.Class, int[])") {
- StackHandleScope<2> hs(self);
- auto h_class(hs.NewHandle(reinterpret_cast<mirror::Class*>(args[0])->AsClass()));
- auto h_dimensions(hs.NewHandle(reinterpret_cast<mirror::IntArray*>(args[1])->AsIntArray()));
- result->SetL(Array::CreateMultiArray(self, h_class, h_dimensions));
- } else if (name == "java.lang.Object java.lang.Throwable.nativeFillInStackTrace()") {
- ScopedObjectAccessUnchecked soa(self);
- if (Runtime::Current()->IsActiveTransaction()) {
- result->SetL(soa.Decode<Object*>(self->CreateInternalStackTrace<true>(soa)));
- } else {
- result->SetL(soa.Decode<Object*>(self->CreateInternalStackTrace<false>(soa)));
- }
- } else if (name == "int java.lang.System.identityHashCode(java.lang.Object)") {
- mirror::Object* obj = reinterpret_cast<Object*>(args[0]);
- result->SetI((obj != nullptr) ? obj->IdentityHashCode() : 0);
- } else if (name == "boolean java.nio.ByteOrder.isLittleEndian()") {
- result->SetZ(JNI_TRUE);
- } else if (name == "boolean sun.misc.Unsafe.compareAndSwapInt(java.lang.Object, long, int, int)") {
- Object* obj = reinterpret_cast<Object*>(args[0]);
- jlong offset = (static_cast<uint64_t>(args[2]) << 32) | args[1];
- jint expectedValue = args[3];
- jint newValue = args[4];
- bool success;
- if (Runtime::Current()->IsActiveTransaction()) {
- success = obj->CasFieldStrongSequentiallyConsistent32<true>(MemberOffset(offset),
- expectedValue, newValue);
- } else {
- success = obj->CasFieldStrongSequentiallyConsistent32<false>(MemberOffset(offset),
- expectedValue, newValue);
- }
- result->SetZ(success ? JNI_TRUE : JNI_FALSE);
- } else if (name == "void sun.misc.Unsafe.putObject(java.lang.Object, long, java.lang.Object)") {
- Object* obj = reinterpret_cast<Object*>(args[0]);
- jlong offset = (static_cast<uint64_t>(args[2]) << 32) | args[1];
- Object* newValue = reinterpret_cast<Object*>(args[3]);
- if (Runtime::Current()->IsActiveTransaction()) {
- obj->SetFieldObject<true>(MemberOffset(offset), newValue);
- } else {
- obj->SetFieldObject<false>(MemberOffset(offset), newValue);
- }
- } else if (name == "int sun.misc.Unsafe.getArrayBaseOffsetForComponentType(java.lang.Class)") {
- mirror::Class* component = reinterpret_cast<Object*>(args[0])->AsClass();
- Primitive::Type primitive_type = component->GetPrimitiveType();
- result->SetI(mirror::Array::DataOffset(Primitive::ComponentSize(primitive_type)).Int32Value());
- } else if (name == "int sun.misc.Unsafe.getArrayIndexScaleForComponentType(java.lang.Class)") {
- mirror::Class* component = reinterpret_cast<Object*>(args[0])->AsClass();
- Primitive::Type primitive_type = component->GetPrimitiveType();
- result->SetI(Primitive::ComponentSize(primitive_type));
- } else if (Runtime::Current()->IsActiveTransaction()) {
- AbortTransaction(self, "Attempt to invoke native method in non-started runtime: %s",
- name.c_str());
-
- } else {
- LOG(FATAL) << "Calling native method " << PrettyMethod(method) << " in an unstarted "
- "non-transactional runtime";
- }
-}
-
static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& shorty,
Object* receiver, uint32_t* args, JValue* result)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 604e133..26ab602 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -19,12 +19,13 @@
#include <cmath>
#include "mirror/array-inl.h"
+#include "unstarted_runtime.h"
namespace art {
namespace interpreter {
-void ThrowNullPointerExceptionFromInterpreter(const ShadowFrame& shadow_frame) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+void ThrowNullPointerExceptionFromInterpreter() {
+ ThrowNullPointerExceptionFromDexPC();
}
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
@@ -44,7 +45,7 @@
} else {
obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
if (UNLIKELY(obj == nullptr)) {
- ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(), f, true);
+ ThrowNullPointerExceptionForFieldAccess(f, true);
return false;
}
}
@@ -126,7 +127,7 @@
if (UNLIKELY(obj == nullptr)) {
// We lost the reference to the field index so we cannot get a more
// precise exception message.
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromDexPC();
return false;
}
MemberOffset field_offset(inst->VRegC_22c());
@@ -238,8 +239,7 @@
} else {
obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
if (UNLIKELY(obj == nullptr)) {
- ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(),
- f, false);
+ ThrowNullPointerExceptionForFieldAccess(f, false);
return false;
}
}
@@ -289,8 +289,7 @@
if (!reg->VerifierInstanceOf(field_class)) {
// This should never happen.
std::string temp1, temp2, temp3;
- self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
- "Ljava/lang/VirtualMachineError;",
+ self->ThrowNewExceptionF("Ljava/lang/VirtualMachineError;",
"Put '%s' that is not instance of field '%s' in '%s'",
reg->GetClass()->GetDescriptor(&temp1),
field_class->GetDescriptor(&temp2),
@@ -346,7 +345,7 @@
if (UNLIKELY(obj == nullptr)) {
// We lost the reference to the field index so we cannot get a more
// precise exception message.
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromDexPC();
return false;
}
MemberOffset field_offset(inst->VRegC_22c());
@@ -413,90 +412,16 @@
#undef EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL
#undef EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL
-/**
- * Finds the location where this exception will be caught. We search until we reach either the top
- * frame or a native frame, in which cases this exception is considered uncaught.
- */
-class CatchLocationFinder : public StackVisitor {
- public:
- explicit CatchLocationFinder(Thread* self, Handle<mirror::Throwable>* exception)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(self, nullptr), self_(self), handle_scope_(self), exception_(exception),
- catch_method_(handle_scope_.NewHandle<mirror::ArtMethod>(nullptr)),
- catch_dex_pc_(DexFile::kDexNoIndex), clear_exception_(false) {
- }
-
- bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method = GetMethod();
- if (method == nullptr) {
- return true;
- }
- if (method->IsRuntimeMethod()) {
- // Ignore callee save method.
- DCHECK(method->IsCalleeSaveMethod());
- return true;
- }
- if (method->IsNative()) {
- return false; // End stack walk.
- }
- DCHECK(!method->IsNative());
- uint32_t dex_pc = GetDexPc();
- if (dex_pc != DexFile::kDexNoIndex) {
- uint32_t found_dex_pc;
- {
- StackHandleScope<3> hs(self_);
- Handle<mirror::Class> exception_class(hs.NewHandle((*exception_)->GetClass()));
- Handle<mirror::ArtMethod> h_method(hs.NewHandle(method));
- found_dex_pc = mirror::ArtMethod::FindCatchBlock(h_method, exception_class, dex_pc,
- &clear_exception_);
- }
- if (found_dex_pc != DexFile::kDexNoIndex) {
- catch_method_.Assign(method);
- catch_dex_pc_ = found_dex_pc;
- return false; // End stack walk.
- }
- }
- return true; // Continue stack walk.
- }
-
- ArtMethod* GetCatchMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return catch_method_.Get();
- }
-
- uint32_t GetCatchDexPc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return catch_dex_pc_;
- }
-
- bool NeedClearException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return clear_exception_;
- }
-
- private:
- Thread* const self_;
- StackHandleScope<1> handle_scope_;
- Handle<mirror::Throwable>* exception_;
- MutableHandle<mirror::ArtMethod> catch_method_;
- uint32_t catch_dex_pc_;
- bool clear_exception_;
-
-
- DISALLOW_COPY_AND_ASSIGN(CatchLocationFinder);
-};
-
uint32_t FindNextInstructionFollowingException(Thread* self,
ShadowFrame& shadow_frame,
uint32_t dex_pc,
const instrumentation::Instrumentation* instrumentation) {
self->VerifyStack();
- ThrowLocation throw_location;
StackHandleScope<3> hs(self);
- Handle<mirror::Throwable> exception(hs.NewHandle(self->GetException(&throw_location)));
+ Handle<mirror::Throwable> exception(hs.NewHandle(self->GetException()));
if (instrumentation->HasExceptionCaughtListeners()
&& self->IsExceptionThrownByCurrentMethod(exception.Get())) {
- CatchLocationFinder clf(self, &exception);
- clf.WalkStack(false);
- instrumentation->ExceptionCaughtEvent(self, throw_location, clf.GetCatchMethod(),
- clf.GetCatchDexPc(), exception.Get());
+ instrumentation->ExceptionCaughtEvent(self, exception.Get());
}
bool clear_exception = false;
uint32_t found_dex_pc;
@@ -526,10 +451,6 @@
UNREACHABLE();
}
-static void UnstartedRuntimeInvoke(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Assign register 'src_reg' from shadow_frame to register 'dest_reg' into new_shadow_frame.
static inline void AssignRegister(ShadowFrame* new_shadow_frame, const ShadowFrame& shadow_frame,
size_t dest_reg, size_t src_reg)
@@ -629,8 +550,7 @@
if (!o->VerifierInstanceOf(arg_type)) {
// This should never happen.
std::string temp1, temp2;
- self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
- "Ljava/lang/VirtualMachineError;",
+ self->ThrowNewExceptionF("Ljava/lang/VirtualMachineError;",
"Invoking %s with bad arg %d, type '%s' not instance of '%s'",
new_shadow_frame->GetMethod()->GetName(), shorty_pos,
o->GetClass()->GetDescriptor(&temp1),
@@ -732,8 +652,7 @@
ThrowRuntimeException("Bad filled array request for type %s",
PrettyDescriptor(componentClass).c_str());
} else {
- self->ThrowNewExceptionF(shadow_frame.GetCurrentLocationForThrow(),
- "Ljava/lang/InternalError;",
+ self->ThrowNewExceptionF("Ljava/lang/InternalError;",
"Found type %s; filled-new-array not implemented for anything but 'int'",
PrettyDescriptor(componentClass).c_str());
}
@@ -811,282 +730,6 @@
}
}
-// Helper function to deal with class loading in an unstarted runtime.
-static void UnstartedRuntimeFindClass(Thread* self, Handle<mirror::String> className,
- Handle<mirror::ClassLoader> class_loader, JValue* result,
- const std::string& method_name, bool initialize_class,
- bool abort_if_not_found)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK(className.Get() != nullptr);
- std::string descriptor(DotToDescriptor(className->ToModifiedUtf8().c_str()));
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-
- Class* found = class_linker->FindClass(self, descriptor.c_str(), class_loader);
- if (found == nullptr && abort_if_not_found) {
- if (!self->IsExceptionPending()) {
- AbortTransaction(self, "%s failed in un-started runtime for class: %s",
- method_name.c_str(), PrettyDescriptor(descriptor.c_str()).c_str());
- }
- return;
- }
- if (found != nullptr && initialize_class) {
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> h_class(hs.NewHandle(found));
- if (!class_linker->EnsureInitialized(self, h_class, true, true)) {
- CHECK(self->IsExceptionPending());
- return;
- }
- }
- result->SetL(found);
-}
-
-// Common helper for class-loading cutouts in an unstarted runtime. We call Runtime methods that
-// rely on Java code to wrap errors in the correct exception class (i.e., NoClassDefFoundError into
-// ClassNotFoundException), so need to do the same. The only exception is if the exception is
-// actually InternalError. This must not be wrapped, as it signals an initialization abort.
-static void CheckExceptionGenerateClassNotFound(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (self->IsExceptionPending()) {
- // If it is not an InternalError, wrap it.
- std::string type(PrettyTypeOf(self->GetException(nullptr)));
- if (type != "java.lang.InternalError") {
- self->ThrowNewWrappedException(self->GetCurrentLocationForThrow(),
- "Ljava/lang/ClassNotFoundException;",
- "ClassNotFoundException");
- }
- }
-}
-
-static void UnstartedRuntimeInvoke(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame,
- JValue* result, size_t arg_offset) {
- // In a runtime that's not started we intercept certain methods to avoid complicated dependency
- // problems in core libraries.
- std::string name(PrettyMethod(shadow_frame->GetMethod()));
- if (name == "java.lang.Class java.lang.Class.forName(java.lang.String)") {
- mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
- StackHandleScope<1> hs(self);
- Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
- UnstartedRuntimeFindClass(self, h_class_name, NullHandle<mirror::ClassLoader>(), result, name,
- true, false);
- CheckExceptionGenerateClassNotFound(self);
- } else if (name == "java.lang.Class java.lang.Class.forName(java.lang.String, boolean, java.lang.ClassLoader)") {
- mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
- bool initialize_class = shadow_frame->GetVReg(arg_offset + 1) != 0;
- mirror::ClassLoader* class_loader =
- down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset + 2));
- StackHandleScope<2> hs(self);
- Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
- Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
- UnstartedRuntimeFindClass(self, h_class_name, h_class_loader, result, name, initialize_class,
- false);
- CheckExceptionGenerateClassNotFound(self);
- } else if (name == "java.lang.Class java.lang.Class.classForName(java.lang.String, boolean, java.lang.ClassLoader)") {
- mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
- bool initialize_class = shadow_frame->GetVReg(arg_offset + 1) != 0;
- mirror::ClassLoader* class_loader =
- down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset + 2));
- StackHandleScope<2> hs(self);
- Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
- Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
- UnstartedRuntimeFindClass(self, h_class_name, h_class_loader, result, name, initialize_class,
- false);
- CheckExceptionGenerateClassNotFound(self);
- } else if (name == "java.lang.Class java.lang.VMClassLoader.findLoadedClass(java.lang.ClassLoader, java.lang.String)") {
- mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
- mirror::ClassLoader* class_loader =
- down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset));
- StackHandleScope<2> hs(self);
- Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
- Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
- UnstartedRuntimeFindClass(self, h_class_name, h_class_loader, result, name, false, false);
- // This might have an error pending. But semantics are to just return null.
- if (self->IsExceptionPending()) {
- // If it is an InternalError, keep it. See CheckExceptionGenerateClassNotFound.
- std::string type(PrettyTypeOf(self->GetException(nullptr)));
- if (type != "java.lang.InternalError") {
- self->ClearException();
- }
- }
- } else if (name == "java.lang.Class java.lang.Void.lookupType()") {
- result->SetL(Runtime::Current()->GetClassLinker()->FindPrimitiveClass('V'));
- } else if (name == "java.lang.Object java.lang.Class.newInstance()") {
- StackHandleScope<3> hs(self); // Class, constructor, object.
- Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
- Handle<Class> h_klass(hs.NewHandle(klass));
- // There are two situations in which we'll abort this run.
- // 1) If the class isn't yet initialized and initialization fails.
- // 2) If we can't find the default constructor. We'll postpone the exception to runtime.
- // Note that 2) could likely be handled here, but for safety abort the transaction.
- bool ok = false;
- if (Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_klass, true, true)) {
- Handle<ArtMethod> h_cons(hs.NewHandle(h_klass->FindDeclaredDirectMethod("<init>", "()V")));
- if (h_cons.Get() != nullptr) {
- Handle<Object> h_obj(hs.NewHandle(klass->AllocObject(self)));
- CHECK(h_obj.Get() != nullptr); // We don't expect OOM at compile-time.
- EnterInterpreterFromInvoke(self, h_cons.Get(), h_obj.Get(), nullptr, nullptr);
- if (!self->IsExceptionPending()) {
- result->SetL(h_obj.Get());
- ok = true;
- }
- } else {
- self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(), "Ljava/lang/InternalError;",
- "Could not find default constructor for '%s'",
- PrettyClass(h_klass.Get()).c_str());
- }
- }
- if (!ok) {
- std::string error_msg = StringPrintf("Failed in Class.newInstance for '%s' with %s",
- PrettyClass(h_klass.Get()).c_str(),
- PrettyTypeOf(self->GetException(nullptr)).c_str());
- self->ThrowNewWrappedException(self->GetCurrentLocationForThrow(),
- "Ljava/lang/InternalError;",
- error_msg.c_str());
- }
- } else if (name == "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)") {
- // Special managed code cut-out to allow field lookup in a un-started runtime that'd fail
- // going the reflective Dex way.
- Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
- String* name2 = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
- ArtField* found = NULL;
- ObjectArray<ArtField>* fields = klass->GetIFields();
- for (int32_t i = 0; i < fields->GetLength() && found == NULL; ++i) {
- ArtField* f = fields->Get(i);
- if (name2->Equals(f->GetName())) {
- found = f;
- }
- }
- if (found == NULL) {
- fields = klass->GetSFields();
- for (int32_t i = 0; i < fields->GetLength() && found == NULL; ++i) {
- ArtField* f = fields->Get(i);
- if (name2->Equals(f->GetName())) {
- found = f;
- }
- }
- }
- CHECK(found != NULL)
- << "Failed to find field in Class.getDeclaredField in un-started runtime. name="
- << name2->ToModifiedUtf8() << " class=" << PrettyDescriptor(klass);
- // TODO: getDeclaredField calls GetType once the field is found to ensure a
- // NoClassDefFoundError is thrown if the field's type cannot be resolved.
- Class* jlr_Field = self->DecodeJObject(WellKnownClasses::java_lang_reflect_Field)->AsClass();
- StackHandleScope<1> hs(self);
- Handle<Object> field(hs.NewHandle(jlr_Field->AllocNonMovableObject(self)));
- CHECK(field.Get() != NULL);
- ArtMethod* c = jlr_Field->FindDeclaredDirectMethod("<init>", "(Ljava/lang/reflect/ArtField;)V");
- uint32_t args[1];
- args[0] = StackReference<mirror::Object>::FromMirrorPtr(found).AsVRegValue();
- EnterInterpreterFromInvoke(self, c, field.Get(), args, NULL);
- result->SetL(field.Get());
- } else if (name == "int java.lang.Object.hashCode()") {
- Object* obj = shadow_frame->GetVRegReference(arg_offset);
- result->SetI(obj->IdentityHashCode());
- } else if (name == "java.lang.String java.lang.reflect.ArtMethod.getMethodName(java.lang.reflect.ArtMethod)") {
- mirror::ArtMethod* method = shadow_frame->GetVRegReference(arg_offset)->AsArtMethod();
- result->SetL(method->GetNameAsString(self));
- } else if (name == "void java.lang.System.arraycopy(java.lang.Object, int, java.lang.Object, int, int)" ||
- name == "void java.lang.System.arraycopy(char[], int, char[], int, int)") {
- // Special case array copying without initializing System.
- Class* ctype = shadow_frame->GetVRegReference(arg_offset)->GetClass()->GetComponentType();
- jint srcPos = shadow_frame->GetVReg(arg_offset + 1);
- jint dstPos = shadow_frame->GetVReg(arg_offset + 3);
- jint length = shadow_frame->GetVReg(arg_offset + 4);
- if (!ctype->IsPrimitive()) {
- ObjectArray<Object>* src = shadow_frame->GetVRegReference(arg_offset)->AsObjectArray<Object>();
- ObjectArray<Object>* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsObjectArray<Object>();
- for (jint i = 0; i < length; ++i) {
- dst->Set(dstPos + i, src->Get(srcPos + i));
- }
- } else if (ctype->IsPrimitiveChar()) {
- CharArray* src = shadow_frame->GetVRegReference(arg_offset)->AsCharArray();
- CharArray* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsCharArray();
- for (jint i = 0; i < length; ++i) {
- dst->Set(dstPos + i, src->Get(srcPos + i));
- }
- } else if (ctype->IsPrimitiveInt()) {
- IntArray* src = shadow_frame->GetVRegReference(arg_offset)->AsIntArray();
- IntArray* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsIntArray();
- for (jint i = 0; i < length; ++i) {
- dst->Set(dstPos + i, src->Get(srcPos + i));
- }
- } else {
- self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(), "Ljava/lang/InternalError;",
- "Unimplemented System.arraycopy for type '%s'",
- PrettyDescriptor(ctype).c_str());
- }
- } else if (name == "long java.lang.Double.doubleToRawLongBits(double)") {
- double in = shadow_frame->GetVRegDouble(arg_offset);
- result->SetJ(bit_cast<int64_t>(in));
- } else if (name == "double java.lang.Math.ceil(double)") {
- double in = shadow_frame->GetVRegDouble(arg_offset);
- double out;
- // Special cases:
- // 1) NaN, infinity, +0, -0 -> out := in. All are guaranteed by cmath.
- // -1 < in < 0 -> out := -0.
- if (-1.0 < in && in < 0) {
- out = -0.0;
- } else {
- out = ceil(in);
- }
- result->SetD(out);
- } else if (name == "java.lang.Object java.lang.ThreadLocal.get()") {
- std::string caller(PrettyMethod(shadow_frame->GetLink()->GetMethod()));
- bool ok = false;
- if (caller == "java.lang.String java.lang.IntegralToString.convertInt(java.lang.AbstractStringBuilder, int)") {
- // Allocate non-threadlocal buffer.
- result->SetL(mirror::CharArray::Alloc(self, 11));
- ok = true;
- } else if (caller == "java.lang.RealToString java.lang.RealToString.getInstance()") {
- // Note: RealToString is implemented and used in a different fashion than IntegralToString.
- // Conversion is done over an actual object of RealToString (the conversion method is an
- // instance method). This means it is not as clear whether it is correct to return a new
- // object each time. The caller needs to be inspected by hand to see whether it (incorrectly)
- // stores the object for later use.
- // See also b/19548084 for a possible rewrite and bringing it in line with IntegralToString.
- if (shadow_frame->GetLink()->GetLink() != nullptr) {
- std::string caller2(PrettyMethod(shadow_frame->GetLink()->GetLink()->GetMethod()));
- if (caller2 == "java.lang.String java.lang.Double.toString(double)") {
- // Allocate new object.
- StackHandleScope<2> hs(self);
- Handle<Class> h_real_to_string_class(hs.NewHandle(
- shadow_frame->GetLink()->GetMethod()->GetDeclaringClass()));
- Handle<Object> h_real_to_string_obj(hs.NewHandle(
- h_real_to_string_class->AllocObject(self)));
- if (h_real_to_string_obj.Get() != nullptr) {
- mirror::ArtMethod* init_method =
- h_real_to_string_class->FindDirectMethod("<init>", "()V");
- if (init_method == nullptr) {
- h_real_to_string_class->DumpClass(LOG(FATAL), mirror::Class::kDumpClassFullDetail);
- } else {
- JValue invoke_result;
- EnterInterpreterFromInvoke(self, init_method, h_real_to_string_obj.Get(), nullptr,
- nullptr);
- if (!self->IsExceptionPending()) {
- result->SetL(h_real_to_string_obj.Get());
- ok = true;
- }
- }
- }
-
- if (!ok) {
- // We'll abort, so clear exception.
- self->ClearException();
- }
- }
- }
- }
-
- if (!ok) {
- self->ThrowNewException(self->GetCurrentLocationForThrow(), "Ljava/lang/InternalError;",
- "Unimplemented ThreadLocal.get");
- }
- } else {
- // Not special, continue with regular interpreter execution.
- artInterpreterToInterpreterBridge(self, code_item, shadow_frame, result);
- }
-}
-
// Explicit DoCall template function declarations.
#define EXPLICIT_DO_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \
template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
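The call-site rewrites above all follow the same shape: Thread::ThrowNewExceptionF (and related helpers) lost their leading ThrowLocation argument, and the exception descriptor now comes first. A hedged wrapper showing the new form; the helper name is an assumption:

    static void ThrowInternalError(Thread* self, const char* msg)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      // No ThrowLocation parameter any more; the location is recovered from
      // the managed stack when the exception is actually reported.
      self->ThrowNewExceptionF("Ljava/lang/InternalError;", "%s", msg);
    }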
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 06b809f..15396d6 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -30,21 +30,14 @@
#include "common_throws.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
-#include "dex_instruction.h"
#include "entrypoints/entrypoint_utils-inl.h"
-#include "gc/accounting/card_table-inl.h"
#include "handle_scope-inl.h"
-#include "nth_caller_visitor.h"
#include "mirror/art_field-inl.h"
-#include "mirror/art_method.h"
#include "mirror/art_method-inl.h"
-#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
-#include "ScopedLocalRef.h"
-#include "scoped_thread_state_change.h"
#include "thread.h"
#include "well_known_classes.h"
@@ -77,7 +70,7 @@
extern JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
-void ThrowNullPointerExceptionFromInterpreter(const ShadowFrame& shadow_frame)
+void ThrowNullPointerExceptionFromInterpreter()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static inline void DoMonitorEnter(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
@@ -138,7 +131,7 @@
if (UNLIKELY(receiver == nullptr)) {
// We lost the reference to the method index so we cannot get a more
// precise exception message.
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromDexPC();
return false;
}
const uint32_t vtable_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index 37324ea..5f97f94 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -244,7 +244,7 @@
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(MOVE_EXCEPTION) {
- Throwable* exception = self->GetException(nullptr);
+ Throwable* exception = self->GetException();
DCHECK(exception != nullptr) << "No pending exception on MOVE_EXCEPTION instruction";
shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception);
self->ClearException();
@@ -341,8 +341,7 @@
if (!obj_result->VerifierInstanceOf(return_type)) {
// This should never happen.
std::string temp1, temp2;
- self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
- "Ljava/lang/VirtualMachineError;",
+ self->ThrowNewExceptionF("Ljava/lang/VirtualMachineError;",
"Returning '%s' that is not instance of return type '%s'",
obj_result->GetClass()->GetDescriptor(&temp1),
return_type->GetDescriptor(&temp2));
@@ -465,7 +464,7 @@
HANDLE_INSTRUCTION_START(MONITOR_ENTER) {
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
DoMonitorEnter(self, obj);
@@ -477,7 +476,7 @@
HANDLE_INSTRUCTION_START(MONITOR_EXIT) {
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
DoMonitorExit(self, obj);
@@ -519,7 +518,7 @@
HANDLE_INSTRUCTION_START(ARRAY_LENGTH) {
Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
if (UNLIKELY(array == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVReg(inst->VRegA_12x(inst_data), array->AsArray()->GetLength());
@@ -596,16 +595,15 @@
HANDLE_INSTRUCTION_START(THROW) {
Object* exception = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(exception == NULL)) {
- ThrowNullPointerException(NULL, "throw with null exception");
+ ThrowNullPointerException("throw with null exception");
} else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
// This should never happen.
std::string temp;
- self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
- "Ljava/lang/VirtualMachineError;",
+ self->ThrowNewExceptionF("Ljava/lang/VirtualMachineError;",
"Throwing '%s' that is not instance of Throwable",
exception->GetClass()->GetDescriptor(&temp));
} else {
- self->SetException(shadow_frame.GetCurrentLocationForThrow(), exception->AsThrowable());
+ self->SetException(exception->AsThrowable());
}
HANDLE_PENDING_EXCEPTION();
}
@@ -972,7 +970,7 @@
HANDLE_INSTRUCTION_START(AGET_BOOLEAN) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -990,7 +988,7 @@
HANDLE_INSTRUCTION_START(AGET_BYTE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1008,7 +1006,7 @@
HANDLE_INSTRUCTION_START(AGET_CHAR) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1026,7 +1024,7 @@
HANDLE_INSTRUCTION_START(AGET_SHORT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1044,7 +1042,7 @@
HANDLE_INSTRUCTION_START(AGET) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1062,7 +1060,7 @@
HANDLE_INSTRUCTION_START(AGET_WIDE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1080,7 +1078,7 @@
HANDLE_INSTRUCTION_START(AGET_OBJECT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1098,7 +1096,7 @@
HANDLE_INSTRUCTION_START(APUT_BOOLEAN) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
uint8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
@@ -1117,7 +1115,7 @@
HANDLE_INSTRUCTION_START(APUT_BYTE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
@@ -1136,7 +1134,7 @@
HANDLE_INSTRUCTION_START(APUT_CHAR) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
uint16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
@@ -1155,7 +1153,7 @@
HANDLE_INSTRUCTION_START(APUT_SHORT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
@@ -1174,7 +1172,7 @@
HANDLE_INSTRUCTION_START(APUT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int32_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
@@ -1193,7 +1191,7 @@
HANDLE_INSTRUCTION_START(APUT_WIDE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x(inst_data));
@@ -1212,7 +1210,7 @@
HANDLE_INSTRUCTION_START(APUT_OBJECT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 2f85587..9313c75 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -163,7 +163,7 @@
break;
case Instruction::MOVE_EXCEPTION: {
PREAMBLE();
- Throwable* exception = self->GetException(nullptr);
+ Throwable* exception = self->GetException();
DCHECK(exception != nullptr) << "No pending exception on MOVE_EXCEPTION instruction";
shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception);
self->ClearException();
@@ -248,8 +248,7 @@
if (!obj_result->VerifierInstanceOf(return_type)) {
// This should never happen.
std::string temp1, temp2;
- self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
- "Ljava/lang/VirtualMachineError;",
+ self->ThrowNewExceptionF("Ljava/lang/VirtualMachineError;",
"Returning '%s' that is not instance of return type '%s'",
obj_result->GetClass()->GetDescriptor(&temp1),
return_type->GetDescriptor(&temp2));
@@ -370,7 +369,7 @@
PREAMBLE();
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
DoMonitorEnter(self, obj);
@@ -382,7 +381,7 @@
PREAMBLE();
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
DoMonitorExit(self, obj);
@@ -424,7 +423,7 @@
PREAMBLE();
Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
if (UNLIKELY(array == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVReg(inst->VRegA_12x(inst_data), array->AsArray()->GetLength());
@@ -506,16 +505,15 @@
PREAMBLE();
Object* exception = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(exception == NULL)) {
- ThrowNullPointerException(NULL, "throw with null exception");
+ ThrowNullPointerException("throw with null exception");
} else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
// This should never happen.
std::string temp;
- self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
- "Ljava/lang/VirtualMachineError;",
+ self->ThrowNewExceptionF("Ljava/lang/VirtualMachineError;",
"Throwing '%s' that is not instance of Throwable",
exception->GetClass()->GetDescriptor(&temp));
} else {
- self->SetException(shadow_frame.GetCurrentLocationForThrow(), exception->AsThrowable());
+ self->SetException(exception->AsThrowable());
}
HANDLE_PENDING_EXCEPTION();
break;
@@ -817,7 +815,7 @@
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -835,7 +833,7 @@
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -853,7 +851,7 @@
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -871,7 +869,7 @@
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -889,7 +887,7 @@
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -907,7 +905,7 @@
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -925,7 +923,7 @@
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -943,7 +941,7 @@
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -962,7 +960,7 @@
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -981,7 +979,7 @@
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -1000,7 +998,7 @@
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -1019,7 +1017,7 @@
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -1038,7 +1036,7 @@
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -1057,7 +1055,7 @@
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
new file mode 100644
index 0000000..356a438
--- /dev/null
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -0,0 +1,951 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "unstarted_runtime.h"
+
+#include <cmath>
+#include <unordered_map>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "class_linker.h"
+#include "common_throws.h"
+#include "entrypoints/entrypoint_utils-inl.h"
+#include "handle_scope-inl.h"
+#include "interpreter/interpreter_common.h"
+#include "mirror/array-inl.h"
+#include "mirror/art_method-inl.h"
+#include "mirror/class.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/string-inl.h"
+#include "nth_caller_visitor.h"
+#include "thread.h"
+#include "well_known_classes.h"
+
+namespace art {
+namespace interpreter {
+
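+// Aborts the active transaction with a printf-style message, or LOG(FATAL)s
+// when called outside of transaction mode; in the non-transactional case this
+// helper never returns.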
+static void AbortTransactionOrFail(Thread* self, const char* fmt, ...)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ va_list args;
+ va_start(args, fmt);
+ if (Runtime::Current()->IsActiveTransaction()) {
+ AbortTransaction(self, fmt, args);
+ va_end(args);
+ } else {
+ LOG(FATAL) << "Trying to abort, but not in transaction mode: " << StringPrintf(fmt, args);
+ UNREACHABLE();
+ }
+}
+
+// Helper function to deal with class loading in an unstarted runtime.
+static void UnstartedRuntimeFindClass(Thread* self, Handle<mirror::String> className,
+ Handle<mirror::ClassLoader> class_loader, JValue* result,
+ const std::string& method_name, bool initialize_class,
+ bool abort_if_not_found)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CHECK(className.Get() != nullptr);
+ std::string descriptor(DotToDescriptor(className->ToModifiedUtf8().c_str()));
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+
+ mirror::Class* found = class_linker->FindClass(self, descriptor.c_str(), class_loader);
+ if (found == nullptr && abort_if_not_found) {
+ if (!self->IsExceptionPending()) {
+ AbortTransactionOrFail(self, "%s failed in un-started runtime for class: %s",
+ method_name.c_str(), PrettyDescriptor(descriptor.c_str()).c_str());
+ }
+ return;
+ }
+ if (found != nullptr && initialize_class) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(found));
+ if (!class_linker->EnsureInitialized(self, h_class, true, true)) {
+ CHECK(self->IsExceptionPending());
+ return;
+ }
+ }
+ result->SetL(found);
+}
+
+// Common helper for class-loading cutouts in an unstarted runtime. We call Runtime methods that
+// rely on Java code to wrap errors in the correct exception class (e.g., NoClassDefFoundError into
+// ClassNotFoundException), so we need to do the same. The one error we must not wrap is
+// InternalError, as it signals an initialization abort.
+static void CheckExceptionGenerateClassNotFound(Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (self->IsExceptionPending()) {
+ // If it is not an InternalError, wrap it.
+ std::string type(PrettyTypeOf(self->GetException()));
+ if (type != "java.lang.InternalError") {
+ self->ThrowNewWrappedException("Ljava/lang/ClassNotFoundException;",
+ "ClassNotFoundException");
+ }
+ }
+}
+
+static void UnstartedClassForName(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
+ UnstartedRuntimeFindClass(self, h_class_name, NullHandle<mirror::ClassLoader>(), result,
+ "Class.forName", true, false);
+ CheckExceptionGenerateClassNotFound(self);
+}
+
+static void UnstartedClassForNameLong(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
+ bool initialize_class = shadow_frame->GetVReg(arg_offset + 1) != 0;
+ mirror::ClassLoader* class_loader =
+ down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset + 2));
+ StackHandleScope<2> hs(self);
+ Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
+ Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
+ UnstartedRuntimeFindClass(self, h_class_name, h_class_loader, result, "Class.forName",
+ initialize_class, false);
+ CheckExceptionGenerateClassNotFound(self);
+}
+
+static void UnstartedClassClassForName(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
+ bool initialize_class = shadow_frame->GetVReg(arg_offset + 1) != 0;
+ mirror::ClassLoader* class_loader =
+ down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset + 2));
+ StackHandleScope<2> hs(self);
+ Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
+ Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
+ UnstartedRuntimeFindClass(self, h_class_name, h_class_loader, result, "Class.classForName",
+ initialize_class, false);
+ CheckExceptionGenerateClassNotFound(self);
+}
+
+static void UnstartedClassNewInstance(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ StackHandleScope<3> hs(self); // Class, constructor, object.
+ mirror::Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
+ Handle<mirror::Class> h_klass(hs.NewHandle(klass));
+
+ // Check that it's not null.
+ if (h_klass.Get() == nullptr) {
+ AbortTransactionOrFail(self, "Class reference is null for newInstance");
+ return;
+ }
+
+ // If we're in a transaction, class must not be finalizable (it or a superclass has a finalizer).
+ if (Runtime::Current()->IsActiveTransaction()) {
+ if (h_klass.Get()->IsFinalizable()) {
+ AbortTransaction(self, "Class for newInstance is finalizable: '%s'",
+ PrettyClass(h_klass.Get()).c_str());
+ return;
+ }
+ }
+
+ // There are two situations in which we'll abort this run.
+ // 1) If the class isn't yet initialized and initialization fails.
+ // 2) If we can't find the default constructor. We'll postpone the exception to runtime.
+  // Note that 2) could likely be handled here, but for safety we abort the transaction.
+ bool ok = false;
+ if (Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_klass, true, true)) {
+ Handle<mirror::ArtMethod> h_cons(hs.NewHandle(
+ h_klass->FindDeclaredDirectMethod("<init>", "()V")));
+ if (h_cons.Get() != nullptr) {
+ Handle<mirror::Object> h_obj(hs.NewHandle(klass->AllocObject(self)));
+ CHECK(h_obj.Get() != nullptr); // We don't expect OOM at compile-time.
+ EnterInterpreterFromInvoke(self, h_cons.Get(), h_obj.Get(), nullptr, nullptr);
+ if (!self->IsExceptionPending()) {
+ result->SetL(h_obj.Get());
+ ok = true;
+ }
+ } else {
+ self->ThrowNewExceptionF("Ljava/lang/InternalError;",
+ "Could not find default constructor for '%s'",
+ PrettyClass(h_klass.Get()).c_str());
+ }
+ }
+ if (!ok) {
+ AbortTransactionOrFail(self, "Failed in Class.newInstance for '%s' with %s",
+ PrettyClass(h_klass.Get()).c_str(),
+ PrettyTypeOf(self->GetException()).c_str());
+ }
+}
+
+static void UnstartedClassGetDeclaredField(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // Special managed-code cut-out to allow field lookup in an unstarted runtime that would fail
+  // going the reflective Dex way.
+ mirror::Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
+ mirror::String* name2 = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
+ mirror::ArtField* found = nullptr;
+ mirror::ObjectArray<mirror::ArtField>* fields = klass->GetIFields();
+ for (int32_t i = 0; i < fields->GetLength() && found == nullptr; ++i) {
+ mirror::ArtField* f = fields->Get(i);
+ if (name2->Equals(f->GetName())) {
+ found = f;
+ }
+ }
+ if (found == nullptr) {
+ fields = klass->GetSFields();
+ for (int32_t i = 0; i < fields->GetLength() && found == nullptr; ++i) {
+ mirror::ArtField* f = fields->Get(i);
+ if (name2->Equals(f->GetName())) {
+ found = f;
+ }
+ }
+ }
+ if (found == nullptr) {
+ AbortTransactionOrFail(self, "Failed to find field in Class.getDeclaredField in un-started "
+ " runtime. name=%s class=%s", name2->ToModifiedUtf8().c_str(),
+ PrettyDescriptor(klass).c_str());
+ return;
+ }
+ // TODO: getDeclaredField calls GetType once the field is found to ensure a
+ // NoClassDefFoundError is thrown if the field's type cannot be resolved.
+ mirror::Class* jlr_Field = self->DecodeJObject(
+ WellKnownClasses::java_lang_reflect_Field)->AsClass();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Object> field(hs.NewHandle(jlr_Field->AllocNonMovableObject(self)));
+ CHECK(field.Get() != nullptr);
+ mirror::ArtMethod* c = jlr_Field->FindDeclaredDirectMethod("<init>",
+ "(Ljava/lang/reflect/ArtField;)V");
+ uint32_t args[1];
+ args[0] = StackReference<mirror::Object>::FromMirrorPtr(found).AsVRegValue();
+ EnterInterpreterFromInvoke(self, c, field.Get(), args, nullptr);
+ result->SetL(field.Get());
+}
+
+static void UnstartedVmClassLoaderFindLoadedClass(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
+ mirror::ClassLoader* class_loader =
+ down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset));
+ StackHandleScope<2> hs(self);
+ Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
+ Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
+ UnstartedRuntimeFindClass(self, h_class_name, h_class_loader, result,
+ "VMClassLoader.findLoadedClass", false, false);
+  // This might have an error pending, but the method's semantics are to simply return null.
+ if (self->IsExceptionPending()) {
+ // If it is an InternalError, keep it. See CheckExceptionGenerateClassNotFound.
+ std::string type(PrettyTypeOf(self->GetException()));
+ if (type != "java.lang.InternalError") {
+ self->ClearException();
+ }
+ }
+}
+
+static void UnstartedVoidLookupType(Thread* self ATTRIBUTE_UNUSED,
+ ShadowFrame* shadow_frame ATTRIBUTE_UNUSED,
+ JValue* result,
+ size_t arg_offset ATTRIBUTE_UNUSED)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ result->SetL(Runtime::Current()->GetClassLinker()->FindPrimitiveClass('V'));
+}
+
+static void UnstartedSystemArraycopy(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Special case array copying without initializing System.
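+  // Note: this path performs no bounds or overlap checking; it is only expected
+  // to run at compile time on inputs produced by the boot class path itself.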
+ mirror::Class* ctype = shadow_frame->GetVRegReference(arg_offset)->GetClass()->GetComponentType();
+ jint srcPos = shadow_frame->GetVReg(arg_offset + 1);
+ jint dstPos = shadow_frame->GetVReg(arg_offset + 3);
+ jint length = shadow_frame->GetVReg(arg_offset + 4);
+ if (!ctype->IsPrimitive()) {
+ mirror::ObjectArray<mirror::Object>* src = shadow_frame->GetVRegReference(arg_offset)->
+ AsObjectArray<mirror::Object>();
+ mirror::ObjectArray<mirror::Object>* dst = shadow_frame->GetVRegReference(arg_offset + 2)->
+ AsObjectArray<mirror::Object>();
+ for (jint i = 0; i < length; ++i) {
+ dst->Set(dstPos + i, src->Get(srcPos + i));
+ }
+ } else if (ctype->IsPrimitiveChar()) {
+ mirror::CharArray* src = shadow_frame->GetVRegReference(arg_offset)->AsCharArray();
+ mirror::CharArray* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsCharArray();
+ for (jint i = 0; i < length; ++i) {
+ dst->Set(dstPos + i, src->Get(srcPos + i));
+ }
+ } else if (ctype->IsPrimitiveInt()) {
+ mirror::IntArray* src = shadow_frame->GetVRegReference(arg_offset)->AsIntArray();
+ mirror::IntArray* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsIntArray();
+ for (jint i = 0; i < length; ++i) {
+ dst->Set(dstPos + i, src->Get(srcPos + i));
+ }
+ } else {
+ AbortTransactionOrFail(self, "Unimplemented System.arraycopy for type '%s'",
+ PrettyDescriptor(ctype).c_str());
+ }
+}
+
+static void UnstartedThreadLocalGet(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ std::string caller(PrettyMethod(shadow_frame->GetLink()->GetMethod()));
+ bool ok = false;
+ if (caller == "java.lang.String java.lang.IntegralToString.convertInt"
+ "(java.lang.AbstractStringBuilder, int)") {
+ // Allocate non-threadlocal buffer.
+ result->SetL(mirror::CharArray::Alloc(self, 11));
+ ok = true;
+ } else if (caller == "java.lang.RealToString java.lang.RealToString.getInstance()") {
+ // Note: RealToString is implemented and used in a different fashion than IntegralToString.
+    // Conversion is done through an actual RealToString instance (the conversion method is an
+    // instance method). This means it is not as clear whether it is correct to return a new
+ // object each time. The caller needs to be inspected by hand to see whether it (incorrectly)
+ // stores the object for later use.
+ // See also b/19548084 for a possible rewrite and bringing it in line with IntegralToString.
+ if (shadow_frame->GetLink()->GetLink() != nullptr) {
+ std::string caller2(PrettyMethod(shadow_frame->GetLink()->GetLink()->GetMethod()));
+ if (caller2 == "java.lang.String java.lang.Double.toString(double)") {
+ // Allocate new object.
+ StackHandleScope<2> hs(self);
+ Handle<mirror::Class> h_real_to_string_class(hs.NewHandle(
+ shadow_frame->GetLink()->GetMethod()->GetDeclaringClass()));
+ Handle<mirror::Object> h_real_to_string_obj(hs.NewHandle(
+ h_real_to_string_class->AllocObject(self)));
+ if (h_real_to_string_obj.Get() != nullptr) {
+ mirror::ArtMethod* init_method =
+ h_real_to_string_class->FindDirectMethod("<init>", "()V");
+ if (init_method == nullptr) {
+ h_real_to_string_class->DumpClass(LOG(FATAL), mirror::Class::kDumpClassFullDetail);
+ } else {
+ EnterInterpreterFromInvoke(self, init_method, h_real_to_string_obj.Get(), nullptr,
+ nullptr);
+ if (!self->IsExceptionPending()) {
+ result->SetL(h_real_to_string_obj.Get());
+ ok = true;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if (!ok) {
+ AbortTransactionOrFail(self, "Could not create RealToString object");
+ }
+}
+
+static void UnstartedMathCeil(
+ Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+ double in = shadow_frame->GetVRegDouble(arg_offset);
+ double out;
+ // Special cases:
+ // 1) NaN, infinity, +0, -0 -> out := in. All are guaranteed by cmath.
+  // 2) -1 < in < 0 -> out := -0.
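+  // For example, in = -0.5 yields out = -0.0, while in = 0.5 yields
+  // ceil(0.5) = 1.0.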
+ if (-1.0 < in && in < 0) {
+ out = -0.0;
+ } else {
+ out = ceil(in);
+ }
+ result->SetD(out);
+}
+
+static void UnstartedArtMethodGetMethodName(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* method = shadow_frame->GetVRegReference(arg_offset)->AsArtMethod();
+ result->SetL(method->GetNameAsString(self));
+}
+
+static void UnstartedObjectHashCode(
+ Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset);
+ result->SetI(obj->IdentityHashCode());
+}
+
+static void UnstartedDoubleDoubleToRawLongBits(
+ Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+ double in = shadow_frame->GetVRegDouble(arg_offset);
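+  // bit_cast reinterprets the IEEE-754 representation without conversion;
+  // e.g. 1.0 maps to 0x3ff0000000000000.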
+ result->SetJ(bit_cast<int64_t>(in));
+}
+
+static mirror::Object* GetDexFromDexCache(Thread* self, mirror::DexCache* dex_cache)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const DexFile* dex_file = dex_cache->GetDexFile();
+ if (dex_file == nullptr) {
+ return nullptr;
+ }
+
+ // Create the direct byte buffer.
+ JNIEnv* env = self->GetJniEnv();
+ DCHECK(env != nullptr);
+ void* address = const_cast<void*>(reinterpret_cast<const void*>(dex_file->Begin()));
+ jobject byte_buffer = env->NewDirectByteBuffer(address, dex_file->Size());
+ if (byte_buffer == nullptr) {
+ DCHECK(self->IsExceptionPending());
+ return nullptr;
+ }
+
+ jvalue args[1];
+ args[0].l = byte_buffer;
+ return self->DecodeJObject(
+ env->CallStaticObjectMethodA(WellKnownClasses::com_android_dex_Dex,
+ WellKnownClasses::com_android_dex_Dex_create,
+ args));
+}
+
+static void UnstartedDexCacheGetDexNative(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // We will create the Dex object, but the image writer will release it before creating the
+ // art file.
+ mirror::Object* src = shadow_frame->GetVRegReference(arg_offset);
+ bool have_dex = false;
+ if (src != nullptr) {
+ mirror::Object* dex = GetDexFromDexCache(self, reinterpret_cast<mirror::DexCache*>(src));
+ if (dex != nullptr) {
+ have_dex = true;
+ result->SetL(dex);
+ }
+ }
+ if (!have_dex) {
+ self->ClearException();
+ Runtime::Current()->AbortTransactionAndThrowInternalError(self, "Could not create Dex object");
+ }
+}
+
+static void UnstartedMemoryPeek(
+ Primitive::Type type, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+ int64_t address = shadow_frame->GetVRegLong(arg_offset);
+ // TODO: Check that this is in the heap somewhere. Otherwise we will segfault instead of
+ // aborting the transaction.
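+  // Such a guard might look like the following sketch (ContainsAddress is a
+  // hypothetical query, not an existing Heap API):
+  //   if (!Runtime::Current()->GetHeap()->ContainsAddress(address)) {
+  //     LOG(FATAL) << "Memory.peek outside the heap: " << address;
+  //   }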
+
+ switch (type) {
+ case Primitive::kPrimByte: {
+ result->SetB(*reinterpret_cast<int8_t*>(static_cast<intptr_t>(address)));
+ return;
+ }
+
+ case Primitive::kPrimShort: {
+ result->SetS(*reinterpret_cast<int16_t*>(static_cast<intptr_t>(address)));
+ return;
+ }
+
+ case Primitive::kPrimInt: {
+ result->SetI(*reinterpret_cast<int32_t*>(static_cast<intptr_t>(address)));
+ return;
+ }
+
+ case Primitive::kPrimLong: {
+ result->SetJ(*reinterpret_cast<int64_t*>(static_cast<intptr_t>(address)));
+ return;
+ }
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ case Primitive::kPrimVoid:
+ case Primitive::kPrimNot:
+ LOG(FATAL) << "Not in the Memory API: " << type;
+ UNREACHABLE();
+ }
+ LOG(FATAL) << "Should not reach here";
+ UNREACHABLE();
+}
+
+static void UnstartedMemoryPeekEntry(
+ Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ std::string name(PrettyMethod(shadow_frame->GetMethod()));
+ if (name == "byte libcore.io.Memory.peekByte(long)") {
+ UnstartedMemoryPeek(Primitive::kPrimByte, shadow_frame, result, arg_offset);
+ } else if (name == "short libcore.io.Memory.peekShortNative(long)") {
+ UnstartedMemoryPeek(Primitive::kPrimShort, shadow_frame, result, arg_offset);
+ } else if (name == "int libcore.io.Memory.peekIntNative(long)") {
+ UnstartedMemoryPeek(Primitive::kPrimInt, shadow_frame, result, arg_offset);
+ } else if (name == "long libcore.io.Memory.peekLongNative(long)") {
+ UnstartedMemoryPeek(Primitive::kPrimLong, shadow_frame, result, arg_offset);
+ } else {
+ LOG(FATAL) << "Unsupported Memory.peek entry: " << name;
+ UNREACHABLE();
+ }
+}
+
+static void UnstartedMemoryPeekArray(
+ Primitive::Type type, Thread* self, ShadowFrame* shadow_frame, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ int64_t address_long = shadow_frame->GetVRegLong(arg_offset);
+ mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 2);
+ if (obj == nullptr) {
+ Runtime::Current()->AbortTransactionAndThrowInternalError(self, "Null pointer in peekArray");
+ return;
+ }
+ mirror::Array* array = obj->AsArray();
+
+ int offset = shadow_frame->GetVReg(arg_offset + 3);
+ int count = shadow_frame->GetVReg(arg_offset + 4);
+ if (offset < 0 || offset + count > array->GetLength()) {
+ std::string error_msg(StringPrintf("Array out of bounds in peekArray: %d/%d vs %d",
+ offset, count, array->GetLength()));
+ Runtime::Current()->AbortTransactionAndThrowInternalError(self, error_msg.c_str());
+ return;
+ }
+
+ switch (type) {
+ case Primitive::kPrimByte: {
+ int8_t* address = reinterpret_cast<int8_t*>(static_cast<intptr_t>(address_long));
+ mirror::ByteArray* byte_array = array->AsByteArray();
+ for (int32_t i = 0; i < count; ++i, ++address) {
+ byte_array->SetWithoutChecks<true>(i + offset, *address);
+ }
+ return;
+ }
+
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ LOG(FATAL) << "Type unimplemented for Memory Array API, should not reach here: " << type;
+ UNREACHABLE();
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ case Primitive::kPrimVoid:
+ case Primitive::kPrimNot:
+ LOG(FATAL) << "Not in the Memory API: " << type;
+ UNREACHABLE();
+ }
+ LOG(FATAL) << "Should not reach here";
+ UNREACHABLE();
+}
+
+static void UnstartedMemoryPeekArrayEntry(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ std::string name(PrettyMethod(shadow_frame->GetMethod()));
+ if (name == "void libcore.io.Memory.peekByteArray(long, byte[], int, int)") {
+ UnstartedMemoryPeekArray(Primitive::kPrimByte, self, shadow_frame, arg_offset);
+ } else {
+ LOG(FATAL) << "Unsupported Memory.peekArray entry: " << name;
+ UNREACHABLE();
+ }
+}
+
+static void UnstartedJNIVMRuntimeNewUnpaddedArray(Thread* self,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ int32_t length = args[1];
+ DCHECK_GE(length, 0);
+ mirror::Class* element_class = reinterpret_cast<mirror::Object*>(args[0])->AsClass();
+ Runtime* runtime = Runtime::Current();
+ mirror::Class* array_class = runtime->GetClassLinker()->FindArrayClass(self, &element_class);
+ DCHECK(array_class != nullptr);
+ gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator();
+ result->SetL(mirror::Array::Alloc<true, true>(self, array_class, length,
+ array_class->GetComponentSizeShift(), allocator));
+}
+
+static void UnstartedJNIVMStackGetCallingClassLoader(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args ATTRIBUTE_UNUSED,
+ JValue* result) {
+ result->SetL(nullptr);
+}
+
+static void UnstartedJNIVMStackGetStackClass2(Thread* self,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args ATTRIBUTE_UNUSED,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ NthCallerVisitor visitor(self, 3);
+ visitor.WalkStack();
+ if (visitor.caller != nullptr) {
+ result->SetL(visitor.caller->GetDeclaringClass());
+ }
+}
+
+static void UnstartedJNIMathLog(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result) {
+ JValue value;
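+  // A double argument occupies two 32-bit vreg slots; reassemble it with the
+  // low word in args[0] and the high word in args[1] (exp below does the same).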
+ value.SetJ((static_cast<uint64_t>(args[1]) << 32) | args[0]);
+ result->SetD(log(value.GetD()));
+}
+
+static void UnstartedJNIMathExp(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result) {
+ JValue value;
+ value.SetJ((static_cast<uint64_t>(args[1]) << 32) | args[0]);
+ result->SetD(exp(value.GetD()));
+}
+
+static void UnstartedJNIClassGetNameNative(Thread* self,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver,
+ uint32_t* args ATTRIBUTE_UNUSED,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ StackHandleScope<1> hs(self);
+ result->SetL(mirror::Class::ComputeName(hs.NewHandle(receiver->AsClass())));
+}
+
+static void UnstartedJNIFloatFloatToRawIntBits(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result) {
+ result->SetI(args[0]);
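+  // The raw IEEE-754 bits already sit in the 32-bit vreg slot, so the value
+  // passes through unchanged; intBitsToFloat below is the same identity.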
+}
+
+static void UnstartedJNIFloatIntBitsToFloat(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result) {
+ result->SetI(args[0]);
+}
+
+static void UnstartedJNIObjectInternalClone(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver,
+ uint32_t* args ATTRIBUTE_UNUSED,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ result->SetL(receiver->Clone(self));
+}
+
+static void UnstartedJNIObjectNotifyAll(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver,
+ uint32_t* args ATTRIBUTE_UNUSED,
+ JValue* result ATTRIBUTE_UNUSED)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ receiver->NotifyAll(self);
+}
+
+static void UnstartedJNIStringCompareTo(Thread* self,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver,
+ uint32_t* args,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::String* rhs = reinterpret_cast<mirror::Object*>(args[0])->AsString();
+  if (rhs == nullptr) {
+    AbortTransactionOrFail(self, "String.compareTo with null object");
+    return;  // Avoid dereferencing the null rhs below.
+  }
+ result->SetI(receiver->AsString()->CompareTo(rhs));
+}
+
+static void UnstartedJNIStringIntern(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver,
+ uint32_t* args ATTRIBUTE_UNUSED,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ result->SetL(receiver->AsString()->Intern());
+}
+
+static void UnstartedJNIStringFastIndexOf(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver,
+ uint32_t* args,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ result->SetI(receiver->AsString()->FastIndexOf(args[0], args[1]));
+}
+
+static void UnstartedJNIArrayCreateMultiArray(Thread* self,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ StackHandleScope<2> hs(self);
+ auto h_class(hs.NewHandle(reinterpret_cast<mirror::Class*>(args[0])->AsClass()));
+ auto h_dimensions(hs.NewHandle(reinterpret_cast<mirror::IntArray*>(args[1])->AsIntArray()));
+ result->SetL(mirror::Array::CreateMultiArray(self, h_class, h_dimensions));
+}
+
+static void UnstartedJNIThrowableNativeFillInStackTrace(Thread* self,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args ATTRIBUTE_UNUSED,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedObjectAccessUnchecked soa(self);
+ if (Runtime::Current()->IsActiveTransaction()) {
+ result->SetL(soa.Decode<mirror::Object*>(self->CreateInternalStackTrace<true>(soa)));
+ } else {
+ result->SetL(soa.Decode<mirror::Object*>(self->CreateInternalStackTrace<false>(soa)));
+ }
+}
+
+static void UnstartedJNISystemIdentityHashCode(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(args[0]);
+ result->SetI((obj != nullptr) ? obj->IdentityHashCode() : 0);
+}
+
+static void UnstartedJNIByteOrderIsLittleEndian(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args ATTRIBUTE_UNUSED,
+ JValue* result) {
+ result->SetZ(JNI_TRUE);
+}
+
+static void UnstartedJNIUnsafeCompareAndSwapInt(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(args[0]);
+ jlong offset = (static_cast<uint64_t>(args[2]) << 32) | args[1];
+ jint expectedValue = args[3];
+ jint newValue = args[4];
+ bool success;
+ if (Runtime::Current()->IsActiveTransaction()) {
+ success = obj->CasFieldStrongSequentiallyConsistent32<true>(MemberOffset(offset),
+ expectedValue, newValue);
+ } else {
+ success = obj->CasFieldStrongSequentiallyConsistent32<false>(MemberOffset(offset),
+ expectedValue, newValue);
+ }
+ result->SetZ(success ? JNI_TRUE : JNI_FALSE);
+}
+
+static void UnstartedJNIUnsafePutObject(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result ATTRIBUTE_UNUSED)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(args[0]);
+ jlong offset = (static_cast<uint64_t>(args[2]) << 32) | args[1];
+ mirror::Object* newValue = reinterpret_cast<mirror::Object*>(args[3]);
+ if (Runtime::Current()->IsActiveTransaction()) {
+ obj->SetFieldObject<true>(MemberOffset(offset), newValue);
+ } else {
+ obj->SetFieldObject<false>(MemberOffset(offset), newValue);
+ }
+}
+
+static void UnstartedJNIUnsafeGetArrayBaseOffsetForComponentType(
+ Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* component = reinterpret_cast<mirror::Object*>(args[0])->AsClass();
+ Primitive::Type primitive_type = component->GetPrimitiveType();
+ result->SetI(mirror::Array::DataOffset(Primitive::ComponentSize(primitive_type)).Int32Value());
+}
+
+static void UnstartedJNIUnsafeGetArrayIndexScaleForComponentType(
+ Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* component = reinterpret_cast<mirror::Object*>(args[0])->AsClass();
+ Primitive::Type primitive_type = component->GetPrimitiveType();
+ result->SetI(Primitive::ComponentSize(primitive_type));
+}
+
+typedef void (*InvokeHandler)(Thread* self, ShadowFrame* shadow_frame, JValue* result,
+ size_t arg_size);
+
+typedef void (*JNIHandler)(Thread* self, mirror::ArtMethod* method, mirror::Object* receiver,
+ uint32_t* args, JValue* result);
+
+static bool tables_initialized_ = false;
+static std::unordered_map<std::string, InvokeHandler> invoke_handlers_;
+static std::unordered_map<std::string, JNIHandler> jni_handlers_;
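+// Both tables are keyed by the PrettyMethod signature string, e.g.
+// "double java.lang.Math.ceil(double)"; UnstartedRuntimeInvoke and
+// UnstartedRuntimeJni below look methods up by that exact string.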
+
+static void UnstartedRuntimeInitializeInvokeHandlers() {
+ struct InvokeHandlerDef {
+ std::string name;
+ InvokeHandler function;
+ };
+
+ InvokeHandlerDef defs[] {
+ { "java.lang.Class java.lang.Class.forName(java.lang.String)",
+ &UnstartedClassForName },
+ { "java.lang.Class java.lang.Class.forName(java.lang.String, boolean, java.lang.ClassLoader)",
+ &UnstartedClassForNameLong },
+ { "java.lang.Class java.lang.Class.classForName(java.lang.String, boolean, java.lang.ClassLoader)",
+ &UnstartedClassClassForName },
+ { "java.lang.Class java.lang.VMClassLoader.findLoadedClass(java.lang.ClassLoader, java.lang.String)",
+ &UnstartedVmClassLoaderFindLoadedClass },
+ { "java.lang.Class java.lang.Void.lookupType()",
+ &UnstartedVoidLookupType },
+ { "java.lang.Object java.lang.Class.newInstance()",
+ &UnstartedClassNewInstance },
+ { "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)",
+ &UnstartedClassGetDeclaredField },
+ { "int java.lang.Object.hashCode()",
+ &UnstartedObjectHashCode },
+ { "java.lang.String java.lang.reflect.ArtMethod.getMethodName(java.lang.reflect.ArtMethod)",
+ &UnstartedArtMethodGetMethodName },
+ { "void java.lang.System.arraycopy(java.lang.Object, int, java.lang.Object, int, int)",
+ &UnstartedSystemArraycopy},
+ { "void java.lang.System.arraycopy(char[], int, char[], int, int)",
+ &UnstartedSystemArraycopy },
+ { "void java.lang.System.arraycopy(int[], int, int[], int, int)",
+ &UnstartedSystemArraycopy },
+ { "long java.lang.Double.doubleToRawLongBits(double)",
+ &UnstartedDoubleDoubleToRawLongBits },
+ { "double java.lang.Math.ceil(double)",
+ &UnstartedMathCeil },
+ { "java.lang.Object java.lang.ThreadLocal.get()",
+ &UnstartedThreadLocalGet },
+ { "com.android.dex.Dex java.lang.DexCache.getDexNative()",
+ &UnstartedDexCacheGetDexNative },
+ { "byte libcore.io.Memory.peekByte(long)",
+ &UnstartedMemoryPeekEntry },
+ { "short libcore.io.Memory.peekShortNative(long)",
+ &UnstartedMemoryPeekEntry },
+ { "int libcore.io.Memory.peekIntNative(long)",
+ &UnstartedMemoryPeekEntry },
+ { "long libcore.io.Memory.peekLongNative(long)",
+ &UnstartedMemoryPeekEntry },
+ { "void libcore.io.Memory.peekByteArray(long, byte[], int, int)",
+ &UnstartedMemoryPeekArrayEntry },
+ };
+
+ for (auto& def : defs) {
+ invoke_handlers_.insert(std::make_pair(def.name, def.function));
+ }
+}
+
+static void UnstartedRuntimeInitializeJNIHandlers() {
+ struct JNIHandlerDef {
+ std::string name;
+ JNIHandler function;
+ };
+
+ JNIHandlerDef defs[] {
+ { "java.lang.Object dalvik.system.VMRuntime.newUnpaddedArray(java.lang.Class, int)",
+ &UnstartedJNIVMRuntimeNewUnpaddedArray },
+ { "java.lang.ClassLoader dalvik.system.VMStack.getCallingClassLoader()",
+ &UnstartedJNIVMStackGetCallingClassLoader },
+ { "java.lang.Class dalvik.system.VMStack.getStackClass2()",
+ &UnstartedJNIVMStackGetStackClass2 },
+ { "double java.lang.Math.log(double)",
+ &UnstartedJNIMathLog },
+ { "java.lang.String java.lang.Class.getNameNative()",
+ &UnstartedJNIClassGetNameNative },
+ { "int java.lang.Float.floatToRawIntBits(float)",
+ &UnstartedJNIFloatFloatToRawIntBits },
+ { "float java.lang.Float.intBitsToFloat(int)",
+ &UnstartedJNIFloatIntBitsToFloat },
+ { "double java.lang.Math.exp(double)",
+ &UnstartedJNIMathExp },
+ { "java.lang.Object java.lang.Object.internalClone()",
+ &UnstartedJNIObjectInternalClone },
+ { "void java.lang.Object.notifyAll()",
+ &UnstartedJNIObjectNotifyAll},
+ { "int java.lang.String.compareTo(java.lang.String)",
+ &UnstartedJNIStringCompareTo },
+ { "java.lang.String java.lang.String.intern()",
+ &UnstartedJNIStringIntern },
+ { "int java.lang.String.fastIndexOf(int, int)",
+ &UnstartedJNIStringFastIndexOf },
+ { "java.lang.Object java.lang.reflect.Array.createMultiArray(java.lang.Class, int[])",
+ &UnstartedJNIArrayCreateMultiArray },
+ { "java.lang.Object java.lang.Throwable.nativeFillInStackTrace()",
+ &UnstartedJNIThrowableNativeFillInStackTrace },
+ { "int java.lang.System.identityHashCode(java.lang.Object)",
+ &UnstartedJNISystemIdentityHashCode },
+ { "boolean java.nio.ByteOrder.isLittleEndian()",
+ &UnstartedJNIByteOrderIsLittleEndian },
+ { "boolean sun.misc.Unsafe.compareAndSwapInt(java.lang.Object, long, int, int)",
+ &UnstartedJNIUnsafeCompareAndSwapInt },
+ { "void sun.misc.Unsafe.putObject(java.lang.Object, long, java.lang.Object)",
+ &UnstartedJNIUnsafePutObject },
+ { "int sun.misc.Unsafe.getArrayBaseOffsetForComponentType(java.lang.Class)",
+ &UnstartedJNIUnsafeGetArrayBaseOffsetForComponentType },
+ { "int sun.misc.Unsafe.getArrayIndexScaleForComponentType(java.lang.Class)",
+ &UnstartedJNIUnsafeGetArrayIndexScaleForComponentType },
+ };
+
+ for (auto& def : defs) {
+ jni_handlers_.insert(std::make_pair(def.name, def.function));
+ }
+}
+
+void UnstartedRuntimeInitialize() {
+ CHECK(!tables_initialized_);
+
+ UnstartedRuntimeInitializeInvokeHandlers();
+ UnstartedRuntimeInitializeJNIHandlers();
+
+ tables_initialized_ = true;
+}
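+// Note: UnstartedRuntimeInitialize must run exactly once before the first call
+// to UnstartedRuntimeInvoke, which CHECKs that the tables are populated.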
+
+void UnstartedRuntimeInvoke(Thread* self, const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+  // In a runtime that's not started, we intercept certain methods to avoid complicated dependency
+ // problems in core libraries.
+ CHECK(tables_initialized_);
+
+ std::string name(PrettyMethod(shadow_frame->GetMethod()));
+ const auto& iter = invoke_handlers_.find(name);
+ if (iter != invoke_handlers_.end()) {
+ (*iter->second)(self, shadow_frame, result, arg_offset);
+ } else {
+ // Not special, continue with regular interpreter execution.
+ artInterpreterToInterpreterBridge(self, code_item, shadow_frame, result);
+ }
+}
+
+// Hand-selected methods to be run in a not-yet-started runtime without using JNI.
+void UnstartedRuntimeJni(Thread* self, mirror::ArtMethod* method, mirror::Object* receiver,
+ uint32_t* args, JValue* result) {
+ std::string name(PrettyMethod(method));
+ const auto& iter = jni_handlers_.find(name);
+ if (iter != jni_handlers_.end()) {
+ (*iter->second)(self, method, receiver, args, result);
+ } else if (Runtime::Current()->IsActiveTransaction()) {
+ AbortTransaction(self, "Attempt to invoke native method in non-started runtime: %s",
+ name.c_str());
+ } else {
+ LOG(FATAL) << "Calling native method " << PrettyMethod(method) << " in an unstarted "
+ "non-transactional runtime";
+ }
+}
+
+} // namespace interpreter
+} // namespace art
diff --git a/runtime/interpreter/unstarted_runtime.h b/runtime/interpreter/unstarted_runtime.h
new file mode 100644
index 0000000..2d7d380
--- /dev/null
+++ b/runtime/interpreter/unstarted_runtime.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERPRETER_UNSTARTED_RUNTIME_H_
+#define ART_RUNTIME_INTERPRETER_UNSTARTED_RUNTIME_H_
+
+#include "interpreter.h"
+
+#include "dex_file.h"
+#include "jvalue.h"
+
+namespace art {
+
+class Thread;
+class ShadowFrame;
+
+namespace mirror {
+
+class ArtMethod;
+class Object;
+
+} // namespace mirror
+
+namespace interpreter {
+
+void UnstartedRuntimeInitialize();
+
+void UnstartedRuntimeInvoke(Thread* self, const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame,
+ JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+void UnstartedRuntimeJni(Thread* self, mirror::ArtMethod* method, mirror::Object* receiver,
+ uint32_t* args, JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+} // namespace interpreter
+} // namespace art
+
+#endif // ART_RUNTIME_INTERPRETER_UNSTARTED_RUNTIME_H_
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 08332d3..e68616f 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -741,8 +741,7 @@
}
// Throwing can cause libraries_lock to be reacquired.
if (native_method == nullptr) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewException(throw_location, "Ljava/lang/UnsatisfiedLinkError;", detail.c_str());
+ self->ThrowNewException("Ljava/lang/UnsatisfiedLinkError;", detail.c_str());
}
return native_method;
}
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index fc08d23..4bf7142 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -596,17 +596,15 @@
return;
}
- DebugInvokeReq* pReq = Dbg::GetInvokeReq();
while (true) {
- pReq->ready = true;
Dbg::SuspendSelf();
- pReq->ready = false;
/*
* The JDWP thread has told us (and possibly all other threads) to
* resume. See if it has left anything in our DebugInvokeReq mailbox.
*/
- if (!pReq->invoke_needed) {
+ DebugInvokeReq* const pReq = Dbg::GetInvokeReq();
+ if (pReq == nullptr) {
/*LOGD("SuspendByPolicy: no invoke needed");*/
break;
}
@@ -614,10 +612,7 @@
/* grab this before posting/suspending again */
AcquireJdwpTokenForEvent(thread_self_id);
- /* leave pReq->invoke_needed_ raised so we can check reentrancy */
Dbg::ExecuteMethod(pReq);
-
- pReq->error = ERR_NONE;
}
}
@@ -650,7 +645,7 @@
*/
bool JdwpState::InvokeInProgress() {
DebugInvokeReq* pReq = Dbg::GetInvokeReq();
- return pReq->invoke_needed;
+ return pReq != nullptr;
}
void JdwpState::AcquireJdwpTokenForCommand() {
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 0ce4de7..c7083dc 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -91,9 +91,9 @@
* If "is_constructor" is set, this returns "object_id" rather than the
* expected-to-be-void return value of the called function.
*/
-static JdwpError FinishInvoke(JdwpState*, Request* request, ExpandBuf* pReply,
- ObjectId thread_id, ObjectId object_id,
- RefTypeId class_id, MethodId method_id, bool is_constructor)
+static JdwpError RequestInvoke(JdwpState*, Request* request, ExpandBuf* pReply,
+ ObjectId thread_id, ObjectId object_id,
+ RefTypeId class_id, MethodId method_id, bool is_constructor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK(!is_constructor || object_id != 0);
@@ -131,37 +131,35 @@
return err;
}
- if (err == ERR_NONE) {
- if (is_constructor) {
- // If we invoked a constructor (which actually returns void), return the receiver,
- // unless we threw, in which case we return NULL.
- resultTag = JT_OBJECT;
- resultValue = (exceptObjId == 0) ? object_id : 0;
- }
+ if (is_constructor) {
+ // If we invoked a constructor (which actually returns void), return the receiver,
+ // unless we threw, in which case we return NULL.
+ resultTag = JT_OBJECT;
+ resultValue = (exceptObjId == 0) ? object_id : 0;
+ }
- size_t width = Dbg::GetTagWidth(resultTag);
- expandBufAdd1(pReply, resultTag);
- if (width != 0) {
- WriteValue(pReply, width, resultValue);
- }
- expandBufAdd1(pReply, JT_OBJECT);
- expandBufAddObjectId(pReply, exceptObjId);
+ size_t width = Dbg::GetTagWidth(resultTag);
+ expandBufAdd1(pReply, resultTag);
+ if (width != 0) {
+ WriteValue(pReply, width, resultValue);
+ }
+ expandBufAdd1(pReply, JT_OBJECT);
+ expandBufAddObjectId(pReply, exceptObjId);
- VLOG(jdwp) << " --> returned " << resultTag
- << StringPrintf(" %#" PRIx64 " (except=%#" PRIx64 ")", resultValue, exceptObjId);
+ VLOG(jdwp) << " --> returned " << resultTag
+ << StringPrintf(" %#" PRIx64 " (except=%#" PRIx64 ")", resultValue, exceptObjId);
- /* show detailed debug output */
- if (resultTag == JT_STRING && exceptObjId == 0) {
- if (resultValue != 0) {
- if (VLOG_IS_ON(jdwp)) {
- std::string result_string;
- JDWP::JdwpError error = Dbg::StringToUtf8(resultValue, &result_string);
- CHECK_EQ(error, JDWP::ERR_NONE);
- VLOG(jdwp) << " string '" << result_string << "'";
- }
- } else {
- VLOG(jdwp) << " string (null)";
+ /* show detailed debug output */
+ if (resultTag == JT_STRING && exceptObjId == 0) {
+ if (resultValue != 0) {
+ if (VLOG_IS_ON(jdwp)) {
+ std::string result_string;
+ JDWP::JdwpError error = Dbg::StringToUtf8(resultValue, &result_string);
+ CHECK_EQ(error, JDWP::ERR_NONE);
+ VLOG(jdwp) << " string '" << result_string << "'";
}
+ } else {
+ VLOG(jdwp) << " string (null)";
}
}
@@ -693,7 +691,7 @@
ObjectId thread_id = request->ReadThreadId();
MethodId method_id = request->ReadMethodId();
- return FinishInvoke(state, request, pReply, thread_id, 0, class_id, method_id, false);
+ return RequestInvoke(state, request, pReply, thread_id, 0, class_id, method_id, false);
}
/*
@@ -717,7 +715,7 @@
if (object_id == 0) {
return ERR_OUT_OF_MEMORY;
}
- return FinishInvoke(state, request, pReply, thread_id, object_id, class_id, method_id, true);
+ return RequestInvoke(state, request, pReply, thread_id, object_id, class_id, method_id, true);
}
/*
@@ -879,7 +877,7 @@
RefTypeId class_id = request->ReadRefTypeId();
MethodId method_id = request->ReadMethodId();
- return FinishInvoke(state, request, pReply, thread_id, object_id, class_id, method_id, false);
+ return RequestInvoke(state, request, pReply, thread_id, object_id, class_id, method_id, false);
}
static JdwpError OR_DisableCollection(JdwpState*, Request* request, ExpandBuf*)
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 539c181..9b89459 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -40,16 +40,32 @@
options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheCapacity);
jit_options->compile_threshold_ =
options.GetOrDefault(RuntimeArgumentMap::JITCompileThreshold);
+ jit_options->dump_info_on_shutdown_ =
+ options.Exists(RuntimeArgumentMap::DumpJITInfoOnShutdown);
return jit_options;
}
+void Jit::DumpInfo(std::ostream& os) {
+ os << "Code cache size=" << PrettySize(code_cache_->CodeCacheSize())
+ << " data cache size=" << PrettySize(code_cache_->DataCacheSize())
+ << " num methods=" << code_cache_->NumMethods()
+ << "\n";
+ cumulative_timings_.Dump(os);
+}
+
+void Jit::AddTimingLogger(const TimingLogger& logger) {
+ cumulative_timings_.AddLogger(logger);
+}
+
Jit::Jit()
: jit_library_handle_(nullptr), jit_compiler_handle_(nullptr), jit_load_(nullptr),
- jit_compile_method_(nullptr) {
+ jit_compile_method_(nullptr), dump_info_on_shutdown_(false),
+ cumulative_timings_("JIT timings") {
}
Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
std::unique_ptr<Jit> jit(new Jit);
+ jit->dump_info_on_shutdown_ = options->DumpJitInfoOnShutdown();
if (!jit->LoadCompiler(error_msg)) {
return nullptr;
}
@@ -133,6 +149,9 @@
}
Jit::~Jit() {
+ if (dump_info_on_shutdown_) {
+ DumpInfo(LOG(INFO));
+ }
DeleteThreadPool();
if (jit_compiler_handle_ != nullptr) {
jit_unload_(jit_compiler_handle_);
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index b80015f..6b206d1 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -24,6 +24,7 @@
#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "base/timing_logger.h"
#include "gc_root.h"
#include "jni.h"
#include "object_callbacks.h"
@@ -61,6 +62,11 @@
return code_cache_.get();
}
void DeleteThreadPool();
+ // Dump interesting info: #methods compiled, code vs data size, compile / verify cumulative
+ // loggers.
+ void DumpInfo(std::ostream& os);
+ // Add a timing logger to cumulative_timings_.
+ void AddTimingLogger(const TimingLogger& logger);
private:
Jit();
@@ -73,6 +79,10 @@
void (*jit_unload_)(void*);
bool (*jit_compile_method_)(void*, mirror::ArtMethod*, Thread*);
+ // Performance monitoring.
+ bool dump_info_on_shutdown_;
+ CumulativeLogger cumulative_timings_;
+
std::unique_ptr<jit::JitInstrumentationCache> instrumentation_cache_;
std::unique_ptr<jit::JitCodeCache> code_cache_;
CompilerCallbacks* compiler_callbacks_; // Owned by the jit compiler.
@@ -87,12 +97,16 @@
size_t GetCodeCacheCapacity() const {
return code_cache_capacity_;
}
+ bool DumpJitInfoOnShutdown() const {
+ return dump_info_on_shutdown_;
+ }
private:
size_t code_cache_capacity_;
size_t compile_threshold_;
+ bool dump_info_on_shutdown_;
- JitOptions() : code_cache_capacity_(0), compile_threshold_(0) {
+ JitOptions() : code_cache_capacity_(0), compile_threshold_(0), dump_info_on_shutdown_(false) {
}
};
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 4ae4d57..4d367e0 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -77,6 +77,7 @@
if (size > CodeCacheRemain()) {
return nullptr;
}
+ ++num_methods_; // TODO: This is hacky but works since each method has exactly one code region.
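+  // Bump-pointer reservation: advance the cursor, then hand back the start of
+  // the region just reserved.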
code_cache_ptr_ += size;
return code_cache_ptr_ - size;
}
diff --git a/runtime/jit/jit_instrumentation.h b/runtime/jit/jit_instrumentation.h
index 9576f4b..425d2d3 100644
--- a/runtime/jit/jit_instrumentation.h
+++ b/runtime/jit/jit_instrumentation.h
@@ -39,7 +39,6 @@
} // namespace mirror
union JValue;
class Thread;
-class ThrowLocation;
namespace jit {
@@ -83,8 +82,7 @@
mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/,
mirror::ArtField* /*field*/, const JValue& /*field_value*/)
OVERRIDE { }
- virtual void ExceptionCaught(Thread* /*thread*/, const ThrowLocation& /*throw_location*/,
- mirror::ArtMethod* /*catch_method*/, uint32_t /*catch_dex_pc*/,
+ virtual void ExceptionCaught(Thread* /*thread*/,
mirror::Throwable* /*exception_object*/) OVERRIDE { }
virtual void DexPcMoved(Thread* /*self*/, mirror::Object* /*this_object*/,
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 561302e..6063e1e 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -45,7 +45,6 @@
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
-#include "nativebridge/native_bridge.h"
#include "parsed_options.h"
#include "reflection.h"
#include "runtime.h"
@@ -89,9 +88,8 @@
static void ThrowNoSuchMethodError(ScopedObjectAccess& soa, mirror::Class* c,
const char* name, const char* sig, const char* kind)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
std::string temp;
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchMethodError;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchMethodError;",
"no %s method \"%s.%s%s\"",
kind, c->GetDescriptor(&temp), name, sig);
}
@@ -102,8 +100,7 @@
LOG(return_errors ? ERROR : FATAL) << "Failed to register native method in "
<< PrettyDescriptor(c) << " in " << c->GetDexCache()->GetLocation()->ToModifiedUtf8()
<< ": " << kind << " is null at index " << idx;
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchMethodError;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchMethodError;",
"%s is null at index %d", kind, idx);
}
@@ -196,16 +193,15 @@
if (field_type == nullptr) {
// Failed to find type from the signature of the field.
DCHECK(soa.Self()->IsExceptionPending());
- ThrowLocation throw_location;
StackHandleScope<1> hs2(soa.Self());
- Handle<mirror::Throwable> cause(hs2.NewHandle(soa.Self()->GetException(&throw_location)));
+ Handle<mirror::Throwable> cause(hs2.NewHandle(soa.Self()->GetException()));
soa.Self()->ClearException();
std::string temp;
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchFieldError;",
"no type \"%s\" found and so no field \"%s\" "
"could be found in class \"%s\" or its superclasses", sig, name,
c->GetDescriptor(&temp));
- soa.Self()->GetException(nullptr)->SetCause(cause.Get());
+ soa.Self()->GetException()->SetCause(cause.Get());
return nullptr;
}
std::string temp;
@@ -216,8 +212,7 @@
field = c->FindInstanceField(name, field_type->GetDescriptor(&temp));
}
if (field == nullptr) {
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchFieldError;",
"no \"%s\" field \"%s\" in class \"%s\" or its superclasses",
sig, name, c->GetDescriptor(&temp));
return nullptr;
@@ -229,8 +224,7 @@
jsize length, const char* identifier)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string type(PrettyTypeOf(array));
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
"%s offset=%d length=%d %s.length=%d",
type.c_str(), start, length, identifier, array->GetLength());
}
@@ -238,8 +232,7 @@
static void ThrowSIOOBE(ScopedObjectAccess& soa, jsize start, jsize length,
jsize array_length)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/StringIndexOutOfBoundsException;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/StringIndexOutOfBoundsException;",
"offset=%d length=%d string.length()=%d", start, length,
array_length);
}
@@ -282,8 +275,7 @@
return JNI_ERR;
}
ScopedObjectAccess soa(env);
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->SetException(throw_location, soa.Decode<mirror::Throwable*>(exception.get()));
+ soa.Self()->SetException(soa.Decode<mirror::Throwable*>(exception.get()));
return JNI_OK;
}
@@ -433,8 +425,7 @@
if (exception == nullptr) {
return JNI_ERR;
}
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->SetException(throw_location, exception);
+ soa.Self()->SetException(exception);
return JNI_OK;
}
@@ -456,25 +447,14 @@
ScopedObjectAccess soa(env);
// If we have no exception to describe, pass through.
- if (!soa.Self()->GetException(nullptr)) {
+ if (!soa.Self()->GetException()) {
return;
}
- StackHandleScope<3> hs(soa.Self());
- // TODO: Use nullptr instead of null handles?
- auto old_throw_this_object(hs.NewHandle<mirror::Object>(nullptr));
- auto old_throw_method(hs.NewHandle<mirror::ArtMethod>(nullptr));
- auto old_exception(hs.NewHandle<mirror::Throwable>(nullptr));
- uint32_t old_throw_dex_pc;
- {
- ThrowLocation old_throw_location;
- mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location);
- old_throw_this_object.Assign(old_throw_location.GetThis());
- old_throw_method.Assign(old_throw_location.GetMethod());
- old_exception.Assign(old_exception_obj);
- old_throw_dex_pc = old_throw_location.GetDexPc();
- soa.Self()->ClearException();
- }
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Throwable> old_exception(
+ hs.NewHandle<mirror::Throwable>(soa.Self()->GetException()));
+ soa.Self()->ClearException();
ScopedLocalRef<jthrowable> exception(env,
soa.AddLocalReference<jthrowable>(old_exception.Get()));
ScopedLocalRef<jclass> exception_class(env, env->GetObjectClass(exception.get()));
@@ -485,20 +465,17 @@
} else {
env->CallVoidMethod(exception.get(), mid);
if (soa.Self()->IsExceptionPending()) {
- LOG(WARNING) << "JNI WARNING: " << PrettyTypeOf(soa.Self()->GetException(nullptr))
+ LOG(WARNING) << "JNI WARNING: " << PrettyTypeOf(soa.Self()->GetException())
<< " thrown while calling printStackTrace";
soa.Self()->ClearException();
}
}
- ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(),
- old_throw_dex_pc);
-
- soa.Self()->SetException(gc_safe_throw_location, old_exception.Get());
+ soa.Self()->SetException(old_exception.Get());
}
static jthrowable ExceptionOccurred(JNIEnv* env) {
ScopedObjectAccess soa(env);
- mirror::Object* exception = soa.Self()->GetException(nullptr);
+ mirror::Object* exception = soa.Self()->GetException();
return soa.AddLocalReference<jthrowable>(exception);
}
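With ThrowLocation gone, stashing and restoring a pending exception reduces to saving one Throwable reference across the call. A simplified rendering of that pattern against a toy thread type (in ART the Handle additionally keeps the object visible to the GC, which a raw pointer would not):

struct Throwable { const char* message; };

// Toy stand-in for Thread's pending-exception slot.
class ToyThread {
 public:
  Throwable* GetException() const { return exception_; }
  void SetException(Throwable* t) { exception_ = t; }
  void ClearException() { exception_ = nullptr; }
 private:
  Throwable* exception_ = nullptr;
};

void DescribeException(ToyThread* self) {
  Throwable* old_exception = self->GetException();
  if (old_exception == nullptr) {
    return;  // Nothing to describe.
  }
  self->ClearException();  // Run the describing code exception-free.
  // ... invoke the printStackTrace equivalent here; if it throws, log and
  // clear that secondary exception ...
  self->SetException(old_exception);  // Restore the original exception.
}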
diff --git a/runtime/memory_region.h b/runtime/memory_region.h
index b3820be..939a1a9 100644
--- a/runtime/memory_region.h
+++ b/runtime/memory_region.h
@@ -23,6 +23,7 @@
#include "base/macros.h"
#include "base/value_object.h"
#include "globals.h"
+#include "utils.h"
namespace art {
@@ -45,14 +46,64 @@
uint8_t* start() const { return reinterpret_cast<uint8_t*>(pointer_); }
uint8_t* end() const { return start() + size_; }
+ // Load value of type `T` at `offset`. The memory address corresponding
+ // to `offset` should be word-aligned.
template<typename T> T Load(uintptr_t offset) const {
+ // TODO: DCHECK that the address is word-aligned.
return *ComputeInternalPointer<T>(offset);
}
+ // Store `value` (of type `T`) at `offset`. The memory address
+ // corresponding to `offset` should be word-aligned.
template<typename T> void Store(uintptr_t offset, T value) const {
+ // TODO: DCHECK that the address is word-aligned.
*ComputeInternalPointer<T>(offset) = value;
}
+ // TODO: Local hack to prevent name clashes between two conflicting
+ // implementations of bit_cast:
+ // - art::bit_cast<Destination, Source> from runtime/base/casts.h, and
+ // - art::bit_cast<Source, Destination> from runtime/utils.h.
+ // Remove this when these routines have been merged.
+ template<typename Source, typename Destination>
+ static Destination local_bit_cast(Source in) {
+ static_assert(sizeof(Source) <= sizeof(Destination),
+ "Size of Source not <= size of Destination");
+ union {
+ Source u;
+ Destination v;
+ } tmp;
+ tmp.u = in;
+ return tmp.v;
+ }
+
+ // Load value of type `T` at `offset`. The memory address corresponding
+ // to `offset` does not need to be word-aligned.
+ template<typename T> T LoadUnaligned(uintptr_t offset) const {
+ // Equivalent unsigned integer type corresponding to T.
+ typedef typename UnsignedIntegerType<sizeof(T)>::type U;
+ U equivalent_unsigned_integer_value = 0;
+ // Read the value byte by byte in a little-endian fashion.
+ for (size_t i = 0; i < sizeof(U); ++i) {
+ equivalent_unsigned_integer_value +=
+ *ComputeInternalPointer<uint8_t>(offset + i) << (i * kBitsPerByte);
+ }
+ return local_bit_cast<U, T>(equivalent_unsigned_integer_value);
+ }
+
+ // Store `value` (of type `T`) at `offset`. The memory address
+ // corresponding to `offset` does not need to be word-aligned.
+ template<typename T> void StoreUnaligned(uintptr_t offset, T value) const {
+ // Equivalent unsigned integer type corresponding to T.
+ typedef typename UnsignedIntegerType<sizeof(T)>::type U;
+ U equivalent_unsigned_integer_value = local_bit_cast<T, U>(value);
+ // Write the value byte by byte in a little-endian fashion.
+ for (size_t i = 0; i < sizeof(U); ++i) {
+ *ComputeInternalPointer<uint8_t>(offset + i) =
+ (equivalent_unsigned_integer_value >> (i * kBitsPerByte)) & 0xFF;
+ }
+ }
+
template<typename T> T* PointerTo(uintptr_t offset) const {
return ComputeInternalPointer<T>(offset);
}
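A stand-alone sketch of the byte-by-byte little-endian load used by LoadUnaligned above (kBitsPerByte is 8; the test file added below exercises the same arithmetic):

#include <cstddef>
#include <cstdint>

// Assemble a 32-bit value from unaligned bytes, least significant byte first.
uint32_t LoadUnalignedU32(const uint8_t* bytes) {
  uint32_t value = 0;
  for (size_t i = 0; i < sizeof(uint32_t); ++i) {
    value += static_cast<uint32_t>(bytes[i]) << (i * 8);
  }
  return value;
}
// For bytes {1, 2, 3, 4}: 1 + (2 << 8) + (3 << 16) + (4 << 24) = 0x04030201.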
diff --git a/runtime/memory_region_test.cc b/runtime/memory_region_test.cc
new file mode 100644
index 0000000..72e03a4
--- /dev/null
+++ b/runtime/memory_region_test.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "memory_region.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(MemoryRegion, LoadUnaligned) {
+ const size_t n = 8;
+ uint8_t data[n] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+ MemoryRegion region(&data, n);
+
+ ASSERT_EQ(0, region.LoadUnaligned<char>(0));
+ ASSERT_EQ(1u
+ + (2u << kBitsPerByte)
+ + (3u << 2 * kBitsPerByte)
+ + (4u << 3 * kBitsPerByte),
+ region.LoadUnaligned<uint32_t>(1));
+ ASSERT_EQ(5 + (6 << kBitsPerByte), region.LoadUnaligned<int16_t>(5));
+ ASSERT_EQ(7u, region.LoadUnaligned<unsigned char>(7));
+}
+
+TEST(MemoryRegion, StoreUnaligned) {
+ const size_t n = 8;
+ uint8_t data[n] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+ MemoryRegion region(&data, n);
+
+ region.StoreUnaligned<unsigned char>(0u, 7);
+ region.StoreUnaligned<int16_t>(1, 6 + (5 << kBitsPerByte));
+ region.StoreUnaligned<uint32_t>(3,
+ 4u
+ + (3u << kBitsPerByte)
+ + (2u << 2 * kBitsPerByte)
+ + (1u << 3 * kBitsPerByte));
+ region.StoreUnaligned<char>(7, 0);
+
+ uint8_t expected[n] = { 7, 6, 5, 4, 3, 2, 1, 0 };
+ for (size_t i = 0; i < n; ++i) {
+ ASSERT_EQ(expected[i], data[i]);
+ }
+}
+
+} // namespace art
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 85fc5f3..bc58709 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -271,9 +271,8 @@
const DexFile::CodeItem* code_item = h_this->GetCodeItem();
// Set aside the exception while we resolve its type.
Thread* self = Thread::Current();
- ThrowLocation throw_location;
StackHandleScope<1> hs(self);
- Handle<mirror::Throwable> exception(hs.NewHandle(self->GetException(&throw_location)));
+ Handle<mirror::Throwable> exception(hs.NewHandle(self->GetException()));
self->ClearException();
// Default to handler not found.
uint32_t found_dex_pc = DexFile::kDexNoIndex;
@@ -309,7 +308,7 @@
}
// Put the exception back.
if (exception.Get() != nullptr) {
- self->SetException(throw_location, exception.Get());
+ self->SetException(exception.Get());
}
return found_dex_pc;
}
@@ -434,7 +433,7 @@
#else
(*art_quick_invoke_stub)(this, args, args_size, self, result, shorty);
#endif
- if (UNLIKELY(self->GetException(nullptr) == Thread::GetDeoptimizationException())) {
+ if (UNLIKELY(self->GetException() == Thread::GetDeoptimizationException())) {
// Unusual case where we were running generated code and an
// exception was thrown to force the activations to be removed from the
// stack. Continue execution in the interpreter.
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 96b15dd..6f4ef60 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -77,13 +77,9 @@
<< "Attempt to set as erroneous an already erroneous class " << PrettyClass(this);
// Stash current exception.
- StackHandleScope<3> hs(self);
- ThrowLocation old_throw_location;
- Handle<mirror::Throwable> old_exception(hs.NewHandle(self->GetException(&old_throw_location)));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Throwable> old_exception(hs.NewHandle(self->GetException()));
CHECK(old_exception.Get() != nullptr);
- Handle<mirror::Object> old_throw_this_object(hs.NewHandle(old_throw_location.GetThis()));
- Handle<mirror::ArtMethod> old_throw_method(hs.NewHandle(old_throw_location.GetMethod()));
- uint32_t old_throw_dex_pc = old_throw_location.GetDexPc();
Class* eiie_class;
// Don't attempt to use FindClass if we have an OOM error since this can try to do more
// allocations and may cause infinite loops.
@@ -109,9 +105,7 @@
}
// Restore exception.
- ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(),
- old_throw_dex_pc);
- self->SetException(gc_safe_throw_location, old_exception.Get());
+ self->SetException(old_exception.Get());
}
static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32");
if (Runtime::Current()->IsActiveTransaction()) {
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 3c947ab..c548c03 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -59,6 +59,10 @@
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_));
}
+ static MemberOffset DexOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(DexCache, dex_);
+ }
+
static MemberOffset StringsOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, strings_);
}
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 96d426b..80d5135 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -233,9 +233,8 @@
std::string actualSrcType(PrettyTypeOf(o));
std::string dstType(PrettyTypeOf(this));
Thread* self = Thread::Current();
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
if (throw_exception) {
- self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayStoreException;",
+ self->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;",
"source[%d] of type %s cannot be stored in destination array of type %s",
src_pos + i, actualSrcType.c_str(), dstType.c_str());
} else {
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 9b345a6..21972a1 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -124,12 +124,12 @@
EXPECT_TRUE(oa->Get(-1) == NULL);
EXPECT_TRUE(soa.Self()->IsExceptionPending());
- EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass());
+ EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
EXPECT_TRUE(oa->Get(2) == NULL);
EXPECT_TRUE(soa.Self()->IsExceptionPending());
- EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass());
+ EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
ASSERT_TRUE(oa->GetClass() != NULL);
@@ -213,12 +213,12 @@
EXPECT_EQ(0, a->Get(-1));
EXPECT_TRUE(soa.Self()->IsExceptionPending());
- EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass());
+ EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
EXPECT_EQ(0, a->Get(2));
EXPECT_TRUE(soa.Self()->IsExceptionPending());
- EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass());
+ EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
}
@@ -262,12 +262,12 @@
EXPECT_DOUBLE_EQ(0, a->Get(-1));
EXPECT_TRUE(soa.Self()->IsExceptionPending());
- EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass());
+ EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
EXPECT_DOUBLE_EQ(0, a->Get(2));
EXPECT_TRUE(soa.Self()->IsExceptionPending());
- EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass());
+ EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
}
@@ -292,12 +292,12 @@
EXPECT_FLOAT_EQ(0, a->Get(-1));
EXPECT_TRUE(soa.Self()->IsExceptionPending());
- EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass());
+ EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
EXPECT_FLOAT_EQ(0, a->Get(2));
EXPECT_TRUE(soa.Self()->IsExceptionPending());
- EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass());
+ EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
}
@@ -335,7 +335,7 @@
dims->Set<false>(0, -1);
multi = Array::CreateMultiArray(soa.Self(), c, dims);
EXPECT_TRUE(soa.Self()->IsExceptionPending());
- EXPECT_EQ(PrettyDescriptor(soa.Self()->GetException(NULL)->GetClass()),
+ EXPECT_EQ(PrettyDescriptor(soa.Self()->GetException()->GetClass()),
"java.lang.NegativeArraySizeException");
soa.Self()->ClearException();
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 45a971d..d41d37e 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -300,13 +300,12 @@
va_list args;
va_start(args, fmt);
Thread* self = Thread::Current();
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewExceptionV(throw_location, "Ljava/lang/IllegalMonitorStateException;", fmt, args);
+ self->ThrowNewExceptionV("Ljava/lang/IllegalMonitorStateException;", fmt, args);
if (!Runtime::Current()->IsStarted() || VLOG_IS_ON(monitor)) {
std::ostringstream ss;
self->Dump(ss);
LOG(Runtime::Current()->IsStarted() ? INFO : ERROR)
- << self->GetException(NULL)->Dump() << "\n" << ss.str();
+ << self->GetException()->Dump() << "\n" << ss.str();
}
va_end(args);
}
@@ -428,8 +427,7 @@
// Enforce the timeout range.
if (ms < 0 || ns < 0 || ns > 999999) {
monitor_lock_.Unlock(self);
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewExceptionF(throw_location, "Ljava/lang/IllegalArgumentException;",
+ self->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;",
"timeout arguments out of range: ms=%" PRId64 " ns=%d", ms, ns);
return;
}
@@ -540,8 +538,7 @@
self->SetInterruptedLocked(false);
}
if (interruptShouldThrow) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewException(throw_location, "Ljava/lang/InterruptedException;", NULL);
+ self->ThrowNewException("Ljava/lang/InterruptedException;", NULL);
}
}
}
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index e1fe3eb..c182a4d 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -16,31 +16,17 @@
#include "dalvik_system_DexFile.h"
-#include <algorithm>
-#include <set>
-#include <fcntl.h>
-#ifdef __linux__
-#include <sys/sendfile.h>
-#else
-#include <sys/socket.h>
-#endif
-#include <sys/stat.h>
-#include <unistd.h>
-
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/stringprintf.h"
#include "class_linker.h"
#include "common_throws.h"
#include "dex_file-inl.h"
-#include "gc/space/image_space.h"
-#include "gc/space/space-inl.h"
-#include "image.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/string.h"
-#include "oat.h"
+#include "oat_file_assistant.h"
#include "os.h"
#include "profiler.h"
#include "runtime.h"
@@ -51,11 +37,6 @@
#include "well_known_classes.h"
#include "zip_archive.h"
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wshadow"
-#include "ScopedFd.h"
-#pragma GCC diagnostic pop
-
namespace art {
static std::unique_ptr<std::vector<const DexFile*>>
@@ -182,10 +163,9 @@
std::vector<std::unique_ptr<const DexFile>> dex_files;
std::vector<std::string> error_msgs;
- bool success = linker->OpenDexFilesFromOat(sourceName.c_str(), outputName.c_str(), &error_msgs,
- &dex_files);
+ dex_files = linker->OpenDexFilesFromOat(sourceName.c_str(), outputName.c_str(), &error_msgs);
- if (success || !dex_files.empty()) {
+ if (!dex_files.empty()) {
jlongArray array = ConvertNativeToJavaArray(env, dex_files);
if (array == nullptr) {
ScopedObjectAccess soa(env);
@@ -197,9 +177,6 @@
}
return array;
} else {
- // The vector should be empty after a failed loading attempt.
- DCHECK_EQ(0U, dex_files.size());
-
ScopedObjectAccess soa(env);
CHECK(!error_msgs.empty());
// The most important message is at the end. So set up nesting by going forward, which will
@@ -320,40 +297,6 @@
return result;
}
-static void CopyProfileFile(const char* oldfile, const char* newfile) {
- ScopedFd src(open(oldfile, O_RDONLY));
- if (src.get() == -1) {
- PLOG(ERROR) << "Failed to open profile file " << oldfile
- << ". My uid:gid is " << getuid() << ":" << getgid();
- return;
- }
-
- struct stat stat_src;
- if (fstat(src.get(), &stat_src) == -1) {
- PLOG(ERROR) << "Failed to get stats for profile file " << oldfile
- << ". My uid:gid is " << getuid() << ":" << getgid();
- return;
- }
-
- // Create the copy with rw------- (only accessible by system)
- ScopedFd dst(open(newfile, O_WRONLY|O_CREAT|O_TRUNC, 0600));
- if (dst.get() == -1) {
- PLOG(ERROR) << "Failed to create/write prev profile file " << newfile
- << ". My uid:gid is " << getuid() << ":" << getgid();
- return;
- }
-
-#ifdef __linux__
- if (sendfile(dst.get(), src.get(), nullptr, stat_src.st_size) == -1) {
-#else
- off_t len;
- if (sendfile(dst.get(), src.get(), 0, &len, nullptr, 0) == -1) {
-#endif
- PLOG(ERROR) << "Failed to copy profile file " << oldfile << " to " << newfile
- << ". My uid:gid is " << getuid() << ":" << getgid();
- }
-}
-
// Java: dalvik.system.DexFile.UP_TO_DATE
static const jbyte kUpToDate = 0;
// Java: dalvik.system.DexFile.PATCHOAT_NEEDED
@@ -361,102 +304,8 @@
static const jbyte kPatchoatNeeded = 1;
// Java: dalvik.system.DexFile.DEXOPT_NEEDED
static const jbyte kDexoptNeeded = 2;
-template <const bool kVerboseLogging, const bool kReasonLogging>
-static jbyte IsDexOptNeededForFile(const std::string& oat_filename, const char* filename,
- InstructionSet target_instruction_set,
- bool* oat_is_pic) {
- std::string error_msg;
- std::unique_ptr<const OatFile> oat_file(OatFile::Open(oat_filename, oat_filename, nullptr,
- nullptr,
- false, &error_msg));
- if (oat_file.get() == nullptr) {
- // Note that even though this is kDexoptNeeded, we use
- // kVerboseLogging instead of the usual kReasonLogging since it is
- // the common case on first boot and very spammy.
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded failed to open oat file '" << oat_filename
- << "' for file location '" << filename << "': " << error_msg;
- }
- error_msg.clear();
- return kDexoptNeeded;
- }
-
- // Pass-up the information about if this is PIC.
- // TODO: Refactor this function to be less complicated.
- *oat_is_pic = oat_file->IsPic();
-
- bool should_relocate_if_possible = Runtime::Current()->ShouldRelocate();
- uint32_t location_checksum = 0;
- const art::OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(filename, nullptr,
- kReasonLogging);
- if (oat_dex_file != nullptr) {
- // If it's not possible to read the classes.dex, assume up-to-date as we won't be able to
- // compile it anyway.
- if (!DexFile::GetChecksum(filename, &location_checksum, &error_msg)) {
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded found precompiled stripped file: "
- << filename << " for " << oat_filename << ": " << error_msg;
- }
- if (ClassLinker::VerifyOatChecksums(oat_file.get(), target_instruction_set, &error_msg)) {
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
- << " is up-to-date for " << filename;
- }
- return kUpToDate;
- } else if (should_relocate_if_possible &&
- ClassLinker::VerifyOatImageChecksum(oat_file.get(), target_instruction_set)) {
- if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
- << " needs to be relocated for " << filename;
- }
- return kPatchoatNeeded;
- } else {
- if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
- << " is out of date for " << filename;
- }
- return kDexoptNeeded;
- }
- // If we get here the file is out of date and we should use the system one to relocate.
- } else {
- if (ClassLinker::VerifyOatAndDexFileChecksums(oat_file.get(), filename, location_checksum,
- target_instruction_set, &error_msg)) {
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
- << " is up-to-date for " << filename;
- }
- return kUpToDate;
- } else if (location_checksum == oat_dex_file->GetDexFileLocationChecksum()
- && should_relocate_if_possible
- && ClassLinker::VerifyOatImageChecksum(oat_file.get(), target_instruction_set)) {
- if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
- << " needs to be relocated for " << filename;
- }
- return kPatchoatNeeded;
- } else {
- if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
- << " is out of date for " << filename;
- }
- return kDexoptNeeded;
- }
- }
- } else {
- if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
- << " does not contain " << filename;
- }
- return kDexoptNeeded;
- }
-}
-
static jbyte IsDexOptNeededInternal(JNIEnv* env, const char* filename,
const char* pkgname, const char* instruction_set, const jboolean defer) {
- // Spammy logging for kUpToDate
- const bool kVerboseLogging = false;
- // Logging of reason for returning kDexoptNeeded or kPatchoatNeeded.
- const bool kReasonLogging = true;
if ((filename == nullptr) || !OS::FileExists(filename)) {
LOG(ERROR) << "DexFile_isDexOptNeeded file '" << filename << "' does not exist";
@@ -466,117 +315,6 @@
return kUpToDate;
}
- // Always treat elements of the bootclasspath as up-to-date. The
- // fact that code is running at all means that this should be true.
- Runtime* runtime = Runtime::Current();
- ClassLinker* class_linker = runtime->GetClassLinker();
- // TODO: We're assuming that the 64 and 32 bit runtimes have identical
- // class paths. isDexOptNeeded will not necessarily be called on a runtime
- // that has the same instruction set as the file being dexopted.
- const std::vector<const DexFile*>& boot_class_path = class_linker->GetBootClassPath();
- for (size_t i = 0; i < boot_class_path.size(); i++) {
- if (boot_class_path[i]->GetLocation() == filename) {
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded ignoring boot class path file: " << filename;
- }
- return kUpToDate;
- }
- }
-
- bool force_system_only = false;
- bool require_system_version = false;
-
- // Check the profile file. We need to rerun dex2oat if the profile has changed significantly
- // since the last time, or it's new.
- // If the 'defer' argument is true then this will be retried later. In this case we
- // need to make sure that the profile file copy is not made so that we will get the
- // same result second time.
- std::string profile_file;
- std::string prev_profile_file;
- bool should_copy_profile = false;
- if (Runtime::Current()->GetProfilerOptions().IsEnabled() && (pkgname != nullptr)) {
- profile_file = GetDalvikCacheOrDie("profiles", false /* create_if_absent */)
- + std::string("/") + pkgname;
- prev_profile_file = profile_file + std::string("@old");
-
- struct stat profstat, prevstat;
- int e1 = stat(profile_file.c_str(), &profstat);
- int e1_errno = errno;
- int e2 = stat(prev_profile_file.c_str(), &prevstat);
- int e2_errno = errno;
- if (e1 < 0) {
- if (e1_errno != EACCES) {
- // No profile file, need to run dex2oat, unless we find a file in system
- if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeededInternal profile file " << profile_file << " doesn't exist. "
- << "Will check odex to see if we can find a working version.";
- }
- // Force it to only accept system files/files with versions in system.
- require_system_version = true;
- } else {
- LOG(INFO) << "DexFile_isDexOptNeededInternal recieved EACCES trying to stat profile file "
- << profile_file;
- }
- } else if (e2 == 0) {
- // There is a previous profile file. Check if the profile has changed significantly.
- // A change in profile is considered significant if X% (change_thr property) of the top K%
- // (compile_thr property) samples has changed.
- double top_k_threshold = Runtime::Current()->GetProfilerOptions().GetTopKThreshold();
- double change_threshold = Runtime::Current()->GetProfilerOptions().GetTopKChangeThreshold();
- double change_percent = 0.0;
- ProfileFile new_profile, old_profile;
- bool new_ok = new_profile.LoadFile(profile_file);
- bool old_ok = old_profile.LoadFile(prev_profile_file);
- if (!new_ok || !old_ok) {
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeededInternal Ignoring invalid profiles: "
- << (new_ok ? "" : profile_file) << " " << (old_ok ? "" : prev_profile_file);
- }
- } else {
- std::set<std::string> new_top_k, old_top_k;
- new_profile.GetTopKSamples(new_top_k, top_k_threshold);
- old_profile.GetTopKSamples(old_top_k, top_k_threshold);
- if (new_top_k.empty()) {
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeededInternal empty profile: " << profile_file;
- }
- // If the new topK is empty we shouldn't optimize so we leave the change_percent at 0.0.
- } else {
- std::set<std::string> diff;
- std::set_difference(new_top_k.begin(), new_top_k.end(), old_top_k.begin(), old_top_k.end(),
- std::inserter(diff, diff.end()));
- // TODO: consider using the usedPercentage instead of the plain diff count.
- change_percent = 100.0 * static_cast<double>(diff.size()) / static_cast<double>(new_top_k.size());
- if (kVerboseLogging) {
- std::set<std::string>::iterator end = diff.end();
- for (std::set<std::string>::iterator it = diff.begin(); it != end; it++) {
- LOG(INFO) << "DexFile_isDexOptNeededInternal new in topK: " << *it;
- }
- }
- }
- }
-
- if (change_percent > change_threshold) {
- if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeededInternal size of new profile file " << profile_file <<
- " is significantly different from old profile file " << prev_profile_file << " (top "
- << top_k_threshold << "% samples changed in proportion of " << change_percent << "%)";
- }
- should_copy_profile = !defer;
- // Force us to only accept system files.
- force_system_only = true;
- }
- } else if (e2_errno == ENOENT) {
- // Previous profile does not exist. Make a copy of the current one.
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeededInternal previous profile doesn't exist: " << prev_profile_file;
- }
- should_copy_profile = !defer;
- } else {
- PLOG(INFO) << "Unable to stat previous profile file " << prev_profile_file;
- }
- }
-
const InstructionSet target_instruction_set = GetInstructionSetFromString(instruction_set);
if (target_instruction_set == kNone) {
ScopedLocalRef<jclass> iae(env, env->FindClass("java/lang/IllegalArgumentException"));
@@ -585,75 +323,43 @@
return 0;
}
- // Get the filename for odex file next to the dex file.
- std::string odex_filename(DexFilenameToOdexFilename(filename, target_instruction_set));
- // Get the filename for the dalvik-cache file
- std::string cache_dir;
- bool have_android_data = false;
- bool dalvik_cache_exists = false;
- bool is_global_cache = false;
- GetDalvikCache(instruction_set, false, &cache_dir, &have_android_data, &dalvik_cache_exists,
- &is_global_cache);
- std::string cache_filename; // was cache_location
- bool have_cache_filename = false;
- if (dalvik_cache_exists) {
- std::string error_msg;
- have_cache_filename = GetDalvikCacheFilename(filename, cache_dir.c_str(), &cache_filename,
- &error_msg);
- if (!have_cache_filename && kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeededInternal failed to find cache file for dex file " << filename
- << ": " << error_msg;
+ // TODO: Verify the dex location is well formed, and throw an IOException if
+ // not?
+
+ OatFileAssistant oat_file_assistant(filename, target_instruction_set, false, pkgname);
+
+ // Always treat elements of the bootclasspath as up-to-date.
+ if (oat_file_assistant.IsInBootClassPath()) {
+ return kUpToDate;
+ }
+
+ // TODO: Checking the profile should probably be done in the GetStatus()
+ // function. We have it here because GetStatus() should not be copying
+ // profile files. But who should be copying profile files?
+ if (oat_file_assistant.OdexFileIsOutOfDate()) {
+ // Needs recompile if profile has changed significantly.
+ if (Runtime::Current()->GetProfilerOptions().IsEnabled()) {
+ if (oat_file_assistant.IsProfileChangeSignificant()) {
+ if (!defer) {
+ oat_file_assistant.CopyProfileFile();
+ }
+ return kDexoptNeeded;
+ } else if (oat_file_assistant.ProfileExists()
+ && !oat_file_assistant.OldProfileExists()) {
+ if (!defer) {
+ oat_file_assistant.CopyProfileFile();
+ }
+ }
}
}
- bool should_relocate_if_possible = Runtime::Current()->ShouldRelocate();
-
- jbyte dalvik_cache_decision = -1;
- // Let's try the cache first (since we want to load from there, as that's where the relocated
- // versions will be).
- if (have_cache_filename && !force_system_only) {
- bool oat_is_pic;
- // We can use the dalvik-cache if we find a good file.
- dalvik_cache_decision =
- IsDexOptNeededForFile<kVerboseLogging, kReasonLogging>(cache_filename, filename,
- target_instruction_set, &oat_is_pic);
-
- // Apps that are compiled with --compile-pic never need to be patchoat-d
- if (oat_is_pic && dalvik_cache_decision == kPatchoatNeeded) {
- dalvik_cache_decision = kUpToDate;
- }
- // We will only return DexOptNeeded if both the cache and system return it.
- if (dalvik_cache_decision != kDexoptNeeded && !require_system_version) {
- CHECK(!(dalvik_cache_decision == kPatchoatNeeded && !should_relocate_if_possible))
- << "May not return PatchoatNeeded when patching is disabled.";
- return dalvik_cache_decision;
- }
- // We couldn't find one thats easy. We should now try the system.
+ OatFileAssistant::Status status = oat_file_assistant.GetStatus();
+ switch (status) {
+ case OatFileAssistant::kUpToDate: return kUpToDate;
+ case OatFileAssistant::kNeedsRelocation: return kPatchoatNeeded;
+ case OatFileAssistant::kOutOfDate: return kDexoptNeeded;
}
-
- bool oat_is_pic;
- jbyte system_decision =
- IsDexOptNeededForFile<kVerboseLogging, kReasonLogging>(odex_filename, filename,
- target_instruction_set, &oat_is_pic);
- CHECK(!(system_decision == kPatchoatNeeded && !should_relocate_if_possible))
- << "May not return PatchoatNeeded when patching is disabled.";
-
- // Apps that are compiled with --compile-pic never need to be patchoat-d
- if (oat_is_pic && system_decision == kPatchoatNeeded) {
- system_decision = kUpToDate;
- }
-
- if (require_system_version && system_decision == kPatchoatNeeded
- && dalvik_cache_decision == kUpToDate) {
- // We have a version from system relocated to the cache. Return it.
- return dalvik_cache_decision;
- }
-
- if (should_copy_profile && system_decision == kDexoptNeeded) {
- CopyProfileFile(profile_file.c_str(), prev_profile_file.c_str());
- }
-
- return system_decision;
+ UNREACHABLE();
}
static jbyte DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring javaFilename,
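After this rewrite, IsDexOptNeededInternal mostly reduces to translating an OatFileAssistant status into the legacy dalvik.system.DexFile byte codes. A compact restatement of that translation (values as defined above; a sketch, not the exact function):

#include <cstdint>

enum class OatStatus { kUpToDate, kNeedsRelocation, kOutOfDate };

constexpr int8_t kUpToDateCode = 0;        // dalvik.system.DexFile.UP_TO_DATE
constexpr int8_t kPatchoatNeededCode = 1;  // dalvik.system.DexFile.PATCHOAT_NEEDED
constexpr int8_t kDexoptNeededCode = 2;    // dalvik.system.DexFile.DEXOPT_NEEDED

int8_t ToLegacyCode(OatStatus status) {
  switch (status) {
    case OatStatus::kUpToDate:        return kUpToDateCode;
    case OatStatus::kNeedsRelocation: return kPatchoatNeededCode;
    case OatStatus::kOutOfDate:       return kDexoptNeededCode;
  }
  return kDexoptNeededCode;  // Unreachable for valid enum values.
}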
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 6c82eb2..57ca2b1 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -93,8 +93,7 @@
int fd = dup(originalFd);
if (fd < 0) {
ScopedObjectAccess soa(env);
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/RuntimeException;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/RuntimeException;",
"dup(%d) failed: %s", originalFd, strerror(errno));
return;
}
@@ -148,8 +147,7 @@
static void ThrowUnsupportedOperationException(JNIEnv* env) {
ScopedObjectAccess soa(env);
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewException(throw_location, "Ljava/lang/UnsupportedOperationException;", NULL);
+ soa.Self()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", NULL);
}
static void VMDebug_startInstructionCounting(JNIEnv* env, jclass) {
@@ -196,7 +194,7 @@
// Only one of these may be NULL.
if (javaFilename == NULL && javaFd == NULL) {
ScopedObjectAccess soa(env);
- ThrowNullPointerException(NULL, "fileName == null && fd == null");
+ ThrowNullPointerException("fileName == null && fd == null");
return;
}
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 599d97f..6e3f1bc 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -72,7 +72,7 @@
}
mirror::Class* element_class = soa.Decode<mirror::Class*>(javaElementClass);
if (UNLIKELY(element_class == nullptr)) {
- ThrowNullPointerException(NULL, "element class == null");
+ ThrowNullPointerException("element class == null");
return nullptr;
}
Runtime* runtime = Runtime::Current();
@@ -97,7 +97,7 @@
}
mirror::Class* element_class = soa.Decode<mirror::Class*>(javaElementClass);
if (UNLIKELY(element_class == nullptr)) {
- ThrowNullPointerException(NULL, "element class == null");
+ ThrowNullPointerException("element class == null");
return nullptr;
}
Runtime* runtime = Runtime::Current();
@@ -120,7 +120,7 @@
ScopedFastNativeObjectAccess soa(env);
mirror::Array* array = soa.Decode<mirror::Array*>(javaArray);
if (!array->IsArrayInstance()) {
- ThrowIllegalArgumentException(NULL, "not an array");
+ ThrowIllegalArgumentException("not an array");
return 0;
}
if (Runtime::Current()->GetHeap()->IsMovableObject(array)) {
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 1ea75f3..60d14e9 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -55,8 +55,7 @@
// is especially handy for array types, since we want to avoid
// auto-generating bogus array classes.
if (!IsValidBinaryClassName(name.c_str())) {
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/ClassNotFoundException;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/ClassNotFoundException;",
"Invalid name: %s", name.c_str());
return nullptr;
}
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index 4ea2546..6afe83b 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -29,7 +29,7 @@
static jint String_compareTo(JNIEnv* env, jobject javaThis, jobject javaRhs) {
ScopedFastNativeObjectAccess soa(env);
if (UNLIKELY(javaRhs == NULL)) {
- ThrowNullPointerException(NULL, "rhs == null");
+ ThrowNullPointerException("rhs == null");
return -1;
} else {
return soa.Decode<mirror::String*>(javaThis)->CompareTo(soa.Decode<mirror::String*>(javaRhs));
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index f79be56..736b42b 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -39,8 +39,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string actualType(PrettyTypeOf(array));
Thread* self = Thread::Current();
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayStoreException;",
+ self->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;",
"%s of type %s is not an array", identifier, actualType.c_str());
}
@@ -52,11 +51,11 @@
// Null pointer checks.
if (UNLIKELY(javaSrc == nullptr)) {
- ThrowNullPointerException(nullptr, "src == null");
+ ThrowNullPointerException("src == null");
return;
}
if (UNLIKELY(javaDst == nullptr)) {
- ThrowNullPointerException(nullptr, "dst == null");
+ ThrowNullPointerException("dst == null");
return;
}
@@ -78,8 +77,7 @@
if (UNLIKELY(srcPos < 0) || UNLIKELY(dstPos < 0) || UNLIKELY(count < 0) ||
UNLIKELY(srcPos > srcArray->GetLength() - count) ||
UNLIKELY(dstPos > dstArray->GetLength() - count)) {
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
"src.length=%d srcPos=%d dst.length=%d dstPos=%d length=%d",
srcArray->GetLength(), srcPos, dstArray->GetLength(), dstPos,
count);
@@ -132,8 +130,7 @@
srcComponentType->IsPrimitive())) {
std::string srcType(PrettyTypeOf(srcArray));
std::string dstType(PrettyTypeOf(dstArray));
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayStoreException;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;",
"Incompatible types: src=%s, dst=%s",
srcType.c_str(), dstType.c_str());
return;
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index e4b8db1..d3b52ba 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -100,7 +100,7 @@
ScopedObjectAccess soa(env);
mirror::Object* object = soa.Decode<mirror::Object*>(java_object);
if (object == NULL) {
- ThrowNullPointerException(NULL, "object == null");
+ ThrowNullPointerException("object == null");
return JNI_FALSE;
}
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 3121a90..765f548 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -42,8 +42,7 @@
StackHandleScope<1> hs(soa.Self());
Handle<mirror::Class> c(hs.NewHandle(m->GetDeclaringClass()));
if (UNLIKELY(c->IsAbstract())) {
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/InstantiationException;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;",
"Can't instantiate %s %s",
c->IsInterface() ? "interface" : "abstract class",
PrettyDescriptor(c.Get()).c_str());
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index 2cebf02..9c5bde9 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -34,7 +34,7 @@
mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (kIsSet && field->IsFinal()) {
- ThrowIllegalAccessException(nullptr,
+ ThrowIllegalAccessException(
StringPrintf("Cannot set %s field %s of class %s",
PrettyJavaAccessFlags(field->GetAccessFlags()).c_str(),
PrettyField(field).c_str(),
@@ -45,7 +45,7 @@
mirror::Class* calling_class = nullptr;
if (!VerifyAccess(self, obj, field->GetDeclaringClass(), field->GetAccessFlags(),
&calling_class)) {
- ThrowIllegalAccessException(nullptr,
+ ThrowIllegalAccessException(
StringPrintf("Class %s cannot access %s field %s of class %s",
calling_class == nullptr ? "null" : PrettyClass(calling_class).c_str(),
PrettyJavaAccessFlags(field->GetAccessFlags()).c_str(),
@@ -98,8 +98,8 @@
// Never okay.
break;
}
- ThrowIllegalArgumentException(nullptr, StringPrintf("Not a primitive field: %s",
- PrettyField(f).c_str()).c_str());
+ ThrowIllegalArgumentException(StringPrintf("Not a primitive field: %s",
+ PrettyField(f).c_str()).c_str());
return false;
}
@@ -190,7 +190,7 @@
}
// Widen it if necessary (and possible).
JValue wide_value;
- if (!ConvertPrimitiveValue(nullptr, false, field_type, kPrimitiveType, field_value,
+ if (!ConvertPrimitiveValue(false, field_type, kPrimitiveType, field_value,
&wide_value)) {
DCHECK(soa.Self()->IsExceptionPending());
return JValue();
@@ -270,8 +270,8 @@
FALLTHROUGH_INTENDED;
case Primitive::kPrimVoid:
// Never okay.
- ThrowIllegalArgumentException(nullptr, StringPrintf("Not a primitive field: %s",
- PrettyField(f).c_str()).c_str());
+ ThrowIllegalArgumentException(StringPrintf("Not a primitive field: %s",
+ PrettyField(f).c_str()).c_str());
return;
}
}
@@ -329,14 +329,14 @@
}
Primitive::Type field_type = f->GetTypeAsPrimitiveType();
if (UNLIKELY(field_type == Primitive::kPrimNot)) {
- ThrowIllegalArgumentException(nullptr, StringPrintf("Not a primitive field: %s",
- PrettyField(f).c_str()).c_str());
+ ThrowIllegalArgumentException(StringPrintf("Not a primitive field: %s",
+ PrettyField(f).c_str()).c_str());
return;
}
// Widen the value if necessary (and possible).
JValue wide_value;
- if (!ConvertPrimitiveValue(nullptr, false, kPrimitiveType, field_type, new_value, &wide_value)) {
+ if (!ConvertPrimitiveValue(false, kPrimitiveType, field_type, new_value, &wide_value)) {
DCHECK(soa.Self()->IsExceptionPending());
return;
}
diff --git a/runtime/oat.h b/runtime/oat.h
index f973b28..79cb024 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '5', '6', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '5', '8', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
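Taken together, the class added below gives callers a single flow for obtaining usable dex files: check status, repair if needed, then load. A usage sketch assembled from the methods in this patch, assuming ART's runtime headers and a dex_location string are in scope (illustrative, with error handling and runtime setup omitted):

OatFileAssistant oat_file_assistant(dex_location, kRuntimeISA,
                                    /*load_executable=*/true);
if (!oat_file_assistant.IsInBootClassPath()
    && oat_file_assistant.GetStatus() != OatFileAssistant::kUpToDate) {
  std::string error_msg;
  if (!oat_file_assistant.MakeUpToDate(&error_msg)) {
    LOG(WARNING) << "Failed to make oat file up to date: " << error_msg;
  }
}
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
if (oat_file.get() != nullptr) {
  std::vector<std::unique_ptr<const DexFile>> dex_files =
      OatFileAssistant::LoadDexFiles(*oat_file, dex_location);
}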
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
new file mode 100644
index 0000000..f87fa4f
--- /dev/null
+++ b/runtime/oat_file_assistant.cc
@@ -0,0 +1,952 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "oat_file_assistant.h"
+
+#include <fcntl.h>
+#ifdef __linux__
+#include <sys/sendfile.h>
+#else
+#include <sys/socket.h>
+#endif
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <set>
+
+#include "base/logging.h"
+#include "base/stringprintf.h"
+#include "class_linker.h"
+#include "gc/heap.h"
+#include "gc/space/image_space.h"
+#include "image.h"
+#include "oat.h"
+#include "os.h"
+#include "profiler.h"
+#include "runtime.h"
+#include "ScopedFd.h"
+#include "utils.h"
+
+namespace art {
+
+OatFileAssistant::OatFileAssistant(const char* dex_location,
+ const InstructionSet isa,
+ bool load_executable)
+ : OatFileAssistant(dex_location, nullptr, isa, load_executable, nullptr) { }
+
+OatFileAssistant::OatFileAssistant(const char* dex_location,
+ const char* oat_location,
+ const InstructionSet isa,
+ bool load_executable)
+ : OatFileAssistant(dex_location, oat_location, isa, load_executable, nullptr) { }
+
+OatFileAssistant::OatFileAssistant(const char* dex_location,
+ const InstructionSet isa,
+ bool load_executable,
+ const char* package_name)
+ : OatFileAssistant(dex_location, nullptr, isa, load_executable, package_name) { }
+
+OatFileAssistant::OatFileAssistant(const char* dex_location,
+ const char* oat_location,
+ const InstructionSet isa,
+ bool load_executable,
+ const char* package_name)
+ : dex_location_(dex_location), isa_(isa),
+ package_name_(package_name), load_executable_(load_executable) {
+ if (load_executable_ && isa != kRuntimeISA) {
+ LOG(WARNING) << "OatFileAssistant: Load executable specified, "
+ << "but isa is not kRuntimeISA. Will not attempt to load executable.";
+ load_executable_ = false;
+ }
+
+ // If the user gave a target oat location, save that as the cached oat
+ // location now so we won't try to construct the default location later.
+ if (oat_location != nullptr) {
+ cached_oat_file_name_ = std::string(oat_location);
+ cached_oat_file_name_attempted_ = true;
+ cached_oat_file_name_found_ = true;
+ }
+
+ // If there is no package name given, we will not be able to find any
+ // profiles associated with this dex location. Preemptively mark that to
+ // be the case, rather than trying to find and load the profiles later.
+ // The same applies if profiling is disabled.
+ if (package_name == nullptr
+ || !Runtime::Current()->GetProfilerOptions().IsEnabled()) {
+ profile_load_attempted_ = true;
+ profile_load_succeeded_ = false;
+ old_profile_load_attempted_ = true;
+ old_profile_load_succeeded_ = false;
+ }
+}
+
+OatFileAssistant::~OatFileAssistant() {
+ // Clean up the lock file.
+ if (lock_file_.get() != nullptr) {
+ lock_file_->Erase();
+ TEMP_FAILURE_RETRY(unlink(lock_file_->GetPath().c_str()));
+ }
+}
+
+bool OatFileAssistant::IsInBootClassPath() {
+ // Note: We check the current boot class path, regardless of the ISA
+ // specified by the user. This is okay, because the boot class path should
+ // be the same for all ISAs.
+ // TODO: Can we verify the boot class path is the same for all ISAs?
+ Runtime* runtime = Runtime::Current();
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ const auto& boot_class_path = class_linker->GetBootClassPath();
+ for (size_t i = 0; i < boot_class_path.size(); i++) {
+ if (boot_class_path[i]->GetLocation() == std::string(dex_location_)) {
+ VLOG(oat) << "Dex location " << dex_location_ << " is in boot class path";
+ return true;
+ }
+ }
+ return false;
+}
+
+bool OatFileAssistant::Lock(std::string* error_msg) {
+ CHECK(error_msg != nullptr);
+ CHECK(lock_file_.get() == nullptr) << "OatFileAssistant::Lock already acquired";
+
+ if (OatFileName() == nullptr) {
+ *error_msg = "Failed to determine lock file";
+ return false;
+ }
+ std::string lock_file_name = *OatFileName() + ".flock";
+
+ lock_file_.reset(OS::CreateEmptyFile(lock_file_name.c_str()));
+ if (lock_file_.get() == nullptr) {
+ *error_msg = "Failed to create lock file " + lock_file_name;
+ return false;
+ }
+
+ if (!flock_.Init(lock_file_.get(), error_msg)) {
+ TEMP_FAILURE_RETRY(unlink(lock_file_name.c_str()));
+ return false;
+ }
+ return true;
+}
+
+OatFileAssistant::Status OatFileAssistant::GetStatus() {
+ // TODO: If the profiling code is ever restored, it's worth considering
+ // whether we should check to see if the profile is out of date here.
+
+ if (OdexFileIsOutOfDate()) {
+ // The DEX file is not pre-compiled.
+ // TODO: What if the oat file is not out of date? Could we relocate it
+ // from itself?
+ return OatFileIsUpToDate() ? kUpToDate : kOutOfDate;
+ } else {
+ // The DEX file is pre-compiled. If the oat file isn't up to date, we can
+ // patch the pre-compiled version rather than recompiling.
+ if (OatFileIsUpToDate() || OdexFileIsUpToDate()) {
+ return kUpToDate;
+ } else {
+ return kNeedsRelocation;
+ }
+ }
+}
+
+bool OatFileAssistant::MakeUpToDate(std::string* error_msg) {
+ switch (GetStatus()) {
+ case kUpToDate: return true;
+ case kNeedsRelocation: return RelocateOatFile(error_msg);
+ case kOutOfDate: return GenerateOatFile(error_msg);
+ }
+ UNREACHABLE();
+}
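Restated, GetStatus() above prefers patching an existing precompiled file over recompiling. A sketch with the two file checks abstracted to booleans (derived from the code here, not from ART documentation):

enum class Status { kUpToDate, kNeedsRelocation, kOutOfDate };

Status Classify(bool odex_out_of_date, bool odex_up_to_date,
                bool oat_up_to_date) {
  if (odex_out_of_date) {
    // No usable precompiled file: either the cached oat file works as-is,
    // or a full dex2oat run is required.
    return oat_up_to_date ? Status::kUpToDate : Status::kOutOfDate;
  }
  // A precompiled file exists, so the worst case is relocation, not
  // recompilation.
  return (oat_up_to_date || odex_up_to_date) ? Status::kUpToDate
                                             : Status::kNeedsRelocation;
}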
+
+std::unique_ptr<OatFile> OatFileAssistant::GetBestOatFile() {
+ if (OatFileIsUpToDate()) {
+ oat_file_released_ = true;
+ return std::move(cached_oat_file_);
+ }
+
+ if (OdexFileIsUpToDate()) {
+ oat_file_released_ = true;
+ return std::move(cached_odex_file_);
+ }
+
+ if (load_executable_) {
+ VLOG(oat) << "Oat File Assistant: No relocated oat file found,"
+ << " attempting to fall back to interpreting oat file instead.";
+
+ if (!OatFileIsOutOfDate()) {
+ load_executable_ = false;
+ ClearOatFileCache();
+ if (!OatFileIsOutOfDate()) {
+ oat_file_released_ = true;
+ return std::move(cached_oat_file_);
+ }
+ }
+
+ if (!OdexFileIsOutOfDate()) {
+ load_executable_ = false;
+ ClearOdexFileCache();
+ if (!OdexFileIsOutOfDate()) {
+ oat_file_released_ = true;
+ return std::move(cached_odex_file_);
+ }
+ }
+ }
+
+ return std::unique_ptr<OatFile>();
+}
+
+std::vector<std::unique_ptr<const DexFile>> OatFileAssistant::LoadDexFiles(
+ const OatFile& oat_file, const char* dex_location) {
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+
+ // Load the primary dex file.
+ std::string error_msg;
+ const OatFile::OatDexFile* oat_dex_file = oat_file.GetOatDexFile(
+ dex_location, nullptr, false);
+ if (oat_dex_file == nullptr) {
+ LOG(WARNING) << "Attempt to load out-of-date oat file "
+ << oat_file.GetLocation() << " for dex location " << dex_location;
+ return std::vector<std::unique_ptr<const DexFile>>();
+ }
+
+ std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error_msg);
+ if (dex_file.get() == nullptr) {
+ LOG(WARNING) << "Failed to open dex file from oat dex file: " << error_msg;
+ return std::vector<std::unique_ptr<const DexFile>>();
+ }
+ dex_files.push_back(std::move(dex_file));
+
+ // Load secondary multidex files
+ for (int i = 1; ; i++) {
+ std::string secondary_dex_location = DexFile::GetMultiDexClassesDexName(i, dex_location);
+ oat_dex_file = oat_file.GetOatDexFile(secondary_dex_location.c_str(), nullptr, false);
+ if (oat_dex_file == nullptr) {
+ // There are no more secondary dex files to load.
+ break;
+ }
+
+ dex_file = oat_dex_file->OpenDexFile(&error_msg);
+ if (dex_file.get() == nullptr) {
+ LOG(WARNING) << "Failed to open dex file from oat dex file: " << error_msg;
+ return std::vector<std::unique_ptr<const DexFile>>();
+ }
+ dex_files.push_back(std::move(dex_file));
+ }
+ return dex_files;
+}
+
+const std::string* OatFileAssistant::OdexFileName() {
+ if (!cached_odex_file_name_attempted_) {
+ CHECK(dex_location_ != nullptr) << "OatFileAssistant: null dex location";
+ cached_odex_file_name_attempted_ = true;
+
+ std::string error_msg;
+ cached_odex_file_name_found_ = DexFilenameToOdexFilename(
+ dex_location_, isa_, &cached_odex_file_name_, &error_msg);
+ if (!cached_odex_file_name_found_) {
+ // If we can't figure out the odex file, we treat it as if the odex
+ // file was inaccessible.
+ LOG(WARNING) << "Failed to determine odex file name: " << error_msg;
+ }
+ }
+ return cached_odex_file_name_found_ ? &cached_odex_file_name_ : nullptr;
+}
+
+bool OatFileAssistant::OdexFileExists() {
+ return GetOdexFile() != nullptr;
+}
+
+OatFileAssistant::Status OatFileAssistant::OdexFileStatus() {
+ if (OdexFileIsOutOfDate()) {
+ return kOutOfDate;
+ }
+ if (OdexFileIsUpToDate()) {
+ return kUpToDate;
+ }
+ return kNeedsRelocation;
+}
+
+bool OatFileAssistant::OdexFileIsOutOfDate() {
+ if (!odex_file_is_out_of_date_attempted_) {
+ odex_file_is_out_of_date_attempted_ = true;
+ const OatFile* odex_file = GetOdexFile();
+ if (odex_file == nullptr) {
+ cached_odex_file_is_out_of_date_ = true;
+ } else {
+ cached_odex_file_is_out_of_date_ = GivenOatFileIsOutOfDate(*odex_file);
+ }
+ }
+ return cached_odex_file_is_out_of_date_;
+}
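Each query in this class follows the same one-shot caching discipline: an *_attempted_ flag plus a cached result, so expensive file checks run at most once per assistant. The same pattern in generic form, using std::optional as the attempted/cached pair (a sketch, not ART code):

#include <optional>

template <typename T, typename Compute>
const T& Memoize(std::optional<T>& cache, Compute compute) {
  if (!cache.has_value()) {
    cache = compute();  // Runs at most once; later calls reuse the value.
  }
  return *cache;
}

// Usage: bool out_of_date = Memoize(cached_out_of_date, [&] { return Check(); });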
+
+bool OatFileAssistant::OdexFileNeedsRelocation() {
+ return OdexFileStatus() == kNeedsRelocation;
+}
+
+bool OatFileAssistant::OdexFileIsUpToDate() {
+ if (!odex_file_is_up_to_date_attempted_) {
+ odex_file_is_up_to_date_attempted_ = true;
+ const OatFile* odex_file = GetOdexFile();
+ if (odex_file == nullptr) {
+ cached_odex_file_is_up_to_date_ = false;
+ } else {
+ cached_odex_file_is_up_to_date_ = GivenOatFileIsUpToDate(*odex_file);
+ }
+ }
+ return cached_odex_file_is_up_to_date_;
+}
+
+const std::string* OatFileAssistant::OatFileName() {
+ if (!cached_oat_file_name_attempted_) {
+ cached_oat_file_name_attempted_ = true;
+
+ // Compute the oat file name from the dex location.
+ CHECK(dex_location_ != nullptr) << "OatFileAssistant: null dex location";
+
+ // TODO: The oat file assistant should be the definitive place for
+ // determining the oat file name from the dex location, not
+ // GetDalvikCacheFilename.
+ std::string cache_dir = StringPrintf("%s%s",
+ DalvikCacheDirectory().c_str(), GetInstructionSetString(isa_));
+ std::string error_msg;
+ cached_oat_file_name_found_ = GetDalvikCacheFilename(dex_location_,
+ cache_dir.c_str(), &cached_oat_file_name_, &error_msg);
+ if (!cached_oat_file_name_found_) {
+ // If we can't determine the oat file name, we treat the oat file as
+ // inaccessible.
+ LOG(WARNING) << "Failed to determine oat file name for dex location "
+ << dex_location_ << ": " << error_msg;
+ }
+ }
+ return cached_oat_file_name_found_ ? &cached_oat_file_name_ : nullptr;
+}
+
+bool OatFileAssistant::OatFileExists() {
+ return GetOatFile() != nullptr;
+}
+
+OatFileAssistant::Status OatFileAssistant::OatFileStatus() {
+ if (OatFileIsOutOfDate()) {
+ return kOutOfDate;
+ }
+ if (OatFileIsUpToDate()) {
+ return kUpToDate;
+ }
+ return kNeedsRelocation;
+}
+
+bool OatFileAssistant::OatFileIsOutOfDate() {
+ if (!oat_file_is_out_of_date_attempted_) {
+ oat_file_is_out_of_date_attempted_ = true;
+ const OatFile* oat_file = GetOatFile();
+ if (oat_file == nullptr) {
+ cached_oat_file_is_out_of_date_ = true;
+ } else {
+ cached_oat_file_is_out_of_date_ = GivenOatFileIsOutOfDate(*oat_file);
+ }
+ }
+ return cached_oat_file_is_out_of_date_;
+}
+
+bool OatFileAssistant::OatFileNeedsRelocation() {
+ return OatFileStatus() == kNeedsRelocation;
+}
+
+bool OatFileAssistant::OatFileIsUpToDate() {
+ if (!oat_file_is_up_to_date_attempted_) {
+ oat_file_is_up_to_date_attempted_ = true;
+ const OatFile* oat_file = GetOatFile();
+ if (oat_file == nullptr) {
+ cached_oat_file_is_up_to_date_ = false;
+ } else {
+ cached_oat_file_is_up_to_date_ = GivenOatFileIsUpToDate(*oat_file);
+ }
+ }
+ return cached_oat_file_is_up_to_date_;
+}
+
+OatFileAssistant::Status OatFileAssistant::GivenOatFileStatus(const OatFile& file) {
+ // TODO: This could cause GivenOatFileIsOutOfDate to be called twice, which
+ // is more work than we need to do. If performance becomes a concern, and
+ // this method is actually called, this should be fixed.
+ if (GivenOatFileIsOutOfDate(file)) {
+ return kOutOfDate;
+ }
+ if (GivenOatFileIsUpToDate(file)) {
+ return kUpToDate;
+ }
+ return kNeedsRelocation;
+}
+
+bool OatFileAssistant::GivenOatFileIsOutOfDate(const OatFile& file) {
+ // Verify the dex checksum.
+  // Note: GetOatDexFile will return nullptr if the dex checksum doesn't match
+ // what we provide, which verifies the primary dex checksum for us.
+ const uint32_t* dex_checksum_pointer = GetRequiredDexChecksum();
+ const OatFile::OatDexFile* oat_dex_file = file.GetOatDexFile(
+ dex_location_, dex_checksum_pointer, false);
+  if (oat_dex_file == nullptr) {
+ return true;
+ }
+
+ // Verify the dex checksums for any secondary multidex files
+ for (int i = 1; ; i++) {
+ std::string secondary_dex_location
+ = DexFile::GetMultiDexClassesDexName(i, dex_location_);
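+    // For example (illustrative): for i == 1 and a dex location of
+    // "/foo/bar.jar", the secondary dex location is typically
+    // "/foo/bar.jar:classes2.dex".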
+ const OatFile::OatDexFile* secondary_oat_dex_file
+ = file.GetOatDexFile(secondary_dex_location.c_str(), nullptr, false);
+    if (secondary_oat_dex_file == nullptr) {
+ // There are no more secondary dex files to check.
+ break;
+ }
+
+ std::string error_msg;
+ uint32_t expected_secondary_checksum = 0;
+ if (DexFile::GetChecksum(secondary_dex_location.c_str(),
+ &expected_secondary_checksum, &error_msg)) {
+ uint32_t actual_secondary_checksum
+ = secondary_oat_dex_file->GetDexFileLocationChecksum();
+ if (expected_secondary_checksum != actual_secondary_checksum) {
+ VLOG(oat) << "Dex checksum does not match for secondary dex: "
+ << secondary_dex_location
+ << ". Expected: " << expected_secondary_checksum
+ << ", Actual: " << actual_secondary_checksum;
+        // A checksum mismatch for a secondary dex file means the oat file is
+        // out of date.
+        return true;
+ }
+ } else {
+ // If we can't get the checksum for the secondary location, we assume
+ // the dex checksum is up to date for this and all other secondary dex
+ // files.
+ break;
+ }
+ }
+
+ // Verify the image checksum
+ const ImageInfo* image_info = GetImageInfo();
+ if (image_info == nullptr) {
+ VLOG(oat) << "No image for oat image checksum to match against.";
+ return true;
+ }
+
+ if (file.GetOatHeader().GetImageFileLocationOatChecksum() != image_info->oat_checksum) {
+ VLOG(oat) << "Oat image checksum does not match image checksum.";
+ return true;
+ }
+
+ // The checksums are all good; the dex file is not out of date.
+ return false;
+}
+
+bool OatFileAssistant::GivenOatFileNeedsRelocation(const OatFile& file) {
+ return GivenOatFileStatus(file) == kNeedsRelocation;
+}
+
+bool OatFileAssistant::GivenOatFileIsUpToDate(const OatFile& file) {
+ if (GivenOatFileIsOutOfDate(file)) {
+ return false;
+ }
+
+ if (file.IsPic()) {
+ return true;
+ }
+
+ const ImageInfo* image_info = GetImageInfo();
+ if (image_info == nullptr) {
+ VLOG(oat) << "No image for to check oat relocation against.";
+ return false;
+ }
+
+ // Verify the oat_data_begin recorded for the image in the oat file matches
+ // the actual oat_data_begin for boot.oat in the image.
+ const OatHeader& oat_header = file.GetOatHeader();
+ uintptr_t oat_data_begin = oat_header.GetImageFileLocationOatDataBegin();
+ if (oat_data_begin != image_info->oat_data_begin) {
+ VLOG(oat) << file.GetLocation() <<
+ ": Oat file image oat_data_begin (" << oat_data_begin << ")"
+ << " does not match actual image oat_data_begin ("
+ << image_info->oat_data_begin << ")";
+ return false;
+ }
+
+ // Verify the oat_patch_delta recorded for the image in the oat file matches
+ // the actual oat_patch_delta for the image.
+ int32_t oat_patch_delta = oat_header.GetImagePatchDelta();
+ if (oat_patch_delta != image_info->patch_delta) {
+ VLOG(oat) << file.GetLocation() <<
+ ": Oat file image patch delta (" << oat_patch_delta << ")"
+ << " does not match actual image patch delta ("
+ << image_info->patch_delta << ")";
+ return false;
+ }
+ return true;
+}
+
+bool OatFileAssistant::ProfileExists() {
+ return GetProfile() != nullptr;
+}
+
+bool OatFileAssistant::OldProfileExists() {
+ return GetOldProfile() != nullptr;
+}
+
+// TODO: The IsProfileChangeSignificant implementation was copied from likely
+// bit-rotted code.
+bool OatFileAssistant::IsProfileChangeSignificant() {
+ ProfileFile* profile = GetProfile();
+ if (profile == nullptr) {
+ return false;
+ }
+
+ ProfileFile* old_profile = GetOldProfile();
+ if (old_profile == nullptr) {
+ return false;
+ }
+
+ // TODO: The following code to compare two profile files should live with
+ // the rest of the profiler code, not the oat file assistant code.
+
+ // A change in profile is considered significant if X% (change_thr property)
+  // of the top K% (compile_thr property) samples have changed.
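+  // For example (illustrative): with a change threshold of 10%, if the top K%
+  // set contains 50 samples and 6 of them do not appear in the old top K%
+  // set, the change is 12% and is considered significant.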
+ const ProfilerOptions& options = Runtime::Current()->GetProfilerOptions();
+ const double top_k_threshold = options.GetTopKThreshold();
+ const double change_threshold = options.GetTopKChangeThreshold();
+ std::set<std::string> top_k, old_top_k;
+ profile->GetTopKSamples(top_k, top_k_threshold);
+ old_profile->GetTopKSamples(old_top_k, top_k_threshold);
+ std::set<std::string> diff;
+ std::set_difference(top_k.begin(), top_k.end(), old_top_k.begin(),
+ old_top_k.end(), std::inserter(diff, diff.end()));
+
+ // TODO: consider using the usedPercentage instead of the plain diff count.
+ double change_percent = 100.0 * static_cast<double>(diff.size())
+ / static_cast<double>(top_k.size());
+ std::set<std::string>::iterator end = diff.end();
+  for (std::set<std::string>::iterator it = diff.begin(); it != end; ++it) {
+ VLOG(oat) << "Profile new in topK: " << *it;
+ }
+
+ if (change_percent > change_threshold) {
+ VLOG(oat) << "Oat File Assistant: Profile for " << dex_location_
+ << "has changed significantly: (top "
+ << top_k_threshold << "% samples changed in proportion of "
+ << change_percent << "%)";
+ return true;
+ }
+ return false;
+}
+
+// TODO: The CopyProfileFile implementation was copied from likely bit-rotted
+// code.
+void OatFileAssistant::CopyProfileFile() {
+ if (!ProfileExists()) {
+ return;
+ }
+
+ std::string profile_name = ProfileFileName();
+ std::string old_profile_name = OldProfileFileName();
+
+  // Note: per the method contract, this copies the current profile to the
+  // old profile location, so the current profile is the source.
+  ScopedFd src(open(profile_name.c_str(), O_RDONLY));
+  if (src.get() == -1) {
+    PLOG(WARNING) << "Failed to open profile file " << profile_name
+        << ". My uid:gid is " << getuid() << ":" << getgid();
+    return;
+  }
+
+  struct stat stat_src;
+  if (fstat(src.get(), &stat_src) == -1) {
+    PLOG(WARNING) << "Failed to get stats for profile file " << profile_name
+        << ". My uid:gid is " << getuid() << ":" << getgid();
+    return;
+  }
+
+  // Create the copy with rw------- (only accessible by system)
+  ScopedFd dst(open(old_profile_name.c_str(), O_WRONLY|O_CREAT|O_TRUNC, 0600));
+  if (dst.get() == -1) {
+    PLOG(WARNING) << "Failed to create/write prev profile file " << old_profile_name
+        << ". My uid:gid is " << getuid() << ":" << getgid();
+    return;
+  }
+
+#ifdef __linux__
+  if (sendfile(dst.get(), src.get(), nullptr, stat_src.st_size) == -1) {
+#else
+  off_t len = 0;  // A length of 0 means copy until end of file (BSD sendfile).
+  if (sendfile(dst.get(), src.get(), 0, &len, nullptr, 0) == -1) {
+#endif
+    PLOG(WARNING) << "Failed to copy profile file " << profile_name
+        << " to " << old_profile_name << ". My uid:gid is " << getuid()
+        << ":" << getgid();
+  }
+}
+
+bool OatFileAssistant::RelocateOatFile(std::string* error_msg) {
+ CHECK(error_msg != nullptr);
+
+ if (OdexFileName() == nullptr) {
+ *error_msg = "Patching of oat file for dex location "
+ + std::string(dex_location_)
+ + " not attempted because the odex file name could not be determined.";
+ return false;
+ }
+ const std::string& odex_file_name = *OdexFileName();
+
+ if (OatFileName() == nullptr) {
+ *error_msg = "Patching of oat file for dex location "
+ + std::string(dex_location_)
+ + " not attempted because the oat file name could not be determined.";
+ return false;
+ }
+ const std::string& oat_file_name = *OatFileName();
+
+ const ImageInfo* image_info = GetImageInfo();
+ Runtime* runtime = Runtime::Current();
+ if (image_info == nullptr) {
+ *error_msg = "Patching of oat file " + oat_file_name
+ + " not attempted because no image location was found.";
+ return false;
+ }
+
+ if (!runtime->IsDex2OatEnabled()) {
+ *error_msg = "Patching of oat file " + oat_file_name
+ + " not attempted because dex2oat is disabled";
+ return false;
+ }
+
+ std::vector<std::string> argv;
+ argv.push_back(runtime->GetPatchoatExecutable());
+ argv.push_back("--instruction-set=" + std::string(GetInstructionSetString(isa_)));
+ argv.push_back("--input-oat-file=" + odex_file_name);
+ argv.push_back("--output-oat-file=" + oat_file_name);
+ argv.push_back("--patched-image-location=" + image_info->location);
+
+ std::string command_line(Join(argv, ' '));
+ if (!Exec(argv, error_msg)) {
+ // Manually delete the file. This ensures there is no garbage left over if
+ // the process unexpectedly died.
+ TEMP_FAILURE_RETRY(unlink(oat_file_name.c_str()));
+ return false;
+ }
+
+ // Mark that the oat file has changed and we should try to reload.
+ ClearOatFileCache();
+ return true;
+}
+
+bool OatFileAssistant::GenerateOatFile(std::string* error_msg) {
+ CHECK(error_msg != nullptr);
+
+ if (OatFileName() == nullptr) {
+ *error_msg = "Generation of oat file for dex location "
+ + std::string(dex_location_)
+ + " not attempted because the oat file name could not be determined.";
+ return false;
+ }
+ const std::string& oat_file_name = *OatFileName();
+
+ Runtime* runtime = Runtime::Current();
+ if (!runtime->IsDex2OatEnabled()) {
+ *error_msg = "Generation of oat file " + oat_file_name
+ + " not attempted because dex2oat is disabled";
+ return false;
+ }
+
+ std::vector<std::string> args;
+ args.push_back("--dex-file=" + std::string(dex_location_));
+ args.push_back("--oat-file=" + oat_file_name);
+
+ // dex2oat ignores missing dex files and doesn't report an error.
+ // Check explicitly here so we can detect the error properly.
+ // TODO: Why does dex2oat behave that way?
+ if (!OS::FileExists(dex_location_)) {
+ *error_msg = "Dex location " + std::string(dex_location_) + " does not exists.";
+ return false;
+ }
+
+ if (!Dex2Oat(args, error_msg)) {
+ // Manually delete the file. This ensures there is no garbage left over if
+ // the process unexpectedly died.
+ TEMP_FAILURE_RETRY(unlink(oat_file_name.c_str()));
+ return false;
+ }
+
+ // Mark that the oat file has changed and we should try to reload.
+ ClearOatFileCache();
+ return true;
+}
+
+bool OatFileAssistant::Dex2Oat(const std::vector<std::string>& args,
+ std::string* error_msg) {
+ Runtime* runtime = Runtime::Current();
+ std::string image_location = ImageLocation();
+ if (image_location.empty()) {
+ *error_msg = "No image location found for Dex2Oat.";
+ return false;
+ }
+
+ std::vector<std::string> argv;
+ argv.push_back(runtime->GetCompilerExecutable());
+ argv.push_back("--runtime-arg");
+ argv.push_back("-classpath");
+ argv.push_back("--runtime-arg");
+ argv.push_back(runtime->GetClassPathString());
+ runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
+
+ if (!runtime->IsVerificationEnabled()) {
+ argv.push_back("--compiler-filter=verify-none");
+ }
+
+ if (runtime->MustRelocateIfPossible()) {
+ argv.push_back("--runtime-arg");
+ argv.push_back("-Xrelocate");
+ } else {
+ argv.push_back("--runtime-arg");
+ argv.push_back("-Xnorelocate");
+ }
+
+ if (!kIsTargetBuild) {
+ argv.push_back("--host");
+ }
+
+ argv.push_back("--boot-image=" + image_location);
+
+ std::vector<std::string> compiler_options = runtime->GetCompilerOptions();
+ argv.insert(argv.end(), compiler_options.begin(), compiler_options.end());
+
+ argv.insert(argv.end(), args.begin(), args.end());
+
+ std::string command_line(Join(argv, ' '));
+ return Exec(argv, error_msg);
+}
+
+bool OatFileAssistant::DexFilenameToOdexFilename(const std::string& location,
+ InstructionSet isa, std::string* odex_filename, std::string* error_msg) {
+ CHECK(odex_filename != nullptr);
+ CHECK(error_msg != nullptr);
+
+ // The odex file name is formed by replacing the dex_location extension with
+ // .odex and inserting an isa directory. For example:
+ // location = /foo/bar/baz.jar
+ // odex_location = /foo/bar/<isa>/baz.odex
+
+ // Find the directory portion of the dex location and add the isa directory.
+ size_t pos = location.rfind('/');
+ if (pos == std::string::npos) {
+ *error_msg = "Dex location " + location + " has no directory.";
+ return false;
+ }
+ std::string dir = location.substr(0, pos+1);
+ dir += std::string(GetInstructionSetString(isa));
+
+  // Find the file portion of the dex location. Note: pos is known to be
+  // valid here, because the directory check above would have failed
+  // otherwise.
+  std::string file = location.substr(pos+1);
+
+ // Get the base part of the file without the extension.
+ pos = file.rfind('.');
+ if (pos == std::string::npos) {
+ *error_msg = "Dex location " + location + " has no extension.";
+ return false;
+ }
+ std::string base = file.substr(0, pos);
+
+ *odex_filename = dir + "/" + base + ".odex";
+ return true;
+}
+
+std::string OatFileAssistant::DalvikCacheDirectory() {
+  // Note: We don't cache this, because it is only called once by
+  // OatFileName and by the profiling code, whose performance we don't care
+  // about because that code isn't used in practice.
+
+ // TODO: The work done in GetDalvikCache is overkill for what we need.
+ // Ideally a new API for getting the DalvikCacheDirectory the way we want
+ // (without existence testing, creation, or death) is provided with the rest
+ // of the GetDalvikCache family of functions. Until such an API is in place,
+ // we use GetDalvikCache to avoid duplicating the logic for determining the
+ // dalvik cache directory.
+ std::string result;
+ bool have_android_data;
+ bool dalvik_cache_exists;
+ bool is_global_cache;
+ GetDalvikCache("", false, &result, &have_android_data, &dalvik_cache_exists, &is_global_cache);
+ return result;
+}
+
+std::string OatFileAssistant::ProfileFileName() {
+ if (package_name_ != nullptr) {
+ return DalvikCacheDirectory() + std::string("profiles/") + package_name_;
+ }
+ return "";
+}
+
+std::string OatFileAssistant::OldProfileFileName() {
+ std::string profile_name = ProfileFileName();
+ if (profile_name.empty()) {
+ return "";
+ }
+ return profile_name + "@old";
+}
+
+std::string OatFileAssistant::ImageLocation() {
+ Runtime* runtime = Runtime::Current();
+ const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
+ if (image_space == nullptr) {
+ return "";
+ }
+ return image_space->GetImageLocation();
+}
+
+const uint32_t* OatFileAssistant::GetRequiredDexChecksum() {
+ if (!required_dex_checksum_attempted) {
+ required_dex_checksum_attempted = true;
+ required_dex_checksum_found = false;
+ std::string error_msg;
+ CHECK(dex_location_ != nullptr) << "OatFileAssistant provided no dex location";
+ if (DexFile::GetChecksum(dex_location_, &cached_required_dex_checksum, &error_msg)) {
+ required_dex_checksum_found = true;
+ } else {
+ // This can happen if the original dex file has been stripped from the
+ // apk.
+ VLOG(oat) << "OatFileAssistant: " << error_msg;
+
+ // Get the checksum from the odex if we can.
+ const OatFile* odex_file = GetOdexFile();
+ if (odex_file != nullptr) {
+ const OatFile::OatDexFile* odex_dex_file = odex_file->GetOatDexFile(
+ dex_location_, nullptr, false);
+ if (odex_dex_file != nullptr) {
+ cached_required_dex_checksum = odex_dex_file->GetDexFileLocationChecksum();
+ required_dex_checksum_found = true;
+ }
+ }
+ }
+ }
+ return required_dex_checksum_found ? &cached_required_dex_checksum : nullptr;
+}
+
+const OatFile* OatFileAssistant::GetOdexFile() {
+ CHECK(!oat_file_released_) << "OdexFile called after oat file released.";
+ if (!odex_file_load_attempted_) {
+ odex_file_load_attempted_ = true;
+ if (OdexFileName() != nullptr) {
+ const std::string& odex_file_name = *OdexFileName();
+ std::string error_msg;
+ cached_odex_file_.reset(OatFile::Open(odex_file_name.c_str(),
+ odex_file_name.c_str(), nullptr, nullptr, load_executable_,
+ &error_msg));
+ if (cached_odex_file_.get() == nullptr) {
+ VLOG(oat) << "OatFileAssistant test for existing pre-compiled oat file "
+ << odex_file_name << ": " << error_msg;
+ }
+ }
+ }
+ return cached_odex_file_.get();
+}
+
+void OatFileAssistant::ClearOdexFileCache() {
+ odex_file_load_attempted_ = false;
+ cached_odex_file_.reset();
+ odex_file_is_out_of_date_attempted_ = false;
+ odex_file_is_up_to_date_attempted_ = false;
+}
+
+const OatFile* OatFileAssistant::GetOatFile() {
+ CHECK(!oat_file_released_) << "OatFile called after oat file released.";
+ if (!oat_file_load_attempted_) {
+ oat_file_load_attempted_ = true;
+ if (OatFileName() != nullptr) {
+ const std::string& oat_file_name = *OatFileName();
+ std::string error_msg;
+ cached_oat_file_.reset(OatFile::Open(oat_file_name.c_str(),
+ oat_file_name.c_str(), nullptr, nullptr, load_executable_, &error_msg));
+ if (cached_oat_file_.get() == nullptr) {
+ VLOG(oat) << "OatFileAssistant test for existing oat file "
+ << oat_file_name << ": " << error_msg;
+ }
+ }
+ }
+ return cached_oat_file_.get();
+}
+
+void OatFileAssistant::ClearOatFileCache() {
+ oat_file_load_attempted_ = false;
+ cached_oat_file_.reset();
+ oat_file_is_out_of_date_attempted_ = false;
+ oat_file_is_up_to_date_attempted_ = false;
+}
+
+const OatFileAssistant::ImageInfo* OatFileAssistant::GetImageInfo() {
+ if (!image_info_load_attempted_) {
+ image_info_load_attempted_ = true;
+
+ Runtime* runtime = Runtime::Current();
+ const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
+ if (image_space != nullptr) {
+ cached_image_info_.location = image_space->GetImageLocation();
+
+ if (isa_ == kRuntimeISA) {
+ const ImageHeader& image_header = image_space->GetImageHeader();
+ cached_image_info_.oat_checksum = image_header.GetOatChecksum();
+ cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin());
+ cached_image_info_.patch_delta = image_header.GetPatchDelta();
+ } else {
+ std::unique_ptr<ImageHeader> image_header(
+ gc::space::ImageSpace::ReadImageHeaderOrDie(
+ cached_image_info_.location.c_str(), isa_));
+ cached_image_info_.oat_checksum = image_header->GetOatChecksum();
+ cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin());
+ cached_image_info_.patch_delta = image_header->GetPatchDelta();
+ }
+ }
+ image_info_load_succeeded_ = (image_space != nullptr);
+ }
+ return image_info_load_succeeded_ ? &cached_image_info_ : nullptr;
+}
+
+ProfileFile* OatFileAssistant::GetProfile() {
+ if (!profile_load_attempted_) {
+ CHECK(package_name_ != nullptr)
+ << "pakage_name_ is nullptr: "
+ << "profile_load_attempted_ should have been true";
+ profile_load_attempted_ = true;
+ std::string profile_name = ProfileFileName();
+ if (!profile_name.empty()) {
+ profile_load_succeeded_ = cached_profile_.LoadFile(profile_name);
+ }
+ }
+ return profile_load_succeeded_ ? &cached_profile_ : nullptr;
+}
+
+ProfileFile* OatFileAssistant::GetOldProfile() {
+ if (!old_profile_load_attempted_) {
+ CHECK(package_name_ != nullptr)
+ << "pakage_name_ is nullptr: "
+ << "old_profile_load_attempted_ should have been true";
+ old_profile_load_attempted_ = true;
+ std::string old_profile_name = OldProfileFileName();
+ if (!old_profile_name.empty()) {
+ old_profile_load_succeeded_ = cached_old_profile_.LoadFile(old_profile_name);
+ }
+ }
+ return old_profile_load_succeeded_ ? &cached_old_profile_ : nullptr;
+}
+
+} // namespace art
+
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
new file mode 100644
index 0000000..958b440
--- /dev/null
+++ b/runtime/oat_file_assistant.h
@@ -0,0 +1,431 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_OAT_FILE_ASSISTANT_H_
+#define ART_RUNTIME_OAT_FILE_ASSISTANT_H_
+
+#include <cstdint>
+#include <memory>
+#include <string>
+
+#include "arch/instruction_set.h"
+#include "base/scoped_flock.h"
+#include "base/unix_file/fd_file.h"
+#include "oat_file.h"
+#include "os.h"
+#include "profiler.h"
+
+namespace art {
+
+// Class for assisting with oat file management.
+//
+// This class collects common utilities for determining the status of an oat
+// file on the device, updating the oat file, and loading the oat file.
+//
+// The oat file assistant is intended to be used with dex locations not on the
+// boot class path. See the IsInBootClassPath method for a way to check if the
+// dex location is in the boot class path.
+//
+// TODO: All the profiling related code is old and untested. It should either
+// be restored and tested, or removed.
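+//
+// A minimal usage sketch (illustrative only; the dex location is hypothetical
+// and error handling is elided):
+//
+//   OatFileAssistant oat_file_assistant("/data/app/foo.apk", kRuntimeISA, true);
+//   std::string error_msg;
+//   if (oat_file_assistant.GetStatus() != OatFileAssistant::kUpToDate &&
+//       !oat_file_assistant.MakeUpToDate(&error_msg)) {
+//     LOG(WARNING) << error_msg;
+//   }
+//   std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+//   // No other OatFileAssistant methods should be called after GetBestOatFile.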
+class OatFileAssistant {
+ public:
+ enum Status {
+ // kOutOfDate - An oat file is said to be out of date if the file does not
+ // exist, or is out of date with respect to the dex file or boot image.
+ kOutOfDate,
+
+ // kNeedsRelocation - An oat file is said to need relocation if the code
+ // is up to date, but not yet properly relocated for address space layout
+ // randomization (ASLR). In this case, the oat file is neither "out of
+ // date" nor "up to date".
+ kNeedsRelocation,
+
+ // kUpToDate - An oat file is said to be up to date if it is not out of
+ // date and has been properly relocated for the purposes of ASLR.
+ kUpToDate,
+ };
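+  // For example (illustrative, see the accompanying tests): a dex file with
+  // no compiled code at all is kOutOfDate, a valid but unrelocated odex file
+  // is kNeedsRelocation, and a PIC odex file is kUpToDate, because PIC code
+  // needs no relocation.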
+
+ // Constructs an OatFileAssistant object to assist the oat file
+ // corresponding to the given dex location with the target instruction set.
+ //
+ // The dex_location must not be NULL and should remain available and
+ // unchanged for the duration of the lifetime of the OatFileAssistant object.
+ // Typically the dex_location is the absolute path to the original,
+ // un-optimized dex file.
+  //
+ // Note: Currently the dex_location must have an extension.
+ // TODO: Relax this restriction?
+ //
+ // The isa should be either the 32 bit or 64 bit variant for the current
+ // device. For example, on an arm device, use arm or arm64. An oat file can
+ // be loaded executable only if the ISA matches the current runtime.
+ OatFileAssistant(const char* dex_location, const InstructionSet isa,
+ bool load_executable);
+
+ // Constructs an OatFileAssistant, providing an explicit target oat_location
+ // to use instead of the standard oat location.
+ OatFileAssistant(const char* dex_location, const char* oat_location,
+ const InstructionSet isa, bool load_executable);
+
+ // Constructs an OatFileAssistant, providing an additional package_name used
+ // solely for the purpose of locating profile files.
+ //
+ // TODO: Why is the name of the profile file based on the package name and
+ // not the dex location? If there is no technical reason the dex_location
+ // can't be used, we should prefer that instead.
+ OatFileAssistant(const char* dex_location, const InstructionSet isa,
+ bool load_executable, const char* package_name);
+
+ // Constructs an OatFileAssistant with user specified oat location and a
+ // package name.
+ OatFileAssistant(const char* dex_location, const char* oat_location,
+ const InstructionSet isa, bool load_executable,
+ const char* package_name);
+
+ ~OatFileAssistant();
+
+ // Returns true if the dex location refers to an element of the boot class
+ // path.
+ bool IsInBootClassPath();
+
+ // Obtains a lock on the target oat file.
+ // Only one OatFileAssistant object can hold the lock for a target oat file
+ // at a time. The Lock is released automatically when the OatFileAssistant
+ // object goes out of scope. The Lock() method must not be called if the
+ // lock has already been acquired.
+ //
+ // Returns true on success.
+ // Returns false on error, in which case error_msg will contain more
+ // information on the error.
+ //
+ // The 'error_msg' argument must not be null.
+ //
+ // This is intended to be used to avoid race conditions when multiple
+ // processes generate oat files, such as when a foreground Activity and
+ // a background Service both use DexClassLoaders pointing to the same dex
+ // file.
+ bool Lock(std::string* error_msg);
+
+ // Returns the overall compilation status for the given dex location.
+ Status GetStatus();
+
+ // Attempts to generate or relocate the oat file as needed to make it up to
+ // date.
+ // Returns true on success.
+ //
+ // If there is a failure, the value of error_msg will be set to a string
+ // describing why there was failure. error_msg must not be nullptr.
+ bool MakeUpToDate(std::string* error_msg);
+
+ // Returns an oat file that can be used for loading dex files.
+ // Returns nullptr if no suitable oat file was found.
+ //
+ // After this call, no other methods of the OatFileAssistant should be
+ // called, because access to the loaded oat file has been taken away from
+ // the OatFileAssistant object.
+ std::unique_ptr<OatFile> GetBestOatFile();
+
+ // Loads the dex files in the given oat file for the given dex location.
+ // The oat file should be up to date for the given dex location.
+ // This loads multiple dex files in the case of multidex.
+ // Returns an empty vector if no dex files for that location could be loaded
+ // from the oat file.
+ //
+ // The caller is responsible for freeing the dex_files returned, if any. The
+ // dex_files will only remain valid as long as the oat_file is valid.
+ static std::vector<std::unique_ptr<const DexFile>> LoadDexFiles(
+ const OatFile& oat_file, const char* dex_location);
+
+ // If the dex file has been pre-compiled on the host, the compiled oat file
+ // will have the extension .odex, and is referred to as the odex file.
+ // It is called odex for legacy reasons; the file is really an oat file. The
+ // odex file will typically have a patch delta of 0 and need to be relocated
+ // before use for the purposes of ASLR.
+ // These methods return the location and status of the odex file for the dex
+ // location.
+ // Notes:
+ // * OdexFileName may return null if the odex file name could not be
+ // determined.
+ const std::string* OdexFileName();
+ bool OdexFileExists();
+ Status OdexFileStatus();
+ bool OdexFileIsOutOfDate();
+ bool OdexFileNeedsRelocation();
+ bool OdexFileIsUpToDate();
+
+  // When the dex file is compiled on the target device, the oat file is the
+ // result. The oat file will have been relocated to some
+ // (possibly-out-of-date) offset for ASLR.
+ // These methods return the location and status of the target oat file for
+ // the dex location.
+ //
+ // Notes:
+ // * To get the overall status of the compiled code for this dex_location,
+ // use the GetStatus() method, not the OatFileStatus() method.
+ // * OatFileName may return null if the oat file name could not be
+ // determined.
+ const std::string* OatFileName();
+ bool OatFileExists();
+ Status OatFileStatus();
+ bool OatFileIsOutOfDate();
+ bool OatFileNeedsRelocation();
+ bool OatFileIsUpToDate();
+
+ // These methods return the status for a given opened oat file with respect
+ // to the dex location.
+ Status GivenOatFileStatus(const OatFile& file);
+ bool GivenOatFileIsOutOfDate(const OatFile& file);
+ bool GivenOatFileNeedsRelocation(const OatFile& file);
+ bool GivenOatFileIsUpToDate(const OatFile& file);
+
+ // Returns true if there is an accessible profile associated with the dex
+ // location.
+ // This returns false if profiling is disabled.
+ bool ProfileExists();
+
+ // The old profile is a file containing a previous snapshot of profiling
+ // information associated with the dex file code. This is used to track how
+ // the profiling information has changed over time.
+ //
+ // Returns true if there is an accessible old profile associated with the
+ // dex location.
+ // This returns false if profiling is disabled.
+ bool OldProfileExists();
+
+ // Returns true if there has been a significant change between the old
+ // profile and the current profile.
+ // This returns false if profiling is disabled.
+ bool IsProfileChangeSignificant();
+
+ // Copy the current profile to the old profile location.
+ void CopyProfileFile();
+
+ // Generates the oat file by relocation from the odex file.
+ // This does not check the current status before attempting to relocate the
+ // oat file.
+ // Returns true on success.
+ // This will fail if dex2oat is not enabled in the current runtime.
+ //
+ // If there is a failure, the value of error_msg will be set to a string
+ // describing why there was failure. error_msg must not be nullptr.
+ bool RelocateOatFile(std::string* error_msg);
+
+ // Generate the oat file from the dex file.
+ // This does not check the current status before attempting to generate the
+ // oat file.
+ // Returns true on success.
+ // This will fail if dex2oat is not enabled in the current runtime.
+ //
+ // If there is a failure, the value of error_msg will be set to a string
+ // describing why there was failure. error_msg must not be nullptr.
+ bool GenerateOatFile(std::string* error_msg);
+
+ // Executes dex2oat using the current runtime configuration overridden with
+ // the given arguments. This does not check to see if dex2oat is enabled in
+ // the runtime configuration.
+ // Returns true on success.
+ //
+ // If there is a failure, the value of error_msg will be set to a string
+ // describing why there was failure. error_msg must not be nullptr.
+ //
+ // TODO: The OatFileAssistant probably isn't the right place to have this
+ // function.
+ static bool Dex2Oat(const std::vector<std::string>& args, std::string* error_msg);
+
+ // Constructs the odex file name for the given dex location.
+ // Returns true on success, in which case odex_filename is set to the odex
+ // file name.
+ // Returns false on error, in which case error_msg describes the error.
+ // Neither odex_filename nor error_msg may be null.
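+  // For example, with an arm isa, "/foo/bar/baz.jar" maps to
+  // "/foo/bar/arm/baz.odex".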
+ static bool DexFilenameToOdexFilename(const std::string& location,
+ InstructionSet isa, std::string* odex_filename, std::string* error_msg);
+
+ private:
+ struct ImageInfo {
+ uint32_t oat_checksum = 0;
+ uintptr_t oat_data_begin = 0;
+ int32_t patch_delta = 0;
+ std::string location;
+ };
+
+ // Returns the path to the dalvik cache directory.
+ // Does not check existence of the cache or try to create it.
+ // Includes the trailing slash.
+ // Returns an empty string if we can't get the dalvik cache directory path.
+ std::string DalvikCacheDirectory();
+
+ // Constructs the filename for the profile file.
+ // Returns an empty string if we do not have the necessary information to
+ // construct the filename.
+ std::string ProfileFileName();
+
+ // Constructs the filename for the old profile file.
+ // Returns an empty string if we do not have the necessary information to
+ // construct the filename.
+ std::string OldProfileFileName();
+
+ // Returns the current image location.
+ // Returns an empty string if the image location could not be retrieved.
+ //
+ // TODO: This method should belong with an image file manager, not
+ // the oat file assistant.
+ static std::string ImageLocation();
+
+ // Gets the dex checksum required for an up-to-date oat file.
+ // Returns dex_checksum if a required checksum was located. Returns
+ // nullptr if the required checksum was not found.
+ // The caller shouldn't clean up or free the returned pointer.
+ const uint32_t* GetRequiredDexChecksum();
+
+ // Returns the loaded odex file.
+ // Loads the file if needed. Returns nullptr if the file failed to load.
+ // The caller shouldn't clean up or free the returned pointer.
+ const OatFile* GetOdexFile();
+
+ // Clear any cached information about the odex file that depends on the
+ // contents of the file.
+ void ClearOdexFileCache();
+
+ // Returns the loaded oat file.
+ // Loads the file if needed. Returns nullptr if the file failed to load.
+ // The caller shouldn't clean up or free the returned pointer.
+ const OatFile* GetOatFile();
+
+ // Clear any cached information about the oat file that depends on the
+ // contents of the file.
+ void ClearOatFileCache();
+
+ // Returns the loaded image info.
+ // Loads the image info if needed. Returns nullptr if the image info failed
+ // to load.
+ // The caller shouldn't clean up or free the returned pointer.
+ const ImageInfo* GetImageInfo();
+
+ // Returns the loaded profile.
+ // Loads the profile if needed. Returns nullptr if the profile failed
+ // to load.
+ // The caller shouldn't clean up or free the returned pointer.
+ ProfileFile* GetProfile();
+
+ // Returns the loaded old profile.
+ // Loads the old profile if needed. Returns nullptr if the old profile
+ // failed to load.
+ // The caller shouldn't clean up or free the returned pointer.
+ ProfileFile* GetOldProfile();
+
+ // To implement Lock(), we lock a dummy file where the oat file would go
+ // (adding ".flock" to the target file name) and retain the lock for the
+ // remaining lifetime of the OatFileAssistant object.
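+  // For example (illustrative): for a target oat file
+  // "/data/dalvik-cache/arm/foo@classes.dex", the lock file would be
+  // "/data/dalvik-cache/arm/foo@classes.dex.flock".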
+ std::unique_ptr<File> lock_file_;
+ ScopedFlock flock_;
+
+ // In a properly constructed OatFileAssistant object, dex_location_ should
+ // never be nullptr.
+ const char* dex_location_ = nullptr;
+
+ // In a properly constructed OatFileAssistant object, isa_ should be either
+ // the 32 or 64 bit variant for the current device.
+ const InstructionSet isa_ = kNone;
+
+ // The package name, used solely to find the profile file.
+ // This may be nullptr in a properly constructed object. In this case,
+ // profile_load_attempted_ and old_profile_load_attempted_ will be true, and
+ // profile_load_succeeded_ and old_profile_load_succeeded_ will be false.
+ const char* package_name_ = nullptr;
+
+ // Whether we will attempt to load oat files executable.
+ bool load_executable_ = false;
+
+ // Cached value of the required dex checksum.
+ // This should be accessed only by the GetRequiredDexChecksum() method.
+ uint32_t cached_required_dex_checksum;
+ bool required_dex_checksum_attempted = false;
+ bool required_dex_checksum_found;
+
+ // Cached value of the odex file name.
+ // This should be accessed only by the OdexFileName() method.
+ bool cached_odex_file_name_attempted_ = false;
+ bool cached_odex_file_name_found_;
+ std::string cached_odex_file_name_;
+
+ // Cached value of the loaded odex file.
+ // Use the GetOdexFile method rather than accessing this directly, unless you
+ // know the odex file isn't out of date.
+ bool odex_file_load_attempted_ = false;
+ std::unique_ptr<OatFile> cached_odex_file_;
+
+ // Cached results for OdexFileIsOutOfDate
+ bool odex_file_is_out_of_date_attempted_ = false;
+ bool cached_odex_file_is_out_of_date_;
+
+ // Cached results for OdexFileIsUpToDate
+ bool odex_file_is_up_to_date_attempted_ = false;
+ bool cached_odex_file_is_up_to_date_;
+
+ // Cached value of the oat file name.
+ // This should be accessed only by the OatFileName() method.
+ bool cached_oat_file_name_attempted_ = false;
+ bool cached_oat_file_name_found_;
+ std::string cached_oat_file_name_;
+
+  // Cached value of the loaded oat file.
+  // Use the GetOatFile method rather than accessing this directly, unless you
+  // know the oat file isn't out of date.
+ bool oat_file_load_attempted_ = false;
+ std::unique_ptr<OatFile> cached_oat_file_;
+
+ // Cached results for OatFileIsOutOfDate
+ bool oat_file_is_out_of_date_attempted_ = false;
+ bool cached_oat_file_is_out_of_date_;
+
+ // Cached results for OatFileIsUpToDate
+ bool oat_file_is_up_to_date_attempted_ = false;
+ bool cached_oat_file_is_up_to_date_;
+
+ // Cached value of the image info.
+ // Use the GetImageInfo method rather than accessing these directly.
+ // TODO: The image info should probably be moved out of the oat file
+ // assistant to an image file manager.
+ bool image_info_load_attempted_ = false;
+ bool image_info_load_succeeded_ = false;
+ ImageInfo cached_image_info_;
+
+ // Cached value of the profile file.
+ // Use the GetProfile method rather than accessing these directly.
+ bool profile_load_attempted_ = false;
+ bool profile_load_succeeded_ = false;
+ ProfileFile cached_profile_;
+
+  // Cached value of the old profile file.
+ // Use the GetOldProfile method rather than accessing these directly.
+ bool old_profile_load_attempted_ = false;
+ bool old_profile_load_succeeded_ = false;
+ ProfileFile cached_old_profile_;
+
+ // For debugging only.
+ // If this flag is set, the oat or odex file has been released to the user
+ // of the OatFileAssistant object and the OatFileAssistant object is in a
+ // bad state and should no longer be used.
+ bool oat_file_released_ = false;
+
+ DISALLOW_COPY_AND_ASSIGN(OatFileAssistant);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_OAT_FILE_ASSISTANT_H_
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
new file mode 100644
index 0000000..71679ae
--- /dev/null
+++ b/runtime/oat_file_assistant_test.cc
@@ -0,0 +1,878 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "oat_file_assistant.h"
+
+#include <algorithm>
+#include <fstream>
+#include <string>
+#include <vector>
+#include <sys/param.h>
+
+#include <backtrace/BacktraceMap.h>
+#include <gtest/gtest.h>
+
+#include "class_linker.h"
+#include "common_runtime_test.h"
+#include "mem_map.h"
+#include "os.h"
+#include "thread-inl.h"
+#include "utils.h"
+
+namespace art {
+
+class OatFileAssistantTest : public CommonRuntimeTest {
+ public:
+ virtual void SetUp() {
+ ReserveImageSpace();
+ CommonRuntimeTest::SetUp();
+
+ // Create a scratch directory to work from.
+ scratch_dir_ = android_data_ + "/OatFileAssistantTest";
+ ASSERT_EQ(0, mkdir(scratch_dir_.c_str(), 0700));
+
+ // Create a subdirectory in scratch for the current isa.
+ // This is the location that will be used for odex files in the tests.
+ isa_dir_ = scratch_dir_ + "/" + GetInstructionSetString(kRuntimeISA);
+ ASSERT_EQ(0, mkdir(isa_dir_.c_str(), 0700));
+
+ // Verify the environment is as we expect
+ uint32_t checksum;
+ std::string error_msg;
+ ASSERT_TRUE(OS::FileExists(GetImageFile().c_str()))
+ << "Expected pre-compiled boot image to be at: " << GetImageFile();
+ ASSERT_TRUE(OS::FileExists(GetDexSrc1().c_str()))
+ << "Expected dex file to be at: " << GetDexSrc1();
+ ASSERT_TRUE(OS::FileExists(GetStrippedDexSrc1().c_str()))
+ << "Expected stripped dex file to be at: " << GetStrippedDexSrc1();
+ ASSERT_FALSE(DexFile::GetChecksum(GetStrippedDexSrc1().c_str(), &checksum, &error_msg))
+ << "Expected stripped dex file to be stripped: " << GetStrippedDexSrc1();
+ ASSERT_TRUE(OS::FileExists(GetMultiDexSrc1().c_str()))
+ << "Expected multidex file to be at: " << GetMultiDexSrc1();
+ ASSERT_TRUE(OS::FileExists(GetDexSrc2().c_str()))
+ << "Expected dex file to be at: " << GetDexSrc2();
+ }
+
+ virtual void SetUpRuntimeOptions(RuntimeOptions* options) {
+ // options->push_back(std::make_pair("-verbose:oat", nullptr));
+
+ // Set up the image location.
+ options->push_back(std::make_pair("-Ximage:" + GetImageLocation(),
+ nullptr));
+ // Make sure compilercallbacks are not set so that relocation will be
+ // enabled.
+ for (std::pair<std::string, const void*>& pair : *options) {
+ if (pair.first == "compilercallbacks") {
+ pair.second = nullptr;
+ }
+ }
+ }
+
+ virtual void PreRuntimeCreate() {
+ UnreserveImageSpace();
+ }
+
+ virtual void PostRuntimeCreate() {
+ ReserveImageSpace();
+ }
+
+ virtual void TearDown() {
+ ClearDirectory(isa_dir_.c_str());
+ ASSERT_EQ(0, rmdir(isa_dir_.c_str()));
+
+ ClearDirectory(scratch_dir_.c_str());
+ ASSERT_EQ(0, rmdir(scratch_dir_.c_str()));
+
+ CommonRuntimeTest::TearDown();
+ }
+
+ void Copy(std::string src, std::string dst) {
+ std::ifstream src_stream(src, std::ios::binary);
+ std::ofstream dst_stream(dst, std::ios::binary);
+
+ dst_stream << src_stream.rdbuf();
+ }
+
+ // Returns the directory where the pre-compiled core.art can be found.
+ // TODO: We should factor out this into common tests somewhere rather than
+ // re-hardcoding it here (This was copied originally from the elf writer
+ // test).
+ std::string GetImageDirectory() {
+ if (IsHost()) {
+ const char* host_dir = getenv("ANDROID_HOST_OUT");
+      CHECK(host_dir != nullptr);
+ return std::string(host_dir) + "/framework";
+ } else {
+ return std::string("/data/art-test");
+ }
+ }
+
+ std::string GetImageLocation() {
+ return GetImageDirectory() + "/core.art";
+ }
+
+ std::string GetImageFile() {
+ return GetImageDirectory() + "/" + GetInstructionSetString(kRuntimeISA)
+ + "/core.art";
+ }
+
+ std::string GetDexSrc1() {
+ return GetTestDexFileName("Main");
+ }
+
+ // Returns the path to a dex file equivalent to GetDexSrc1, but with the dex
+ // file stripped.
+ std::string GetStrippedDexSrc1() {
+ return GetTestDexFileName("MainStripped");
+ }
+
+ std::string GetMultiDexSrc1() {
+ return GetTestDexFileName("MultiDex");
+ }
+
+ std::string GetDexSrc2() {
+ return GetTestDexFileName("Nested");
+ }
+
+ // Scratch directory, for dex and odex files (oat files will go in the
+ // dalvik cache).
+ std::string GetScratchDir() {
+ return scratch_dir_;
+ }
+
+ // ISA directory is the subdirectory in the scratch directory where odex
+ // files should be located.
+ std::string GetISADir() {
+ return isa_dir_;
+ }
+
+ // Generate an odex file for the purposes of test.
+ // If pic is true, generates a PIC odex.
+ void GenerateOdexForTest(const std::string& dex_location,
+ const std::string& odex_location,
+ bool pic = false) {
+ // For this operation, we temporarily redirect the dalvik cache so dex2oat
+ // doesn't find the relocated image file.
+ std::string android_data_tmp = GetScratchDir() + "AndroidDataTmp";
+ setenv("ANDROID_DATA", android_data_tmp.c_str(), 1);
+ std::vector<std::string> args;
+ args.push_back("--dex-file=" + dex_location);
+ args.push_back("--oat-file=" + odex_location);
+ if (pic) {
+ args.push_back("--compile-pic");
+ } else {
+ args.push_back("--include-patch-information");
+
+ // We need to use the quick compiler to generate non-PIC code, because
+ // the optimizing compiler always generates PIC.
+ args.push_back("--compiler-backend=Quick");
+ }
+ args.push_back("--runtime-arg");
+ args.push_back("-Xnorelocate");
+ std::string error_msg;
+ ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg;
+ setenv("ANDROID_DATA", android_data_.c_str(), 1);
+ }
+
+ void GeneratePicOdexForTest(const std::string& dex_location,
+ const std::string& odex_location) {
+ GenerateOdexForTest(dex_location, odex_location, true);
+ }
+
+ private:
+ // Reserve memory around where the image will be loaded so other memory
+ // won't conflict when it comes time to load the image.
+ // This can be called with an already loaded image to reserve the space
+ // around it.
+ void ReserveImageSpace() {
+ MemMap::Init();
+
+ // Ensure a chunk of memory is reserved for the image space.
+ uintptr_t reservation_start = ART_BASE_ADDRESS + ART_BASE_ADDRESS_MIN_DELTA;
+ uintptr_t reservation_end = ART_BASE_ADDRESS + ART_BASE_ADDRESS_MAX_DELTA
+ + 100 * 1024 * 1024;
+
+ std::string error_msg;
+ std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
+ ASSERT_TRUE(map.get() != nullptr) << "Failed to build process map";
+ for (BacktraceMap::const_iterator it = map->begin();
+ reservation_start < reservation_end && it != map->end(); ++it) {
+ if (it->end <= reservation_start) {
+ continue;
+ }
+
+      if (it->start < reservation_start) {
+        // This map entry overlaps the start of the range we still need to
+        // reserve; skip past it to avoid computing a negative mapping size.
+        reservation_start = std::min(reservation_end, it->end);
+        continue;
+      }
+
+ image_reservation_.push_back(std::unique_ptr<MemMap>(
+ MemMap::MapAnonymous("image reservation",
+ reinterpret_cast<uint8_t*>(reservation_start),
+ std::min(it->start, reservation_end) - reservation_start,
+ PROT_NONE, false, false, &error_msg)));
+ ASSERT_TRUE(image_reservation_.back().get() != nullptr) << error_msg;
+ LOG(INFO) << "Reserved space for image " <<
+ reinterpret_cast<void*>(image_reservation_.back()->Begin()) << "-" <<
+ reinterpret_cast<void*>(image_reservation_.back()->End());
+ reservation_start = it->end;
+ }
+ }
+
+ // Unreserve any memory reserved by ReserveImageSpace. This should be called
+ // before the image is loaded.
+ void UnreserveImageSpace() {
+ image_reservation_.clear();
+ }
+
+ std::string scratch_dir_;
+ std::string isa_dir_;
+ std::vector<std::unique_ptr<MemMap>> image_reservation_;
+};
+
+class OatFileAssistantNoDex2OatTest : public OatFileAssistantTest {
+ public:
+ virtual void SetUpRuntimeOptions(RuntimeOptions* options) {
+ OatFileAssistantTest::SetUpRuntimeOptions(options);
+ options->push_back(std::make_pair("-Xnodex2oat", nullptr));
+ }
+};
+
+// Generate an oat file for the purposes of test, as opposed to testing
+// generation of oat files.
+static void GenerateOatForTest(const char* dex_location) {
+ OatFileAssistant oat_file_assistant(dex_location, kRuntimeISA, false);
+
+ std::string error_msg;
+ ASSERT_TRUE(oat_file_assistant.GenerateOatFile(&error_msg)) << error_msg;
+}
+
+// Case: We have a DEX file, but no OAT file for it.
+// Expect: The oat file status is kOutOfDate.
+TEST_F(OatFileAssistantTest, DexNoOat) {
+ std::string dex_location = GetScratchDir() + "/DexNoOat.jar";
+ Copy(GetDexSrc1(), dex_location);
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+
+ EXPECT_EQ(OatFileAssistant::kOutOfDate, oat_file_assistant.GetStatus());
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_FALSE(oat_file_assistant.OdexFileExists());
+ EXPECT_TRUE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileNeedsRelocation());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_EQ(OatFileAssistant::kOutOfDate, oat_file_assistant.OdexFileStatus());
+ EXPECT_FALSE(oat_file_assistant.OatFileExists());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileNeedsRelocation());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+ EXPECT_EQ(OatFileAssistant::kOutOfDate, oat_file_assistant.OatFileStatus());
+}
+
+// Case: We have no DEX file and no OAT file.
+// Expect: Status is out of date. Loading should fail, but not crash.
+TEST_F(OatFileAssistantTest, NoDexNoOat) {
+ std::string dex_location = GetScratchDir() + "/NoDexNoOat.jar";
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+
+ EXPECT_EQ(OatFileAssistant::kOutOfDate, oat_file_assistant.GetStatus());
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ EXPECT_EQ(nullptr, oat_file.get());
+}
+
+// Case: We have a DEX file and up-to-date OAT file for it.
+// Expect: The oat file status is kUpToDate.
+TEST_F(OatFileAssistantTest, OatUpToDate) {
+ std::string dex_location = GetScratchDir() + "/OatUpToDate.jar";
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str());
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+
+ EXPECT_EQ(OatFileAssistant::kUpToDate, oat_file_assistant.GetStatus());
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_FALSE(oat_file_assistant.OdexFileExists());
+ EXPECT_TRUE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_TRUE(oat_file_assistant.OatFileExists());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileNeedsRelocation());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsUpToDate());
+ EXPECT_EQ(OatFileAssistant::kUpToDate, oat_file_assistant.OatFileStatus());
+}
+
+// Case: We have a MultiDEX file and up-to-date OAT file for it.
+// Expect: The oat file status is kUpToDate.
+TEST_F(OatFileAssistantTest, MultiDexOatUpToDate) {
+ std::string dex_location = GetScratchDir() + "/MultiDexOatUpToDate.jar";
+ Copy(GetMultiDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str());
+
+ // Verify we can load both dex files.
+ OatFileAssistant executable_oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+ std::unique_ptr<OatFile> oat_file = executable_oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() != nullptr);
+ EXPECT_TRUE(oat_file->IsExecutable());
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ dex_files = executable_oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+ EXPECT_EQ(2u, dex_files.size());
+}
+
+// Case: We have a DEX file and out of date OAT file.
+// Expect: The oat file status is kOutOfDate.
+TEST_F(OatFileAssistantTest, OatOutOfDate) {
+ std::string dex_location = GetScratchDir() + "/OatOutOfDate.jar";
+
+ // We create a dex, generate an oat for it, then overwrite the dex with a
+ // different dex to make the oat out of date.
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str());
+ Copy(GetDexSrc2(), dex_location);
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+ EXPECT_EQ(OatFileAssistant::kOutOfDate, oat_file_assistant.GetStatus());
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_FALSE(oat_file_assistant.OdexFileExists());
+ EXPECT_TRUE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_TRUE(oat_file_assistant.OatFileExists());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+}
+
+// Case: We have a DEX file and an ODEX file, but no OAT file.
+// Expect: The oat file status is kNeedsRelocation.
+TEST_F(OatFileAssistantTest, DexOdexNoOat) {
+ std::string dex_location = GetScratchDir() + "/DexOdexNoOat.jar";
+ std::string odex_location = GetISADir() + "/DexOdexNoOat.odex";
+
+ // Create the dex and odex files
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location);
+
+ // Verify the status.
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+
+ EXPECT_EQ(OatFileAssistant::kNeedsRelocation, oat_file_assistant.GetStatus());
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_TRUE(oat_file_assistant.OdexFileExists());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_TRUE(oat_file_assistant.OdexFileNeedsRelocation());
+  EXPECT_EQ(OatFileAssistant::kNeedsRelocation, oat_file_assistant.OdexFileStatus());
+ EXPECT_FALSE(oat_file_assistant.OatFileExists());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+}
+
+// Case: We have a stripped DEX file and an ODEX file, but no OAT file.
+// Expect: The oat file status is kNeedsRelocation.
+TEST_F(OatFileAssistantTest, StrippedDexOdexNoOat) {
+ std::string dex_location = GetScratchDir() + "/StrippedDexOdexNoOat.jar";
+ std::string odex_location = GetISADir() + "/StrippedDexOdexNoOat.odex";
+
+ // Create the dex and odex files
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location);
+
+ // Strip the dex file
+ Copy(GetStrippedDexSrc1(), dex_location);
+
+ // Verify the status.
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+
+ EXPECT_EQ(OatFileAssistant::kNeedsRelocation, oat_file_assistant.GetStatus());
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_TRUE(oat_file_assistant.OdexFileExists());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileExists());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+
+ // Make the oat file up to date.
+ std::string error_msg;
+ ASSERT_TRUE(oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg;
+
+ EXPECT_EQ(OatFileAssistant::kUpToDate, oat_file_assistant.GetStatus());
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_TRUE(oat_file_assistant.OdexFileExists());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_TRUE(oat_file_assistant.OatFileExists());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsUpToDate());
+
+ // Verify we can load the dex files from it.
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() != nullptr);
+ EXPECT_TRUE(oat_file->IsExecutable());
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+ EXPECT_EQ(1u, dex_files.size());
+}
+
+// Case: We have a stripped DEX file, an ODEX file, and an out of date OAT file.
+// Expect: The oat file status is kNeedsRelocation.
+TEST_F(OatFileAssistantTest, StrippedDexOdexOat) {
+ std::string dex_location = GetScratchDir() + "/StrippedDexOdexOat.jar";
+ std::string odex_location = GetISADir() + "/StrippedDexOdexOat.odex";
+
+ // Create the oat file from a different dex file so it looks out of date.
+ Copy(GetDexSrc2(), dex_location);
+ GenerateOatForTest(dex_location.c_str());
+
+ // Create the odex file
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location);
+
+ // Strip the dex file.
+ Copy(GetStrippedDexSrc1(), dex_location);
+
+ // Verify the status.
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+
+ EXPECT_EQ(OatFileAssistant::kNeedsRelocation, oat_file_assistant.GetStatus());
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_TRUE(oat_file_assistant.OdexFileExists());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_TRUE(oat_file_assistant.OdexFileNeedsRelocation());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_TRUE(oat_file_assistant.OatFileExists());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+
+ // Make the oat file up to date.
+ std::string error_msg;
+ ASSERT_TRUE(oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg;
+
+ EXPECT_EQ(OatFileAssistant::kUpToDate, oat_file_assistant.GetStatus());
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_TRUE(oat_file_assistant.OdexFileExists());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_TRUE(oat_file_assistant.OdexFileNeedsRelocation());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_TRUE(oat_file_assistant.OatFileExists());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileNeedsRelocation());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsUpToDate());
+
+ // Verify we can load the dex files from it.
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() != nullptr);
+ EXPECT_TRUE(oat_file->IsExecutable());
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+ EXPECT_EQ(1u, dex_files.size());
+}
+
+// Case: We have a DEX file, an ODEX file and an OAT file, where the ODEX and
+// OAT files both have patch delta of 0.
+// Expect: It shouldn't crash.
+TEST_F(OatFileAssistantTest, OdexOatOverlap) {
+ std::string dex_location = GetScratchDir() + "/OdexOatOverlap.jar";
+ std::string odex_location = GetISADir() + "/OdexOatOverlap.odex";
+ std::string oat_location = GetISADir() + "/OdexOatOverlap.oat";
+
+ // Create the dex and odex files
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location);
+
+ // Create the oat file by copying the odex so they are located in the same
+ // place in memory.
+ Copy(odex_location, oat_location);
+
+ // Verify things don't go bad.
+ OatFileAssistant oat_file_assistant(dex_location.c_str(),
+ oat_location.c_str(), kRuntimeISA, true);
+
+ EXPECT_EQ(OatFileAssistant::kNeedsRelocation, oat_file_assistant.GetStatus());
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_TRUE(oat_file_assistant.OdexFileExists());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_TRUE(oat_file_assistant.OatFileExists());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+
+ // Things aren't relocated, so it should fall back to interpreted.
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() != nullptr);
+ EXPECT_FALSE(oat_file->IsExecutable());
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+ EXPECT_EQ(1u, dex_files.size());
+}
+
+// Case: We have a DEX file and a PIC ODEX file, but no OAT file.
+// Expect: The oat file status is kUpToDate, because PIC needs no relocation.
+TEST_F(OatFileAssistantTest, DexPicOdexNoOat) {
+ std::string dex_location = GetScratchDir() + "/DexPicOdexNoOat.jar";
+ std::string odex_location = GetISADir() + "/DexPicOdexNoOat.odex";
+
+ // Create the dex and odex files
+ Copy(GetDexSrc1(), dex_location);
+ GeneratePicOdexForTest(dex_location, odex_location);
+
+ // Verify the status.
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+
+ EXPECT_EQ(OatFileAssistant::kUpToDate, oat_file_assistant.GetStatus());
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_TRUE(oat_file_assistant.OdexFileExists());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_TRUE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileExists());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+}
+
+// Case: We have a DEX file and up-to-date OAT file for it.
+// Expect: We should load an executable dex file.
+TEST_F(OatFileAssistantTest, LoadOatUpToDate) {
+ std::string dex_location = GetScratchDir() + "/LoadOatUpToDate.jar";
+
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str());
+
+ // Load the oat using an oat file assistant.
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() != nullptr);
+ EXPECT_TRUE(oat_file->IsExecutable());
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+ EXPECT_EQ(1u, dex_files.size());
+}
+
+// Case: We have a DEX file and up-to-date OAT file for it.
+// Expect: Loading non-executable should load the oat file as non-executable.
+TEST_F(OatFileAssistantTest, LoadNoExecOatUpToDate) {
+ std::string dex_location = GetScratchDir() + "/LoadNoExecOatUpToDate.jar";
+
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str());
+
+ // Load the oat using an oat file assistant.
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() != nullptr);
+ EXPECT_FALSE(oat_file->IsExecutable());
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+ EXPECT_EQ(1u, dex_files.size());
+}
+
+// Case: We have a DEX file.
+// Expect: We should load an executable dex file from an alternative oat
+// location.
+TEST_F(OatFileAssistantTest, LoadDexNoAlternateOat) {
+ std::string dex_location = GetScratchDir() + "/LoadDexNoAlternateOat.jar";
+ std::string oat_location = GetScratchDir() + "/LoadDexNoAlternateOat.oat";
+
+ Copy(GetDexSrc1(), dex_location);
+
+ OatFileAssistant oat_file_assistant(
+ dex_location.c_str(), oat_location.c_str(), kRuntimeISA, true);
+ std::string error_msg;
+ ASSERT_TRUE(oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg;
+
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() != nullptr);
+ EXPECT_TRUE(oat_file->IsExecutable());
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+ EXPECT_EQ(1u, dex_files.size());
+
+ EXPECT_TRUE(OS::FileExists(oat_location.c_str()));
+
+ // Verify it didn't create an oat in the default location.
+ OatFileAssistant ofm(dex_location.c_str(), kRuntimeISA, false);
+ EXPECT_FALSE(ofm.OatFileExists());
+}
+
+// Case: Non-existent Dex location.
+// Expect: The dex code is out of date, and trying to update it fails.
+TEST_F(OatFileAssistantTest, NonExistentDexLocation) {
+ std::string dex_location = GetScratchDir() + "/BadDexLocation.jar";
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_EQ(OatFileAssistant::kOutOfDate, oat_file_assistant.GetStatus());
+ EXPECT_FALSE(oat_file_assistant.OdexFileExists());
+ EXPECT_FALSE(oat_file_assistant.OatFileExists());
+ EXPECT_TRUE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+
+ std::string error_msg;
+ EXPECT_FALSE(oat_file_assistant.MakeUpToDate(&error_msg));
+ EXPECT_FALSE(error_msg.empty());
+}
+
+// Turn an absolute path into a path relative to the current working
+// directory.
+static std::string MakePathRelative(std::string target) {
+ char buf[MAXPATHLEN];
+ std::string cwd = getcwd(buf, MAXPATHLEN);
+
+ // Split the target and cwd paths into components.
+ std::vector<std::string> target_path;
+ std::vector<std::string> cwd_path;
+ Split(target, '/', &target_path);
+ Split(cwd, '/', &cwd_path);
+
+ // Reverse the path components, so we can use pop_back().
+ std::reverse(target_path.begin(), target_path.end());
+ std::reverse(cwd_path.begin(), cwd_path.end());
+
+ // Drop the common prefix of the paths. Because we reversed the path
+ // components, this becomes the common suffix of target_path and cwd_path.
+ while (!target_path.empty() && !cwd_path.empty()
+ && target_path.back() == cwd_path.back()) {
+ target_path.pop_back();
+ cwd_path.pop_back();
+ }
+
+ // For each element of the remaining cwd_path, add '..' to the beginning
+ // of the target path. Because we reversed the path components, we add to
+ // the end of target_path.
+ for (unsigned int i = 0; i < cwd_path.size(); i++) {
+ target_path.push_back("..");
+ }
+
+ // Reverse again to get the right path order, and join to get the result.
+ std::reverse(target_path.begin(), target_path.end());
+ return Join(target_path, '/');
+}
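+
+// For illustration (editor's sketch; the paths are hypothetical): with a
+// current working directory of "/data/art-test", a target of
+// "/data/foo/bar.jar" shares only the "data" component with the cwd,
+// leaving one unmatched cwd component ("art-test"), so the result is
+// "../foo/bar.jar".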
+
+// Case: Non-absolute path to Dex location.
+// Expect: The dex code is treated as out of date, and it shouldn't crash.
+TEST_F(OatFileAssistantTest, NonAbsoluteDexLocation) {
+ std::string abs_dex_location = GetScratchDir() + "/NonAbsoluteDexLocation.jar";
+ Copy(GetDexSrc1(), abs_dex_location);
+
+ std::string dex_location = MakePathRelative(abs_dex_location);
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_EQ(OatFileAssistant::kOutOfDate, oat_file_assistant.GetStatus());
+ EXPECT_FALSE(oat_file_assistant.OdexFileExists());
+ EXPECT_FALSE(oat_file_assistant.OatFileExists());
+ EXPECT_TRUE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+}
+
+// Case: Very short, non-existent Dex location.
+// Expect: Dex code is out of date, and trying to update it fails.
+TEST_F(OatFileAssistantTest, ShortDexLocation) {
+ std::string dex_location = "/xx";
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_EQ(OatFileAssistant::kOutOfDate, oat_file_assistant.GetStatus());
+ EXPECT_FALSE(oat_file_assistant.OdexFileExists());
+ EXPECT_FALSE(oat_file_assistant.OatFileExists());
+ EXPECT_TRUE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+
+ std::string error_msg;
+ EXPECT_FALSE(oat_file_assistant.MakeUpToDate(&error_msg));
+ EXPECT_FALSE(error_msg.empty());
+}
+
+// Case: Non-standard extension for dex file.
+// Expect: The oat file status is kOutOfDate.
+TEST_F(OatFileAssistantTest, LongDexExtension) {
+ std::string dex_location = GetScratchDir() + "/LongDexExtension.jarx";
+ Copy(GetDexSrc1(), dex_location);
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+
+ EXPECT_EQ(OatFileAssistant::kOutOfDate, oat_file_assistant.GetStatus());
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_FALSE(oat_file_assistant.OdexFileExists());
+ EXPECT_TRUE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileExists());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+}
+
+// A task that opens the dex files for a dex location, generating the oat
+// file if needed. Used by the RaceToGenerate test.
+class RaceGenerateTask : public Task {
+ public:
+ explicit RaceGenerateTask(const std::string& dex_location, const std::string& oat_location)
+ : dex_location_(dex_location), oat_location_(oat_location),
+ loaded_oat_file_(nullptr)
+ {}
+
+ void Run(Thread* self) {
+ UNUSED(self);
+
+ // Load the dex files, and save a pointer to the loaded oat file, so that
+ // we can verify only one oat file was loaded for the dex location.
+ ClassLinker* linker = Runtime::Current()->GetClassLinker();
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ std::vector<std::string> error_msgs;
+ dex_files = linker->OpenDexFilesFromOat(dex_location_.c_str(), oat_location_.c_str(), &error_msgs);
+ CHECK(!dex_files.empty()) << Join(error_msgs, '\n');
+ loaded_oat_file_ = dex_files[0]->GetOatFile();
+ }
+
+ const OatFile* GetLoadedOatFile() const {
+ return loaded_oat_file_;
+ }
+
+ private:
+ std::string dex_location_;
+ std::string oat_location_;
+ const OatFile* loaded_oat_file_;
+};
+
+// Test the case where multiple processes race to generate an oat file.
+// This simulates multiple processes by using multiple threads within a
+// single process.
+//
+// We want only one oat file to be loaded when there is a race to load, to
+// avoid using up virtual memory address space.
+TEST_F(OatFileAssistantTest, RaceToGenerate) {
+ std::string dex_location = GetScratchDir() + "/RaceToGenerate.jar";
+ std::string oat_location = GetISADir() + "/RaceToGenerate.oat";
+
+  // We use the libcore dex file because it's large, so compiling it should
+  // take long enough for the threads to actually race.
+ Copy(GetLibCoreDexFileName(), dex_location);
+
+ const int kNumThreads = 32;
+ Thread* self = Thread::Current();
+ ThreadPool thread_pool("Oat file assistant test thread pool", kNumThreads);
+ std::vector<std::unique_ptr<RaceGenerateTask>> tasks;
+ for (int i = 0; i < kNumThreads; i++) {
+ std::unique_ptr<RaceGenerateTask> task(new RaceGenerateTask(dex_location, oat_location));
+ thread_pool.AddTask(self, task.get());
+ tasks.push_back(std::move(task));
+ }
+ thread_pool.StartWorkers(self);
+ thread_pool.Wait(self, true, false);
+
+ // Verify every task got the same pointer.
+ const OatFile* expected = tasks[0]->GetLoadedOatFile();
+ for (auto& task : tasks) {
+ EXPECT_EQ(expected, task->GetLoadedOatFile());
+ }
+}
+
+// Case: We have a DEX file and an ODEX file, no OAT file, and dex2oat is
+// disabled.
+// Expect: We should load the odex file non-executable.
+TEST_F(OatFileAssistantNoDex2OatTest, LoadDexOdexNoOat) {
+ std::string dex_location = GetScratchDir() + "/LoadDexOdexNoOat.jar";
+ std::string odex_location = GetISADir() + "/LoadDexOdexNoOat.odex";
+
+ // Create the dex and odex files
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location);
+
+ // Load the oat using an executable oat file assistant.
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() != nullptr);
+ EXPECT_FALSE(oat_file->IsExecutable());
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+ EXPECT_EQ(1u, dex_files.size());
+}
+
+// Case: We have a MultiDEX file and an ODEX file, no OAT file, and dex2oat is
+// disabled.
+// Expect: We should load the odex file non-executable.
+TEST_F(OatFileAssistantNoDex2OatTest, LoadMultiDexOdexNoOat) {
+ std::string dex_location = GetScratchDir() + "/LoadMultiDexOdexNoOat.jar";
+ std::string odex_location = GetISADir() + "/LoadMultiDexOdexNoOat.odex";
+
+ // Create the dex and odex files
+ Copy(GetMultiDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location);
+
+ // Load the oat using an executable oat file assistant.
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() != nullptr);
+ EXPECT_FALSE(oat_file->IsExecutable());
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+ EXPECT_EQ(2u, dex_files.size());
+}
+
+TEST(OatFileAssistantUtilsTest, DexFilenameToOdexFilename) {
+ std::string error_msg;
+ std::string odex_file;
+
+ EXPECT_TRUE(OatFileAssistant::DexFilenameToOdexFilename(
+ "/foo/bar/baz.jar", kArm, &odex_file, &error_msg)) << error_msg;
+ EXPECT_EQ("/foo/bar/arm/baz.odex", odex_file);
+
+ EXPECT_TRUE(OatFileAssistant::DexFilenameToOdexFilename(
+ "/foo/bar/baz.funnyext", kArm, &odex_file, &error_msg)) << error_msg;
+ EXPECT_EQ("/foo/bar/arm/baz.odex", odex_file);
+
+ EXPECT_FALSE(OatFileAssistant::DexFilenameToOdexFilename(
+ "nopath.jar", kArm, &odex_file, &error_msg));
+ EXPECT_FALSE(OatFileAssistant::DexFilenameToOdexFilename(
+ "/foo/bar/baz_noext", kArm, &odex_file, &error_msg));
+}
+
+// TODO: More Tests:
+// * Test class linker falls back to unquickened dex for DexNoOat
+// * Test class linker falls back to unquickened dex for MultiDexNoOat
+// * Test multidex files:
+// - Multidex with only classes2.dex out of date should have status
+// kOutOfDate
+// * Test using secondary isa
+// * Test with profiling info?
+// * Test for status of oat while oat is being generated (how?)
+// * Test case where 32 and 64 bit boot class paths differ,
+// and we ask IsInBootClassPath for a class in exactly one of the 32 or
+// 64 bit boot class paths.
+// * Test unexpected scenarios (?):
+// - Dex is stripped, don't have odex.
+// - Oat file corrupted after status check, before reload unexecutable
+// because it's unrelocated and no dex2oat
+
+} // namespace art
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 607569a..a53aeaa 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -136,6 +136,8 @@
.IntoKey(M::LongGCLogThreshold)
.Define("-XX:DumpGCPerformanceOnShutdown")
.IntoKey(M::DumpGCPerformanceOnShutdown)
+ .Define("-XX:DumpJITInfoOnShutdown")
+ .IntoKey(M::DumpJITInfoOnShutdown)
.Define("-XX:IgnoreMaxFootprint")
.IntoKey(M::IgnoreMaxFootprint)
.Define("-XX:LowMemoryMode")
@@ -620,6 +622,7 @@
UsageMessage(stream, " -XX:LongPauseLogThreshold=integervalue\n");
UsageMessage(stream, " -XX:LongGCLogThreshold=integervalue\n");
UsageMessage(stream, " -XX:DumpGCPerformanceOnShutdown\n");
+ UsageMessage(stream, " -XX:DumpJITInfoOnShutdown\n");
UsageMessage(stream, " -XX:IgnoreMaxFootprint\n");
UsageMessage(stream, " -XX:UseTLAB\n");
UsageMessage(stream, " -XX:BackgroundGC=none\n");
diff --git a/runtime/primitive.h b/runtime/primitive.h
index 9dda144..2d6b6b3 100644
--- a/runtime/primitive.h
+++ b/runtime/primitive.h
@@ -165,6 +165,10 @@
}
}
+ static bool IsIntOrLongType(Type type) {
+ return type == kPrimInt || type == kPrimLong;
+ }
+
static bool Is64BitType(Type type) {
return type == kPrimLong || type == kPrimDouble;
}
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 1ddb761..0eb8eca 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -114,8 +114,7 @@
DISALLOW_COPY_AND_ASSIGN(CatchBlockStackVisitor);
};
-void QuickExceptionHandler::FindCatch(const ThrowLocation& throw_location,
- mirror::Throwable* exception) {
+void QuickExceptionHandler::FindCatch(mirror::Throwable* exception) {
DCHECK(!is_deoptimization_);
if (kDebugExceptionDelivery) {
mirror::String* msg = exception->GetDetailMessage();
@@ -145,15 +144,14 @@
DCHECK(!self_->IsExceptionPending());
} else {
// Put exception back in root set with clear throw location.
- self_->SetException(ThrowLocation(), exception_ref.Get());
+ self_->SetException(exception_ref.Get());
}
// The debugger may suspend this thread and walk its stack. Let's do this before popping
// instrumentation frames.
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (instrumentation->HasExceptionCaughtListeners()
&& self_->IsExceptionThrownByCurrentMethod(exception)) {
- instrumentation->ExceptionCaughtEvent(self_, throw_location, handler_method_, handler_dex_pc_,
- exception_ref.Get());
+ instrumentation->ExceptionCaughtEvent(self_, exception_ref.Get());
}
}
@@ -283,7 +281,7 @@
visitor.WalkStack(true);
// Restore deoptimization exception
- self_->SetException(ThrowLocation(), Thread::GetDeoptimizationException());
+ self_->SetException(Thread::GetDeoptimizationException());
}
// Unwinds all instrumentation stack frame prior to catch handler or upcall.
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index a0e6a79..8cccec8 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -30,7 +30,6 @@
} // namespace mirror
class Context;
class Thread;
-class ThrowLocation;
class ShadowFrame;
// Manages exception delivery for Quick backend.
@@ -44,8 +43,7 @@
UNREACHABLE();
}
- void FindCatch(const ThrowLocation& throw_location, mirror::Throwable* exception)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FindCatch(mirror::Throwable* exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DeoptimizeStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void UpdateInstrumentationStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
NO_RETURN void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/reflection-inl.h b/runtime/reflection-inl.h
index be4d560..f21c1a0 100644
--- a/runtime/reflection-inl.h
+++ b/runtime/reflection-inl.h
@@ -27,7 +27,7 @@
namespace art {
-inline bool ConvertPrimitiveValue(const ThrowLocation* throw_location, bool unbox_for_result,
+inline bool ConvertPrimitiveValue(bool unbox_for_result,
Primitive::Type srcType, Primitive::Type dstType,
const JValue& src, JValue* dst) {
DCHECK(srcType != Primitive::kPrimNot && dstType != Primitive::kPrimNot);
@@ -88,13 +88,11 @@
break;
}
if (!unbox_for_result) {
- ThrowIllegalArgumentException(throw_location,
- StringPrintf("Invalid primitive conversion from %s to %s",
+ ThrowIllegalArgumentException(StringPrintf("Invalid primitive conversion from %s to %s",
PrettyDescriptor(srcType).c_str(),
PrettyDescriptor(dstType).c_str()).c_str());
} else {
- ThrowClassCastException(throw_location,
- StringPrintf("Couldn't convert result of type %s to %s",
+ ThrowClassCastException(StringPrintf("Couldn't convert result of type %s to %s",
PrettyDescriptor(srcType).c_str(),
PrettyDescriptor(dstType).c_str()).c_str());
}
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 2aeb92d..a54a39d 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -207,7 +207,7 @@
static void ThrowIllegalPrimitiveArgumentException(const char* expected,
const char* found_descriptor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ThrowIllegalArgumentException(nullptr,
+ ThrowIllegalArgumentException(
StringPrintf("Invalid primitive conversion from %s to %s", expected,
PrettyDescriptor(found_descriptor).c_str()).c_str());
}
@@ -227,7 +227,7 @@
mirror::Class* dst_class =
h_m->GetClassFromTypeIndex(classes->GetTypeItem(args_offset).type_idx_, true);
if (UNLIKELY(arg == nullptr || !arg->InstanceOf(dst_class))) {
- ThrowIllegalArgumentException(nullptr,
+ ThrowIllegalArgumentException(
StringPrintf("method %s argument %zd has type %s, got %s",
PrettyMethod(h_m.Get(), false).c_str(),
args_offset + 1, // Humans don't count from 0.
@@ -255,7 +255,7 @@
ThrowIllegalPrimitiveArgumentException(expected, \
arg->GetClass<>()->GetDescriptor(&temp)); \
} else { \
- ThrowIllegalArgumentException(nullptr, \
+ ThrowIllegalArgumentException(\
StringPrintf("method %s argument %zd has type %s, got %s", \
PrettyMethod(h_m.Get(), false).c_str(), \
args_offset + 1, \
@@ -366,7 +366,7 @@
CHECK(self->IsExceptionPending());
LOG(ERROR) << "Internal error: unresolvable type for argument type in JNI invoke: "
<< h_m->GetTypeDescriptorFromTypeIdx(type_idx) << "\n"
- << self->GetException(nullptr)->Dump();
+ << self->GetException()->Dump();
self->ClearException();
++error_count;
} else if (!param_type->IsPrimitive()) {
@@ -580,8 +580,7 @@
uint32_t classes_size = (classes == nullptr) ? 0 : classes->Size();
uint32_t arg_count = (objects != nullptr) ? objects->GetLength() : 0;
if (arg_count != classes_size) {
- ThrowIllegalArgumentException(nullptr,
- StringPrintf("Wrong number of arguments; expected %d, got %d",
+ ThrowIllegalArgumentException(StringPrintf("Wrong number of arguments; expected %d, got %d",
classes_size, arg_count).c_str());
return nullptr;
}
@@ -590,7 +589,7 @@
mirror::Class* calling_class = nullptr;
if (!accessible && !VerifyAccess(soa.Self(), receiver, declaring_class, m->GetAccessFlags(),
&calling_class)) {
- ThrowIllegalAccessException(nullptr,
+ ThrowIllegalAccessException(
StringPrintf("Class %s cannot access %s method %s of class %s",
calling_class == nullptr ? "null" : PrettyClass(calling_class).c_str(),
PrettyJavaAccessFlags(m->GetAccessFlags()).c_str(),
@@ -631,13 +630,12 @@
bool VerifyObjectIsClass(mirror::Object* o, mirror::Class* c) {
if (o == nullptr) {
- ThrowNullPointerException(nullptr, "null receiver");
+ ThrowNullPointerException("null receiver");
return false;
} else if (!o->InstanceOf(c)) {
std::string expected_class_name(PrettyDescriptor(c));
std::string actual_class_name(PrettyTypeOf(o));
- ThrowIllegalArgumentException(nullptr,
- StringPrintf("Expected receiver of type %s, but got %s",
+ ThrowIllegalArgumentException(StringPrintf("Expected receiver of type %s, but got %s",
expected_class_name.c_str(),
actual_class_name.c_str()).c_str());
return false;
@@ -718,7 +716,7 @@
return "result";
}
-static bool UnboxPrimitive(const ThrowLocation* throw_location, mirror::Object* o,
+static bool UnboxPrimitive(mirror::Object* o,
mirror::Class* dst_class, mirror::ArtField* f,
JValue* unboxed_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -726,14 +724,12 @@
if (!dst_class->IsPrimitive()) {
if (UNLIKELY(o != nullptr && !o->InstanceOf(dst_class))) {
if (!unbox_for_result) {
- ThrowIllegalArgumentException(throw_location,
- StringPrintf("%s has type %s, got %s",
+ ThrowIllegalArgumentException(StringPrintf("%s has type %s, got %s",
UnboxingFailureKind(f).c_str(),
PrettyDescriptor(dst_class).c_str(),
PrettyTypeOf(o).c_str()).c_str());
} else {
- ThrowClassCastException(throw_location,
- StringPrintf("Couldn't convert result of type %s to %s",
+ ThrowClassCastException(StringPrintf("Couldn't convert result of type %s to %s",
PrettyTypeOf(o).c_str(),
PrettyDescriptor(dst_class).c_str()).c_str());
}
@@ -743,20 +739,17 @@
return true;
}
if (UNLIKELY(dst_class->GetPrimitiveType() == Primitive::kPrimVoid)) {
- ThrowIllegalArgumentException(throw_location,
- StringPrintf("Can't unbox %s to void",
+ ThrowIllegalArgumentException(StringPrintf("Can't unbox %s to void",
UnboxingFailureKind(f).c_str()).c_str());
return false;
}
if (UNLIKELY(o == nullptr)) {
if (!unbox_for_result) {
- ThrowIllegalArgumentException(throw_location,
- StringPrintf("%s has type %s, got null",
+ ThrowIllegalArgumentException(StringPrintf("%s has type %s, got null",
UnboxingFailureKind(f).c_str(),
PrettyDescriptor(dst_class).c_str()).c_str());
} else {
- ThrowNullPointerException(throw_location,
- StringPrintf("Expected to unbox a '%s' primitive type but was returned null",
+ ThrowNullPointerException(StringPrintf("Expected to unbox a '%s' primitive type but was returned null",
PrettyDescriptor(dst_class).c_str()).c_str());
}
return false;
@@ -793,14 +786,14 @@
boxed_value.SetS(primitive_field->GetShort(o));
} else {
std::string temp;
- ThrowIllegalArgumentException(throw_location,
+ ThrowIllegalArgumentException(
StringPrintf("%s has type %s, got %s", UnboxingFailureKind(f).c_str(),
PrettyDescriptor(dst_class).c_str(),
PrettyDescriptor(o->GetClass()->GetDescriptor(&temp)).c_str()).c_str());
return false;
}
- return ConvertPrimitiveValue(throw_location, unbox_for_result,
+ return ConvertPrimitiveValue(unbox_for_result,
src_class->GetPrimitiveType(), dst_class->GetPrimitiveType(),
boxed_value, unboxed_value);
}
@@ -808,12 +801,12 @@
bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, mirror::ArtField* f,
JValue* unboxed_value) {
DCHECK(f != nullptr);
- return UnboxPrimitive(nullptr, o, dst_class, f, unboxed_value);
+ return UnboxPrimitive(o, dst_class, f, unboxed_value);
}
-bool UnboxPrimitiveForResult(const ThrowLocation& throw_location, mirror::Object* o,
+bool UnboxPrimitiveForResult(mirror::Object* o,
mirror::Class* dst_class, JValue* unboxed_value) {
- return UnboxPrimitive(&throw_location, o, dst_class, nullptr, unboxed_value);
+ return UnboxPrimitive(o, dst_class, nullptr, unboxed_value);
}
bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_class,
diff --git a/runtime/reflection.h b/runtime/reflection.h
index 1a64871..857d63b 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -31,18 +31,16 @@
union JValue;
class ScopedObjectAccessAlreadyRunnable;
class ShadowFrame;
-class ThrowLocation;
mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, mirror::ArtField* f,
JValue* unboxed_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-bool UnboxPrimitiveForResult(const ThrowLocation& throw_location, mirror::Object* o,
- mirror::Class* dst_class, JValue* unboxed_value)
+bool UnboxPrimitiveForResult(mirror::Object* o, mirror::Class* dst_class, JValue* unboxed_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-ALWAYS_INLINE bool ConvertPrimitiveValue(const ThrowLocation* throw_location, bool unbox_for_result,
+ALWAYS_INLINE bool ConvertPrimitiveValue(bool unbox_for_result,
Primitive::Type src_class, Primitive::Type dst_class,
const JValue& src, JValue* dst)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 35a9e6f..0728646 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -251,6 +251,7 @@
VLOG(jit) << "Deleting jit";
jit_.reset(nullptr);
}
+ arena_pool_.reset();
// Shutdown the fault manager if it was initialized.
fault_manager.Shutdown();
@@ -306,11 +307,8 @@
DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self) || Locks::mutator_lock_->IsSharedHeld(self));
self->Dump(os);
if (self->IsExceptionPending()) {
- ThrowLocation throw_location;
- mirror::Throwable* exception = self->GetException(&throw_location);
- os << "Pending exception " << PrettyTypeOf(exception)
- << " thrown by '" << throw_location.Dump() << "'\n"
- << exception->Dump();
+ mirror::Throwable* exception = self->GetException();
+ os << "Pending exception " << exception->Dump();
}
}
@@ -857,6 +855,11 @@
CreateJit();
}
+  // Use a MemMap arena pool for the JIT, and malloc otherwise. Malloc arenas
+  // are faster to allocate but can't be trimmed as easily.
+ const bool use_malloc = jit_options_.get() == nullptr;
+ arena_pool_.reset(new ArenaPool(use_malloc));
+
BlockSignals();
InitPlatformSignalHandlers();
@@ -1017,17 +1020,17 @@
}
// Pre-allocate an OutOfMemoryError for the double-OOME case.
- self->ThrowNewException(ThrowLocation(), "Ljava/lang/OutOfMemoryError;",
+ self->ThrowNewException("Ljava/lang/OutOfMemoryError;",
"OutOfMemoryError thrown while trying to throw OutOfMemoryError; "
"no stack trace available");
- pre_allocated_OutOfMemoryError_ = GcRoot<mirror::Throwable>(self->GetException(NULL));
+ pre_allocated_OutOfMemoryError_ = GcRoot<mirror::Throwable>(self->GetException());
self->ClearException();
// Pre-allocate a NoClassDefFoundError for the common case of failing to find a system class
// ahead of checking the application's class loader.
- self->ThrowNewException(ThrowLocation(), "Ljava/lang/NoClassDefFoundError;",
+ self->ThrowNewException("Ljava/lang/NoClassDefFoundError;",
"Class not found using the boot class loader; no stack trace available");
- pre_allocated_NoClassDefFoundError_ = GcRoot<mirror::Throwable>(self->GetException(NULL));
+ pre_allocated_NoClassDefFoundError_ = GcRoot<mirror::Throwable>(self->GetException());
self->ClearException();
// Look for a native bridge.
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 5078b7f..4cddb5c 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -28,6 +28,7 @@
#include "arch/instruction_set.h"
#include "base/allocator.h"
+#include "base/arena_allocator.h"
#include "base/macros.h"
#include "compiler_callbacks.h"
#include "gc_root.h"
@@ -545,6 +546,13 @@
void CreateJit();
+ ArenaPool* GetArenaPool() {
+ return arena_pool_.get();
+ }
+ const ArenaPool* GetArenaPool() const {
+ return arena_pool_.get();
+ }
+
private:
static void InitPlatformSignalHandlers();
@@ -608,6 +616,8 @@
gc::Heap* heap_;
+ std::unique_ptr<ArenaPool> arena_pool_;
+
// The number of spins that are done before thread suspension is used to forcibly inflate.
size_t max_spins_before_thin_lock_inflation_;
MonitorList* monitor_list_;
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index d072ffa..8775f8d 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -59,6 +59,7 @@
RUNTIME_OPTIONS_KEY (MillisecondsToNanoseconds, \
LongGCLogThreshold, gc::Heap::kDefaultLongGCLogThreshold)
RUNTIME_OPTIONS_KEY (Unit, DumpGCPerformanceOnShutdown)
+RUNTIME_OPTIONS_KEY (Unit, DumpJITInfoOnShutdown)
RUNTIME_OPTIONS_KEY (Unit, IgnoreMaxFootprint)
RUNTIME_OPTIONS_KEY (Unit, LowMemoryMode)
RUNTIME_OPTIONS_KEY (bool, UseTLAB, false)
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 97a8d01..b8ca21e 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -28,7 +28,6 @@
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"
-#include "throw_location.h"
#include "verify_object-inl.h"
#include "vmap_table.h"
@@ -57,10 +56,6 @@
}
}
-ThrowLocation ShadowFrame::GetCurrentLocationForThrow() const {
- return ThrowLocation(GetThisObject(), GetMethod(), GetDexPC());
-}
-
size_t ManagedStack::NumJniShadowFrameReferences() const {
size_t count = 0;
for (const ManagedStack* current_fragment = this; current_fragment != NULL;
@@ -134,12 +129,6 @@
} else {
return cur_shadow_frame_->GetVRegReference(0);
}
- } else if (m->IsOptimized(GetInstructionSetPointerSize(
- Runtime::Current()->GetInstructionSet()))) {
- // TODO: Implement, currently only used for exceptions when jdwp is enabled.
- UNIMPLEMENTED(WARNING)
- << "StackVisitor::GetThisObject is unimplemented with the optimizing compiler";
- return nullptr;
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
if (code_item == nullptr) {
@@ -209,29 +198,32 @@
DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
// its instructions?
DCHECK_LT(vreg, code_item->registers_size_);
- DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map,
- code_item->registers_size_);
- DexRegisterMap::LocationKind location_kind = dex_register_map.GetLocationKind(vreg);
+ DexRegisterMap dex_register_map =
+ code_info.GetDexRegisterMapOf(stack_map, code_item->registers_size_);
+ DexRegisterLocation::Kind location_kind = dex_register_map.GetLocationKind(vreg);
switch (location_kind) {
- case DexRegisterMap::kInStack: {
+ case DexRegisterLocation::Kind::kInStack: {
const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg);
const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
*val = *reinterpret_cast<const uint32_t*>(addr);
return true;
}
- case DexRegisterMap::kInRegister:
- case DexRegisterMap::kInFpuRegister: {
+ case DexRegisterLocation::Kind::kInRegister:
+ case DexRegisterLocation::Kind::kInFpuRegister: {
uint32_t reg = dex_register_map.GetMachineRegister(vreg);
return GetRegisterIfAccessible(reg, kind, val);
}
- case DexRegisterMap::kConstant:
+ case DexRegisterLocation::Kind::kConstant:
*val = dex_register_map.GetConstant(vreg);
return true;
- case DexRegisterMap::kNone:
+ case DexRegisterLocation::Kind::kNone:
return false;
+ default:
+ LOG(FATAL)
+ << "Unexpected location kind"
+ << DexRegisterLocation::PrettyDescriptor(dex_register_map.GetLocationInternalKind(vreg));
+ UNREACHABLE();
}
- UNREACHABLE();
- return false;
}
bool StackVisitor::GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const {
@@ -391,29 +383,29 @@
DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
// its instructions?
DCHECK_LT(vreg, code_item->registers_size_);
- DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map,
- code_item->registers_size_);
- DexRegisterMap::LocationKind location_kind = dex_register_map.GetLocationKind(vreg);
+ DexRegisterMap dex_register_map =
+ code_info.GetDexRegisterMapOf(stack_map, code_item->registers_size_);
+ DexRegisterLocation::Kind location_kind = dex_register_map.GetLocationKind(vreg);
uint32_t dex_pc = m->ToDexPc(cur_quick_frame_pc_, false);
switch (location_kind) {
- case DexRegisterMap::kInStack: {
+ case DexRegisterLocation::Kind::kInStack: {
const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg);
uint8_t* addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + offset;
*reinterpret_cast<uint32_t*>(addr) = new_value;
return true;
}
- case DexRegisterMap::kInRegister:
- case DexRegisterMap::kInFpuRegister: {
+ case DexRegisterLocation::Kind::kInRegister:
+ case DexRegisterLocation::Kind::kInFpuRegister: {
uint32_t reg = dex_register_map.GetMachineRegister(vreg);
return SetRegisterIfAccessible(reg, new_value, kind);
}
- case DexRegisterMap::kConstant:
+ case DexRegisterLocation::Kind::kConstant:
LOG(ERROR) << StringPrintf("Cannot change value of DEX register v%u used as a constant at "
"DEX pc 0x%x (native pc 0x%x) of method %s",
vreg, dex_pc, native_pc_offset,
PrettyMethod(cur_quick_frame_->AsMirrorPtr()).c_str());
return false;
- case DexRegisterMap::kNone:
+ case DexRegisterLocation::Kind::kNone:
LOG(ERROR) << StringPrintf("No location for DEX register v%u at DEX pc 0x%x "
"(native pc 0x%x) of method %s",
vreg, dex_pc, native_pc_offset,
diff --git a/runtime/stack.h b/runtime/stack.h
index b495f03..13bd47f 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -25,7 +25,6 @@
#include "gc_root.h"
#include "mirror/object_reference.h"
#include "read_barrier.h"
-#include "throw_location.h"
#include "utils.h"
#include "verify_object.h"
@@ -40,6 +39,7 @@
class ShadowFrame;
class HandleScope;
class ScopedObjectAccess;
+class StackVisitor;
class Thread;
// The kind of vreg being accessed in calls to Set/GetVReg.
@@ -258,8 +258,6 @@
mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ThrowLocation GetCurrentLocationForThrow() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const {
if (HasReferenceArray()) {
return ((&References()[0] <= shadow_frame_entry_obj) &&
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 6d99672..e88820f 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -23,6 +23,14 @@
namespace art {
+// Size of a frame slot, in bytes. This constant is signed so that
+// arithmetic mixing it with int32_t (signed) values does not trigger
+// signed/unsigned conversion warnings.
+static constexpr ssize_t kFrameSlotSize = 4;
+
+// Word alignment required on ARM, in bytes.
+static constexpr size_t kWordAlignment = 4;
+
/**
* Classes in the following file are wrappers around stack map information backed
* by a MemoryRegion. As such they read and write to the region, they don't have
@@ -58,6 +66,8 @@
}
private:
+ // TODO: Instead of plain types such as "uint8_t", introduce
+ // typedefs (and document the memory layout of InlineInfo).
static constexpr int kDepthOffset = 0;
static constexpr int kFixedSize = kDepthOffset + sizeof(uint8_t);
@@ -68,82 +78,327 @@
friend class StackMapStream;
};
+// Dex register location container used by DexRegisterMap and StackMapStream.
+class DexRegisterLocation {
+ public:
+ /*
+ * The location kind used to populate the Dex register information in a
+ * StackMapStream can either be:
+ * - kNone: the register has no location yet, meaning it has not been set;
+ * - kConstant: value holds the constant;
+ * - kInStack: value holds the stack offset;
+ * - kInRegister: value holds the physical register number;
+ * - kInFpuRegister: value holds the physical register number.
+ *
+ * In addition, DexRegisterMap also uses these values:
+ * - kInStackLargeOffset: value holds a "large" stack offset (greater than
+ *   or equal to 128 bytes);
+ * - kConstantLargeValue: value holds a "large" constant (negative, or
+ *   greater than or equal to 32).
+ */
+ enum class Kind : uint8_t {
+ // Short location kinds, for entries fitting on one byte (3 bits
+ // for the kind, 5 bits for the value) in a DexRegisterMap.
+ kNone = 0, // 0b000
+ kInStack = 1, // 0b001
+ kInRegister = 2, // 0b010
+ kInFpuRegister = 3, // 0b011
+ kConstant = 4, // 0b100
+
+ // Large location kinds, requiring a 5-byte encoding (1 byte for the
+ // kind, 4 bytes for the value).
+
+ // Stack location at a large offset, meaning that the offset value
+ // divided by the stack frame slot size (4 bytes) cannot fit on a
+ // 5-bit unsigned integer (i.e., this offset value is greater than
+ // or equal to 2^5 * 4 = 128 bytes).
+ kInStackLargeOffset = 5, // 0b101
+
+    // Large constant, which cannot fit in a 5-bit unsigned integer
+    // (i.e., a negative value, or a value greater than or equal to
+    // 2^5 = 32).
+ kConstantLargeValue = 6, // 0b110
+
+ kLastLocationKind = kConstantLargeValue
+ };
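+
+  // Illustrative examples (editor's note; the values are hypothetical):
+  // a stack offset of 28 bytes is slot 7, which fits in 5 bits and is
+  // encoded as a short kInStack entry; a stack offset of 128 bytes is
+  // slot 32, which does not fit and becomes a 5-byte kInStackLargeOffset
+  // entry; the constant 17 stays a short kConstant, while -2 becomes
+  // kConstantLargeValue.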
+
+ static_assert(
+ sizeof(Kind) == 1u,
+ "art::DexRegisterLocation::Kind has a size different from one byte.");
+
+ static const char* PrettyDescriptor(Kind kind) {
+ switch (kind) {
+ case Kind::kNone:
+ return "none";
+ case Kind::kInStack:
+ return "in stack";
+ case Kind::kInRegister:
+ return "in register";
+ case Kind::kInFpuRegister:
+ return "in fpu register";
+ case Kind::kConstant:
+ return "as constant";
+ case Kind::kInStackLargeOffset:
+ return "in stack (large offset)";
+ case Kind::kConstantLargeValue:
+ return "as constant (large value)";
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ static bool IsShortLocationKind(Kind kind) {
+ switch (kind) {
+ case Kind::kNone:
+ case Kind::kInStack:
+ case Kind::kInRegister:
+ case Kind::kInFpuRegister:
+ case Kind::kConstant:
+ return true;
+
+ case Kind::kInStackLargeOffset:
+ case Kind::kConstantLargeValue:
+ return false;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ // Convert `kind` to a "surface" kind, i.e. one that doesn't include
+ // any value with a "large" qualifier.
+ // TODO: Introduce another enum type for the surface kind?
+ static Kind ConvertToSurfaceKind(Kind kind) {
+ switch (kind) {
+ case Kind::kNone:
+ case Kind::kInStack:
+ case Kind::kInRegister:
+ case Kind::kInFpuRegister:
+ case Kind::kConstant:
+ return kind;
+
+ case Kind::kInStackLargeOffset:
+ return Kind::kInStack;
+
+ case Kind::kConstantLargeValue:
+ return Kind::kConstant;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ DexRegisterLocation(Kind kind, int32_t value)
+ : kind_(kind), value_(value) {}
+
+ // Get the "surface" kind of the location, i.e., the one that doesn't
+ // include any value with a "large" qualifier.
+ Kind GetKind() const {
+ return ConvertToSurfaceKind(kind_);
+ }
+
+ // Get the value of the location.
+ int32_t GetValue() const { return value_; }
+
+ // Get the actual kind of the location.
+ Kind GetInternalKind() const { return kind_; }
+
+ private:
+ Kind kind_;
+ int32_t value_;
+};
+
/**
* Information on dex register values for a specific PC. The information is
* of the form:
* [location_kind, register_value]+.
- *
- * The location_kind for a Dex register can either be:
- * - kConstant: register_value holds the constant,
- * - kStack: register_value holds the stack offset,
- * - kRegister: register_value holds the physical register number.
- * - kFpuRegister: register_value holds the physical register number.
- * - kNone: the register has no location yet, meaning it has not been set.
+ * where each entry is encoded on 1 or 5 bytes (see art::DexRegisterLocation::Kind).
*/
class DexRegisterMap {
public:
explicit DexRegisterMap(MemoryRegion region) : region_(region) {}
- enum LocationKind {
- kNone,
- kInStack,
- kInRegister,
- kInFpuRegister,
- kConstant
- };
+ // Short (compressed) location, fitting on one byte.
+ typedef uint8_t ShortLocation;
- static const char* PrettyDescriptor(LocationKind kind) {
- switch (kind) {
- case kNone:
- return "none";
- case kInStack:
- return "in stack";
- case kInRegister:
- return "in register";
- case kInFpuRegister:
- return "in fpu register";
- case kConstant:
- return "as constant";
+ void SetRegisterInfo(size_t offset, const DexRegisterLocation& dex_register_location) {
+ DexRegisterLocation::Kind kind = ComputeCompressedKind(dex_register_location);
+ int32_t value = dex_register_location.GetValue();
+ if (DexRegisterLocation::IsShortLocationKind(kind)) {
+ // Short location. Compress the kind and the value as a single byte.
+ if (kind == DexRegisterLocation::Kind::kInStack) {
+ // Instead of storing stack offsets expressed in bytes for
+ // short stack locations, store slot offsets. A stack offset
+ // is a multiple of 4 (kFrameSlotSize). This means that by
+ // dividing it by 4, we can fit values from the [0, 128)
+ // interval in a short stack location, and not just values
+ // from the [0, 32) interval.
+ DCHECK_EQ(value % kFrameSlotSize, 0);
+ value /= kFrameSlotSize;
+ }
+ DCHECK(IsUint<kValueBits>(value)) << value;
+ region_.StoreUnaligned<ShortLocation>(offset, MakeShortLocation(kind, value));
+ } else {
+ // Large location. Write the location on one byte and the value
+ // on 4 bytes.
+ DCHECK(!IsUint<kValueBits>(value)) << value;
+ if (kind == DexRegisterLocation::Kind::kInStackLargeOffset) {
+ // Also divide large stack offsets by 4 for the sake of consistency.
+ DCHECK_EQ(value % kFrameSlotSize, 0);
+ value /= kFrameSlotSize;
+ }
+ // Data can be unaligned as the written Dex register locations can
+ // either be 1-byte or 5-byte wide. Use
+ // art::MemoryRegion::StoreUnaligned instead of
+      // art::MemoryRegion::Store to prevent unaligned word accesses on ARM.
+ region_.StoreUnaligned<DexRegisterLocation::Kind>(offset, kind);
+ region_.StoreUnaligned<int32_t>(offset + sizeof(DexRegisterLocation::Kind), value);
}
- UNREACHABLE();
- return nullptr;
}
- LocationKind GetLocationKind(uint16_t register_index) const {
- return region_.Load<LocationKind>(
- kFixedSize + register_index * SingleEntrySize());
+ // Find the offset of the Dex register location number `dex_register_index`.
+ size_t FindLocationOffset(uint16_t dex_register_index) const {
+ size_t offset = kFixedSize;
+    // Skip the first `dex_register_index` entries.
+ for (uint16_t i = 0; i < dex_register_index; ++i) {
+      // Read the next byte and inspect its first 3 bits to decide
+ // whether it is a short or a large location.
+ DexRegisterLocation::Kind kind = ExtractKindAtOffset(offset);
+ if (DexRegisterLocation::IsShortLocationKind(kind)) {
+ // Short location. Skip the current byte.
+ offset += SingleShortEntrySize();
+ } else {
+        // Large location. Skip the next 5 bytes.
+ offset += SingleLargeEntrySize();
+ }
+ }
+ return offset;
}
- void SetRegisterInfo(uint16_t register_index, LocationKind kind, int32_t value) {
- size_t entry = kFixedSize + register_index * SingleEntrySize();
- region_.Store<LocationKind>(entry, kind);
- region_.Store<int32_t>(entry + sizeof(LocationKind), value);
+ // Get the surface kind.
+ DexRegisterLocation::Kind GetLocationKind(uint16_t dex_register_index) const {
+ return DexRegisterLocation::ConvertToSurfaceKind(GetLocationInternalKind(dex_register_index));
}
- int32_t GetValue(uint16_t register_index) const {
- return region_.Load<int32_t>(
- kFixedSize + sizeof(LocationKind) + register_index * SingleEntrySize());
+ // Get the internal kind.
+ DexRegisterLocation::Kind GetLocationInternalKind(uint16_t dex_register_index) const {
+ size_t offset = FindLocationOffset(dex_register_index);
+ return ExtractKindAtOffset(offset);
}
- int32_t GetStackOffsetInBytes(uint16_t register_index) const {
- DCHECK(GetLocationKind(register_index) == kInStack);
- // We currently encode the offset in bytes.
- return GetValue(register_index);
+ // TODO: Rename as GetDexRegisterLocation?
+ DexRegisterLocation GetLocationKindAndValue(uint16_t dex_register_index) const {
+ size_t offset = FindLocationOffset(dex_register_index);
+    // Read the first byte and inspect its first 3 bits to get the location kind.
+ ShortLocation first_byte = region_.LoadUnaligned<ShortLocation>(offset);
+ DexRegisterLocation::Kind kind = ExtractKindFromShortLocation(first_byte);
+ if (DexRegisterLocation::IsShortLocationKind(kind)) {
+ // Short location. Extract the value from the remaining 5 bits.
+ int32_t value = ExtractValueFromShortLocation(first_byte);
+ if (kind == DexRegisterLocation::Kind::kInStack) {
+ // Convert the stack slot (short) offset to a byte offset value.
+ value *= kFrameSlotSize;
+ }
+ return DexRegisterLocation(kind, value);
+ } else {
+      // Large location. Read the next four bytes to get the value.
+ int32_t value = region_.LoadUnaligned<int32_t>(offset + sizeof(DexRegisterLocation::Kind));
+ if (kind == DexRegisterLocation::Kind::kInStackLargeOffset) {
+ // Convert the stack slot (large) offset to a byte offset value.
+ value *= kFrameSlotSize;
+ }
+ return DexRegisterLocation(kind, value);
+ }
}
- int32_t GetConstant(uint16_t register_index) const {
- DCHECK(GetLocationKind(register_index) == kConstant);
- return GetValue(register_index);
+ int32_t GetStackOffsetInBytes(uint16_t dex_register_index) const {
+ DexRegisterLocation location = GetLocationKindAndValue(dex_register_index);
+ DCHECK(location.GetKind() == DexRegisterLocation::Kind::kInStack);
+ // GetLocationKindAndValue returns the offset in bytes.
+ return location.GetValue();
}
- int32_t GetMachineRegister(uint16_t register_index) const {
- DCHECK(GetLocationKind(register_index) == kInRegister
- || GetLocationKind(register_index) == kInFpuRegister);
- return GetValue(register_index);
+ int32_t GetConstant(uint16_t dex_register_index) const {
+ DexRegisterLocation location = GetLocationKindAndValue(dex_register_index);
+ DCHECK(location.GetKind() == DexRegisterLocation::Kind::kConstant);
+ return location.GetValue();
}
- static size_t SingleEntrySize() {
- return sizeof(LocationKind) + sizeof(int32_t);
+ int32_t GetMachineRegister(uint16_t dex_register_index) const {
+ DexRegisterLocation location = GetLocationKindAndValue(dex_register_index);
+ DCHECK(location.GetInternalKind() == DexRegisterLocation::Kind::kInRegister
+ || location.GetInternalKind() == DexRegisterLocation::Kind::kInFpuRegister)
+ << DexRegisterLocation::PrettyDescriptor(location.GetInternalKind());
+ return location.GetValue();
+ }
+
+ // Compute the compressed kind of `location`.
+ static DexRegisterLocation::Kind ComputeCompressedKind(const DexRegisterLocation& location) {
+ switch (location.GetInternalKind()) {
+ case DexRegisterLocation::Kind::kNone:
+ DCHECK_EQ(location.GetValue(), 0);
+ return DexRegisterLocation::Kind::kNone;
+
+ case DexRegisterLocation::Kind::kInRegister:
+ DCHECK_GE(location.GetValue(), 0);
+ DCHECK_LT(location.GetValue(), 1 << DexRegisterMap::kValueBits);
+ return DexRegisterLocation::Kind::kInRegister;
+
+ case DexRegisterLocation::Kind::kInFpuRegister:
+ DCHECK_GE(location.GetValue(), 0);
+ DCHECK_LT(location.GetValue(), 1 << DexRegisterMap::kValueBits);
+ return DexRegisterLocation::Kind::kInFpuRegister;
+
+ case DexRegisterLocation::Kind::kInStack:
+ DCHECK_EQ(location.GetValue() % kFrameSlotSize, 0);
+ return IsUint<DexRegisterMap::kValueBits>(location.GetValue() / kFrameSlotSize)
+ ? DexRegisterLocation::Kind::kInStack
+ : DexRegisterLocation::Kind::kInStackLargeOffset;
+
+ case DexRegisterLocation::Kind::kConstant:
+ return IsUint<DexRegisterMap::kValueBits>(location.GetValue())
+ ? DexRegisterLocation::Kind::kConstant
+ : DexRegisterLocation::Kind::kConstantLargeValue;
+
+ default:
+        LOG(FATAL) << "Unexpected location kind: "
+ << DexRegisterLocation::PrettyDescriptor(location.GetInternalKind());
+ UNREACHABLE();
+ }
+ }
+
+ // Can `location` be turned into a short location?
+ static bool CanBeEncodedAsShortLocation(const DexRegisterLocation& location) {
+ switch (location.GetInternalKind()) {
+ case DexRegisterLocation::Kind::kNone:
+ case DexRegisterLocation::Kind::kInRegister:
+ case DexRegisterLocation::Kind::kInFpuRegister:
+ return true;
+
+ case DexRegisterLocation::Kind::kInStack:
+ DCHECK_EQ(location.GetValue() % kFrameSlotSize, 0);
+ return IsUint<kValueBits>(location.GetValue() / kFrameSlotSize);
+
+ case DexRegisterLocation::Kind::kConstant:
+ return IsUint<kValueBits>(location.GetValue());
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ static size_t EntrySize(const DexRegisterLocation& location) {
+ return CanBeEncodedAsShortLocation(location)
+ ? DexRegisterMap::SingleShortEntrySize()
+ : DexRegisterMap::SingleLargeEntrySize();
+ }
+
+ static size_t SingleShortEntrySize() {
+ return sizeof(ShortLocation);
+ }
+
+ static size_t SingleLargeEntrySize() {
+ return sizeof(DexRegisterLocation::Kind) + sizeof(int32_t);
}
size_t Size() const {
@@ -153,7 +408,43 @@
static constexpr int kFixedSize = 0;
private:
+ // Width of the kind "field" in a short location, in bits.
+ static constexpr size_t kKindBits = 3;
+ // Width of the value "field" in a short location, in bits.
+ static constexpr size_t kValueBits = 5;
+
+ static constexpr uint8_t kKindMask = (1 << kKindBits) - 1;
+ static constexpr int32_t kValueMask = (1 << kValueBits) - 1;
+ static constexpr size_t kKindOffset = 0;
+ static constexpr size_t kValueOffset = kKindBits;
+
+ static ShortLocation MakeShortLocation(DexRegisterLocation::Kind kind, int32_t value) {
+ DCHECK(IsUint<kKindBits>(static_cast<uint8_t>(kind))) << static_cast<uint8_t>(kind);
+ DCHECK(IsUint<kValueBits>(value)) << value;
+ return (static_cast<uint8_t>(kind) & kKindMask) << kKindOffset
+ | (value & kValueMask) << kValueOffset;
+ }
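+
+  // Editor's illustration: MakeShortLocation(Kind::kInStack, 7) packs the
+  // kind (0b001) into the low 3 bits and the value (0b00111) above them,
+  // yielding the byte 0b00111001 (0x39).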
+
+ static DexRegisterLocation::Kind ExtractKindFromShortLocation(ShortLocation location) {
+ uint8_t kind = (location >> kKindOffset) & kKindMask;
+ DCHECK_LE(kind, static_cast<uint8_t>(DexRegisterLocation::Kind::kLastLocationKind));
+ return static_cast<DexRegisterLocation::Kind>(kind);
+ }
+
+ static int32_t ExtractValueFromShortLocation(ShortLocation location) {
+ return (location >> kValueOffset) & kValueMask;
+ }
+
+ // Extract a location kind from the byte at position `offset`.
+ DexRegisterLocation::Kind ExtractKindAtOffset(size_t offset) const {
+ ShortLocation first_byte = region_.LoadUnaligned<ShortLocation>(offset);
+ return ExtractKindFromShortLocation(first_byte);
+ }
+
MemoryRegion region_;
+
+ friend class CodeInfo;
+ friend class StackMapStream;
};
/**
@@ -187,7 +478,7 @@
}
void SetNativePcOffset(uint32_t native_pc_offset) {
- return region_.Store<uint32_t>(kNativePcOffsetOffset, native_pc_offset);
+ region_.Store<uint32_t>(kNativePcOffsetOffset, native_pc_offset);
}
uint32_t GetDexRegisterMapOffset() const {
@@ -195,7 +486,7 @@
}
void SetDexRegisterMapOffset(uint32_t offset) {
- return region_.Store<uint32_t>(kDexRegisterMapOffsetOffset, offset);
+ region_.Store<uint32_t>(kDexRegisterMapOffsetOffset, offset);
}
uint32_t GetInlineDescriptorOffset() const {
@@ -203,7 +494,7 @@
}
void SetInlineDescriptorOffset(uint32_t offset) {
- return region_.Store<uint32_t>(kInlineDescriptorOffsetOffset, offset);
+ region_.Store<uint32_t>(kInlineDescriptorOffsetOffset, offset);
}
uint32_t GetRegisterMask() const {
@@ -240,7 +531,7 @@
static size_t ComputeAlignedStackMapSize(size_t stack_mask_size) {
// On ARM, the stack maps must be 4-byte aligned.
- return RoundUp(StackMap::kFixedSize + stack_mask_size, 4);
+ return RoundUp(StackMap::kFixedSize + stack_mask_size, kWordAlignment);
}
// Special (invalid) offset for the DexRegisterMapOffset field meaning
@@ -252,6 +543,8 @@
static constexpr uint32_t kNoInlineInfo = -1;
private:
+ // TODO: Instead of plain types such as "uint32_t", introduce
+ // typedefs (and document the memory layout of StackMap).
static constexpr int kDexPcOffset = 0;
static constexpr int kNativePcOffsetOffset = kDexPcOffset + sizeof(uint32_t);
static constexpr int kDexRegisterMapOffsetOffset = kNativePcOffsetOffset + sizeof(uint32_t);
@@ -317,11 +610,15 @@
return StackMap::ComputeAlignedStackMapSize(GetStackMaskSize());
}
+ uint32_t GetStackMapsOffset() const {
+ return kFixedSize;
+ }
+
DexRegisterMap GetDexRegisterMapOf(StackMap stack_map, uint32_t number_of_dex_registers) const {
DCHECK(stack_map.HasDexRegisterMap());
uint32_t offset = stack_map.GetDexRegisterMapOffset();
- return DexRegisterMap(region_.Subregion(offset,
- DexRegisterMap::kFixedSize + number_of_dex_registers * DexRegisterMap::SingleEntrySize()));
+ size_t size = ComputeDexRegisterMapSize(offset, number_of_dex_registers);
+ return DexRegisterMap(region_.Subregion(offset, size));
}
InlineInfo GetInlineInfoOf(StackMap stack_map) const {
@@ -356,6 +653,8 @@
}
private:
+ // TODO: Instead of plain types such as "uint32_t", introduce
+ // typedefs (and document the memory layout of CodeInfo).
static constexpr int kOverallSizeOffset = 0;
static constexpr int kNumberOfStackMapsOffset = kOverallSizeOffset + sizeof(uint32_t);
static constexpr int kStackMaskSizeOffset = kNumberOfStackMapsOffset + sizeof(uint32_t);
@@ -367,6 +666,33 @@
: region_.Subregion(kFixedSize, StackMapSize() * GetNumberOfStackMaps());
}
+ // Compute the size of a Dex register map starting at offset `origin` in
+ // `region_` and containing `number_of_dex_registers` locations.
+ size_t ComputeDexRegisterMapSize(uint32_t origin, uint32_t number_of_dex_registers) const {
+ // TODO: Ideally, we would like to use art::DexRegisterMap::Size or
+ // art::DexRegisterMap::FindLocationOffset, but the DexRegisterMap is not
+ // yet built. Try to factor common code.
+ size_t offset = origin + DexRegisterMap::kFixedSize;
+    // Walk over all `number_of_dex_registers` entries.
+ for (uint16_t i = 0; i < number_of_dex_registers; ++i) {
+      // Read the next byte and inspect its first 3 bits to decide
+ // whether it is a short or a large location.
+ DexRegisterMap::ShortLocation first_byte =
+ region_.LoadUnaligned<DexRegisterMap::ShortLocation>(offset);
+ DexRegisterLocation::Kind kind =
+ DexRegisterMap::ExtractKindFromShortLocation(first_byte);
+ if (DexRegisterLocation::IsShortLocationKind(kind)) {
+ // Short location. Skip the current byte.
+ offset += DexRegisterMap::SingleShortEntrySize();
+ } else {
+        // Large location. Skip the next 5 bytes.
+ offset += DexRegisterMap::SingleLargeEntrySize();
+ }
+ }
+ size_t size = offset - origin;
+ return size;
+ }
+
MemoryRegion region_;
friend class StackMapStream;
};
diff --git a/runtime/thread.cc b/runtime/thread.cc
index fdb1f9d..e8e9355 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1033,18 +1033,11 @@
// assumption that there is no exception pending on entry. Thus, stash any pending exception.
// Thread::Current() instead of this in case a thread is dumping the stack of another suspended
// thread.
- StackHandleScope<3> scope(Thread::Current());
+ StackHandleScope<1> scope(Thread::Current());
Handle<mirror::Throwable> exc;
- Handle<mirror::Object> throw_location_this_object;
- Handle<mirror::ArtMethod> throw_location_method;
- uint32_t throw_location_dex_pc;
bool have_exception = false;
if (IsExceptionPending()) {
- ThrowLocation exc_location;
- exc = scope.NewHandle(GetException(&exc_location));
- throw_location_this_object = scope.NewHandle(exc_location.GetThis());
- throw_location_method = scope.NewHandle(exc_location.GetMethod());
- throw_location_dex_pc = exc_location.GetDexPc();
+ exc = scope.NewHandle(GetException());
const_cast<Thread*>(this)->ClearException();
have_exception = true;
}
@@ -1055,10 +1048,7 @@
dumper.WalkStack();
if (have_exception) {
- ThrowLocation exc_location(throw_location_this_object.Get(),
- throw_location_method.Get(),
- throw_location_dex_pc);
- const_cast<Thread*>(this)->SetException(exc_location, exc.Get());
+ const_cast<Thread*>(this)->SetException(exc.Get());
}
}
@@ -1148,8 +1138,6 @@
Thread::Thread(bool daemon) : tls32_(daemon), wait_monitor_(nullptr), interrupted_(false) {
wait_mutex_ = new Mutex("a thread wait mutex");
wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
- tlsPtr_.debug_invoke_req = new DebugInvokeReq;
- tlsPtr_.single_step_control = nullptr;
tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>;
tlsPtr_.name = new std::string(kThreadNameDuringStartup);
tlsPtr_.nested_signal_state = static_cast<jmp_buf*>(malloc(sizeof(jmp_buf)));
@@ -1188,7 +1176,7 @@
void Thread::AssertNoPendingException() const {
if (UNLIKELY(IsExceptionPending())) {
ScopedObjectAccess soa(Thread::Current());
- mirror::Throwable* exception = GetException(nullptr);
+ mirror::Throwable* exception = GetException();
LOG(FATAL) << "No pending exception expected: " << exception->Dump();
}
}
@@ -1196,7 +1184,7 @@
void Thread::AssertNoPendingExceptionForNewException(const char* msg) const {
if (UNLIKELY(IsExceptionPending())) {
ScopedObjectAccess soa(Thread::Current());
- mirror::Throwable* exception = GetException(nullptr);
+ mirror::Throwable* exception = GetException();
LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: "
<< exception->Dump();
}
@@ -1301,7 +1289,6 @@
CleanupCpu();
}
- delete tlsPtr_.debug_invoke_req;
if (tlsPtr_.single_step_control != nullptr) {
delete tlsPtr_.single_step_control;
}
@@ -1715,50 +1702,44 @@
return result;
}
-void Thread::ThrowNewExceptionF(const ThrowLocation& throw_location,
- const char* exception_class_descriptor, const char* fmt, ...) {
+void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowNewExceptionV(throw_location, exception_class_descriptor,
- fmt, args);
+ ThrowNewExceptionV(exception_class_descriptor, fmt, args);
va_end(args);
}
-void Thread::ThrowNewExceptionV(const ThrowLocation& throw_location,
- const char* exception_class_descriptor,
+void Thread::ThrowNewExceptionV(const char* exception_class_descriptor,
const char* fmt, va_list ap) {
std::string msg;
StringAppendV(&msg, fmt, ap);
- ThrowNewException(throw_location, exception_class_descriptor, msg.c_str());
+ ThrowNewException(exception_class_descriptor, msg.c_str());
}
-void Thread::ThrowNewException(const ThrowLocation& throw_location,
- const char* exception_class_descriptor,
+void Thread::ThrowNewException(const char* exception_class_descriptor,
const char* msg) {
// Callers should either clear or call ThrowNewWrappedException.
AssertNoPendingExceptionForNewException(msg);
- ThrowNewWrappedException(throw_location, exception_class_descriptor, msg);
+ ThrowNewWrappedException(exception_class_descriptor, msg);
}
-void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
- const char* exception_class_descriptor,
+static mirror::ClassLoader* GetCurrentClassLoader(Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* method = self->GetCurrentMethod(nullptr);
+ return method != nullptr
+ ? method->GetDeclaringClass()->GetClassLoader()
+ : nullptr;
+}
+
+void Thread::ThrowNewWrappedException(const char* exception_class_descriptor,
const char* msg) {
DCHECK_EQ(this, Thread::Current());
ScopedObjectAccessUnchecked soa(this);
- StackHandleScope<5> hs(soa.Self());
- // Ensure we don't forget arguments over object allocation.
- Handle<mirror::Object> saved_throw_this(hs.NewHandle(throw_location.GetThis()));
- Handle<mirror::ArtMethod> saved_throw_method(hs.NewHandle(throw_location.GetMethod()));
- // Ignore the cause throw location. TODO: should we report this as a re-throw?
- ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException(nullptr)));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(GetCurrentClassLoader(soa.Self())));
+ ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException()));
ClearException();
Runtime* runtime = Runtime::Current();
-
- mirror::ClassLoader* cl = nullptr;
- if (saved_throw_method.Get() != nullptr) {
- cl = saved_throw_method.Get()->GetDeclaringClass()->GetClassLoader();
- }
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(cl));
Handle<mirror::Class> exception_class(
hs.NewHandle(runtime->GetClassLinker()->FindClass(this, exception_class_descriptor,
class_loader)));
@@ -1779,9 +1760,7 @@
// If we couldn't allocate the exception, throw the pre-allocated out of memory exception.
if (exception.Get() == nullptr) {
- ThrowLocation gc_safe_throw_location(saved_throw_this.Get(), saved_throw_method.Get(),
- throw_location.GetDexPc());
- SetException(gc_safe_throw_location, Runtime::Current()->GetPreAllocatedOutOfMemoryError());
+ SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
return;
}
@@ -1831,9 +1810,7 @@
if (trace.get() != nullptr) {
exception->SetStackState(down_cast<mirror::Throwable*>(DecodeJObject(trace.get())));
}
- ThrowLocation gc_safe_throw_location(saved_throw_this.Get(), saved_throw_method.Get(),
- throw_location.GetDexPc());
- SetException(gc_safe_throw_location, exception.Get());
+ SetException(exception.Get());
} else {
jvalue jv_args[2];
size_t i = 0;
@@ -1848,9 +1825,7 @@
}
InvokeWithJValues(soa, exception.Get(), soa.EncodeMethod(exception_init_method), jv_args);
if (LIKELY(!IsExceptionPending())) {
- ThrowLocation gc_safe_throw_location(saved_throw_this.Get(), saved_throw_method.Get(),
- throw_location.GetDexPc());
- SetException(gc_safe_throw_location, exception.Get());
+ SetException(exception.Get());
}
}
}
@@ -1858,14 +1833,13 @@
void Thread::ThrowOutOfMemoryError(const char* msg) {
LOG(WARNING) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
msg, (tls32_.throwing_OutOfMemoryError ? " (recursive case)" : ""));
- ThrowLocation throw_location = GetCurrentLocationForThrow();
if (!tls32_.throwing_OutOfMemoryError) {
tls32_.throwing_OutOfMemoryError = true;
- ThrowNewException(throw_location, "Ljava/lang/OutOfMemoryError;", msg);
+ ThrowNewException("Ljava/lang/OutOfMemoryError;", msg);
tls32_.throwing_OutOfMemoryError = false;
} else {
Dump(LOG(WARNING)); // The pre-allocated OOME has no stack, so help out and log one.
- SetException(throw_location, Runtime::Current()->GetPreAllocatedOutOfMemoryError());
+ SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
}
}
@@ -2030,8 +2004,7 @@
void Thread::QuickDeliverException() {
// Get exception from thread.
- ThrowLocation throw_location;
- mirror::Throwable* exception = GetException(&throw_location);
+ mirror::Throwable* exception = GetException();
CHECK(exception != nullptr);
// Don't leave exception visible while we try to find the handler, which may cause class
// resolution.
@@ -2041,7 +2014,7 @@
if (is_deoptimization) {
exception_handler.DeoptimizeStack();
} else {
- exception_handler.FindCatch(throw_location, exception);
+ exception_handler.FindCatch(exception);
}
exception_handler.UpdateInstrumentationStack();
exception_handler.DoLongJump();
@@ -2093,14 +2066,6 @@
return visitor.method_;
}
-ThrowLocation Thread::GetCurrentLocationForThrow() {
- Context* context = GetLongJumpContext();
- CurrentMethodVisitor visitor(this, context, true);
- visitor.WalkStack(false);
- ReleaseLongJumpContext(context);
- return ThrowLocation(visitor.this_object_, visitor.method_, visitor.dex_pc_);
-}
-
bool Thread::HoldsLock(mirror::Object* object) const {
if (object == nullptr) {
return false;
@@ -2302,7 +2267,6 @@
visitor(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception), arg,
RootInfo(kRootNativeStack, thread_id));
}
- tlsPtr_.throw_location.VisitRoots(visitor, arg);
if (tlsPtr_.monitor_enter_object != nullptr) {
visitor(&tlsPtr_.monitor_enter_object, arg, RootInfo(kRootNativeStack, thread_id));
}
@@ -2436,4 +2400,21 @@
delete ssc;
}
+void Thread::SetDebugInvokeReq(DebugInvokeReq* req) {
+ CHECK(Dbg::IsDebuggerActive());
+ CHECK(GetInvokeReq() == nullptr) << "Debug invoke req already active in thread " << *this;
+ CHECK(Thread::Current() != this) << "Debug invoke can't be dispatched by the thread itself";
+ CHECK(req != nullptr);
+ tlsPtr_.debug_invoke_req = req;
+}
+
+void Thread::ClearDebugInvokeReq() {
+ CHECK(Dbg::IsDebuggerActive());
+ CHECK(GetInvokeReq() != nullptr) << "Debug invoke req not active in thread " << *this;
+ CHECK(Thread::Current() == this) << "Debug invoke must be finished by the thread itself";
+ // We do not own the DebugInvokeReq* so we must not delete it, it is the responsibility of
+ // the owner (the JDWP thread).
+ tlsPtr_.debug_invoke_req = nullptr;
+}
+
} // namespace art
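
The ownership contract spelled out in SetDebugInvokeReq/ClearDebugInvokeReq is a one-shot handshake: the JDWP thread allocates the request and blocks until the target thread has executed the method and cleared its pointer; only the owner frees it. A rough sketch of that pattern using standard primitives (the real code uses ART's Mutex and ConditionVariable; all names here are illustrative):

    #include <condition_variable>
    #include <mutex>

    // Hypothetical stand-in for DebugInvokeReq.
    struct InvokeReq {
      std::mutex lock;
      std::condition_variable cond;
      bool done = false;
    };

    // JDWP-thread side: allocates and owns the request, waits for completion.
    void RequestAndWait(InvokeReq* req) {
      // ...hand `req` to the target thread (SetDebugInvokeReq) and resume it...
      std::unique_lock<std::mutex> lk(req->lock);
      req->cond.wait(lk, [req] { return req->done; });
      // Only the owner deletes the request, after the wait returns.
    }

    // Target-thread side: runs the method, clears its pointer, signals. It
    // must not delete `req` (ClearDebugInvokeReq's contract).
    void CompleteInvoke(InvokeReq* req) {
      // ...invoke the requested method...
      std::lock_guard<std::mutex> lk(req->lock);
      req->done = true;
      req->cond.notify_one();
    }
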
diff --git a/runtime/thread.h b/runtime/thread.h
index e4c91b7..2e9ae3c 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -41,7 +41,6 @@
#include "runtime_stats.h"
#include "stack.h"
#include "thread_state.h"
-#include "throw_location.h"
namespace art {
@@ -326,11 +325,7 @@
return tlsPtr_.exception != nullptr;
}
- mirror::Throwable* GetException(ThrowLocation* throw_location) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (throw_location != nullptr) {
- *throw_location = tlsPtr_.throw_location;
- }
+ mirror::Throwable* GetException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return tlsPtr_.exception;
}
@@ -338,17 +333,15 @@
void AssertNoPendingException() const;
void AssertNoPendingExceptionForNewException(const char* msg) const;
- void SetException(const ThrowLocation& throw_location, mirror::Throwable* new_exception)
+ void SetException(mirror::Throwable* new_exception)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK(new_exception != NULL);
// TODO: DCHECK(!IsExceptionPending());
tlsPtr_.exception = new_exception;
- tlsPtr_.throw_location = throw_location;
}
void ClearException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
tlsPtr_.exception = nullptr;
- tlsPtr_.throw_location.Clear();
}
// Find catch block and perform long jump to appropriate exception handler
@@ -370,8 +363,6 @@
bool IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ThrowLocation GetCurrentLocationForThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
void SetTopOfStack(StackReference<mirror::ArtMethod>* top_method) {
tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
}
@@ -386,24 +377,19 @@
}
// If 'msg' is NULL, no detail message is set.
- void ThrowNewException(const ThrowLocation& throw_location,
- const char* exception_class_descriptor, const char* msg)
+ void ThrowNewException(const char* exception_class_descriptor, const char* msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
// used as the new exception's cause.
- void ThrowNewWrappedException(const ThrowLocation& throw_location,
- const char* exception_class_descriptor,
- const char* msg)
+ void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ThrowNewExceptionF(const ThrowLocation& throw_location,
- const char* exception_class_descriptor, const char* fmt, ...)
- __attribute__((format(printf, 4, 5)))
+ void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
+ __attribute__((format(printf, 3, 4)))
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ThrowNewExceptionV(const ThrowLocation& throw_location,
- const char* exception_class_descriptor, const char* fmt, va_list ap)
+ void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// OutOfMemoryError is special, because we need to pre-allocate an instance.
@@ -713,6 +699,16 @@
return tlsPtr_.single_step_control;
}
+ // Indicates whether this thread is ready to invoke a method for debugging. This
+ // is only true if the thread has been suspended by a debug event.
+ bool IsReadyForDebugInvoke() const {
+ return tls32_.ready_for_debug_invoke;
+ }
+
+ void SetReadyForDebugInvoke(bool ready) {
+ tls32_.ready_for_debug_invoke = ready;
+ }
+
// Activates single step control for debugging. The thread takes the
// ownership of the given SingleStepControl*. It is deleted by a call
// to DeactivateSingleStepControl or upon thread destruction.
@@ -721,6 +717,17 @@
// Deactivates single step control for debugging.
void DeactivateSingleStepControl();
+ // Sets the debug invoke request for debugging. When the thread is resumed,
+ // it executes the method described by this request and then suspends itself.
+ // The thread does not take ownership of the given DebugInvokeReq*, it is
+ // owned by the JDWP thread which is waiting for the execution of the
+ // method.
+ void SetDebugInvokeReq(DebugInvokeReq* req);
+
+ // Clears the debug invoke request for debugging. When the thread completes
+ // the method invocation, it clears its debug invoke request, signals the
+ // JDWP thread, and suspends itself.
+ void ClearDebugInvokeReq();
// Returns the fake exception used to activate deoptimization.
static mirror::Throwable* GetDeoptimizationException() {
@@ -972,7 +979,8 @@
explicit tls_32bit_sized_values(bool is_daemon) :
suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
- thread_exit_check_count(0), handling_signal_(false), suspended_at_suspend_check(false) {
+ thread_exit_check_count(0), handling_signal_(false), suspended_at_suspend_check(false),
+ ready_for_debug_invoke(false) {
}
union StateAndFlags state_and_flags;
@@ -1016,6 +1024,11 @@
// used to distinguish runnable threads that are suspended due to
// a normal suspend check from other threads.
bool32_t suspended_at_suspend_check;
+
+ // True if the thread has been suspended by a debugger event. This is used
+ // when invoking a method from the debugger, which is only allowed while
+ // the thread is suspended by an event.
+ bool32_t ready_for_debug_invoke;
} tls32_;
struct PACKED(8) tls_64bit_sized_values {
@@ -1034,7 +1047,7 @@
struct PACKED(4) tls_ptr_sized_values {
tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), self(nullptr), opeer(nullptr),
- jpeer(nullptr), stack_begin(nullptr), stack_size(0), throw_location(),
+ jpeer(nullptr), stack_begin(nullptr), stack_size(0),
stack_trace_sample(nullptr), wait_next(nullptr), monitor_enter_object(nullptr),
top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
@@ -1084,9 +1097,6 @@
// Size of the stack.
size_t stack_size;
- // The location the current exception was thrown from.
- ThrowLocation throw_location;
-
// Pointer to previous stack trace captured by sampling profiler.
std::vector<mirror::ArtMethod*>* stack_trace_sample;
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index d4c1e8c..ddfbebd 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -860,7 +860,8 @@
}
void ThreadList::SuspendSelfForDebugger() {
- Thread* self = Thread::Current();
+ Thread* const self = Thread::Current();
+ self->SetReadyForDebugInvoke(true);
// The debugger thread must not suspend itself due to debugger activity!
Thread* debug_thread = Dbg::GetDebugThread();
@@ -881,11 +882,10 @@
VLOG(threads) << *self << " self-suspending (debugger)";
// Tell JDWP we've completed invocation and are ready to suspend.
- DebugInvokeReq* pReq = self->GetInvokeReq();
- DCHECK(pReq != NULL);
- if (pReq->invoke_needed) {
- // Clear this before signaling.
- pReq->Clear();
+ DebugInvokeReq* const pReq = self->GetInvokeReq();
+ if (pReq != nullptr) {
+ // Clear debug invoke request before signaling.
+ self->ClearDebugInvokeReq();
VLOG(jdwp) << "invoke complete, signaling";
MutexLock mu(self, pReq->lock);
@@ -916,6 +916,7 @@
CHECK_EQ(self->GetSuspendCount(), 0);
}
+ self->SetReadyForDebugInvoke(false);
VLOG(threads) << *self << " self-reviving (debugger)";
}
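
The SetReadyForDebugInvoke(true)/SetReadyForDebugInvoke(false) bracket in SuspendSelfForDebugger is a scoped flag; an RAII guard would express the same invariant. A hypothetical sketch, not part of the ART sources (the real flag is a plain bool32_t in tls32_, not an atomic):

    #include <atomic>

    class ScopedDebugInvokeReady {
     public:
      explicit ScopedDebugInvokeReady(std::atomic<bool>* flag) : flag_(flag) {
        flag_->store(true);   // Suspended by a debug event: invokes allowed.
      }
      ~ScopedDebugInvokeReady() {
        flag_->store(false);  // Resuming: invokes forbidden again.
      }
     private:
      std::atomic<bool>* const flag_;
    };
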
diff --git a/runtime/throw_location.cc b/runtime/throw_location.cc
deleted file mode 100644
index 4d2aec0..0000000
--- a/runtime/throw_location.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "throw_location.h"
-
-#include "mirror/art_method-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/object-inl.h"
-#include "utils.h"
-
-namespace art {
-
-std::string ThrowLocation::Dump() const {
- if (method_ != nullptr) {
- return StringPrintf("%s:%d", PrettyMethod(method_).c_str(),
- method_->GetLineNumFromDexPC(dex_pc_));
- } else {
- return "unknown throw location";
- }
-}
-
-void ThrowLocation::VisitRoots(RootCallback* visitor, void* arg) {
- if (this_object_ != nullptr) {
- visitor(&this_object_, arg, RootInfo(kRootVMInternal));
- DCHECK(this_object_ != nullptr);
- }
- if (method_ != nullptr) {
- visitor(reinterpret_cast<mirror::Object**>(&method_), arg, RootInfo(kRootVMInternal));
- DCHECK(method_ != nullptr);
- }
-}
-
-} // namespace art
diff --git a/runtime/throw_location.h b/runtime/throw_location.h
deleted file mode 100644
index bec0da4..0000000
--- a/runtime/throw_location.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_THROW_LOCATION_H_
-#define ART_RUNTIME_THROW_LOCATION_H_
-
-#include "object_callbacks.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "gc_root.h"
-
-#include <stdint.h>
-#include <string>
-
-namespace art {
-
-namespace mirror {
-class ArtMethod;
-class Object;
-} // mirror
-
-class PACKED(4) ThrowLocation {
- public:
- ThrowLocation() {
- Clear();
- }
-
- ThrowLocation(mirror::Object* throw_this_object, mirror::ArtMethod* throw_method,
- uint32_t throw_dex_pc) :
- this_object_(throw_this_object),
- method_(throw_method),
- dex_pc_(throw_dex_pc)
-#ifdef __LP64__
- , pad_(0)
-#endif
-
- {
-#ifdef __LP64__
- UNUSED(pad_);
-#endif
- }
-
- mirror::Object* GetThis() const {
- return this_object_;
- }
-
- mirror::ArtMethod* GetMethod() const {
- return method_;
- }
-
- uint32_t GetDexPc() const {
- return dex_pc_;
- }
-
- void Clear() {
- this_object_ = NULL;
- method_ = NULL;
- dex_pc_ = -1;
- }
-
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- void VisitRoots(RootCallback* visitor, void* arg);
-
- private:
- // The 'this' reference of the throwing method.
- mirror::Object* this_object_;
- // The throwing method.
- mirror::ArtMethod* method_;
- // The instruction within the throwing method.
- uint32_t dex_pc_;
- // Ensure 8byte alignment on 64bit.
-#ifdef __LP64__
- uint32_t pad_;
-#endif
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_THROW_LOCATION_H_
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 93b3877..8833a85 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -401,9 +401,8 @@
void Trace::Stop() {
bool stop_alloc_counting = false;
- Runtime* runtime = Runtime::Current();
- runtime->GetThreadList()->SuspendAll();
- Trace* the_trace = NULL;
+ Runtime* const runtime = Runtime::Current();
+ Trace* the_trace = nullptr;
pthread_t sampling_pthread = 0U;
{
MutexLock mu(Thread::Current(), *Locks::trace_lock_);
@@ -415,19 +414,27 @@
sampling_pthread = sampling_pthread_;
}
}
- if (the_trace != NULL) {
+ // Make sure that we join before we delete the trace, since we don't want the
+ // sampling thread to access a stale pointer. The join terminates because the
+ // sampling thread exits when the_trace_ is null.
+ if (sampling_pthread != 0U) {
+ CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, NULL), "sampling thread shutdown");
+ sampling_pthread_ = 0U;
+ }
+ runtime->GetThreadList()->SuspendAll();
+ if (the_trace != nullptr) {
stop_alloc_counting = (the_trace->flags_ & kTraceCountAllocs) != 0;
the_trace->FinishTracing();
if (the_trace->sampling_enabled_) {
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
- runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, NULL);
+ runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr);
} else {
runtime->GetInstrumentation()->DisableMethodTracing();
- runtime->GetInstrumentation()->RemoveListener(the_trace,
- instrumentation::Instrumentation::kMethodEntered |
- instrumentation::Instrumentation::kMethodExited |
- instrumentation::Instrumentation::kMethodUnwind);
+ runtime->GetInstrumentation()->RemoveListener(
+ the_trace, instrumentation::Instrumentation::kMethodEntered |
+ instrumentation::Instrumentation::kMethodExited |
+ instrumentation::Instrumentation::kMethodUnwind);
}
if (the_trace->trace_file_.get() != nullptr) {
// Do not try to erase, so flush and close explicitly.
@@ -441,15 +448,9 @@
delete the_trace;
}
runtime->GetThreadList()->ResumeAll();
-
if (stop_alloc_counting) {
// Can be racy since SetStatsEnabled is not guarded by any locks.
- Runtime::Current()->SetStatsEnabled(false);
- }
-
- if (sampling_pthread != 0U) {
- CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, NULL), "sampling thread shutdown");
- sampling_pthread_ = 0U;
+ runtime->SetStatsEnabled(false);
}
}
@@ -619,11 +620,9 @@
thread_clock_diff, wall_clock_diff);
}
-void Trace::ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
- mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
- mirror::Throwable* exception_object)
+void Trace::ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- UNUSED(thread, throw_location, catch_method, catch_dex_pc, exception_object);
+ UNUSED(thread, exception_object);
LOG(ERROR) << "Unexpected exception caught event in tracing";
}
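
The reordering in Trace::Stop is the usual join-before-free discipline: publish the shutdown condition the worker polls, join the worker, and only then tear down shared state. In sketch form, with shared_state standing in for the_trace_ (names illustrative):

    #include <pthread.h>

    void StopWorker(pthread_t worker, void** shared_state) {
      *shared_state = nullptr;        // 1. Publish the shutdown condition.
      pthread_join(worker, nullptr);  // 2. Join before freeing shared data.
      // 3. Only now is it safe to delete what the worker used to read.
    }
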
diff --git a/runtime/trace.h b/runtime/trace.h
index 9ba30d5..dd8186a 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -95,9 +95,7 @@
mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field,
const JValue& field_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
- void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
- mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
- mirror::Throwable* exception_object)
+ void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t dex_pc_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index c0fd7a5..3b708f6 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -75,8 +75,7 @@
CHECK(IsAborted()) << "Rethrow InternalError while transaction is not aborted";
}
std::string abort_msg(GetAbortMessage());
- self->ThrowNewException(self->GetCurrentLocationForThrow(), "Ljava/lang/InternalError;",
- abort_msg.c_str());
+ self->ThrowNewWrappedException("Ljava/lang/InternalError;", abort_msg.c_str());
}
bool Transaction::IsAborted() {
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 851eceb..8a23ff7 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1508,23 +1508,6 @@
return filename;
}
-std::string DexFilenameToOdexFilename(const std::string& location, const InstructionSet isa) {
- // location = /foo/bar/baz.jar
- // odex_location = /foo/bar/<isa>/baz.odex
- std::string odex_location(location);
- InsertIsaDirectory(isa, &odex_location);
- size_t dot_index = odex_location.rfind('.');
-
- // The location must have an extension, otherwise it's not clear what we
- // should return.
- CHECK_NE(dot_index, std::string::npos) << odex_location;
- CHECK_EQ(std::string::npos, odex_location.find('/', dot_index)) << odex_location;
-
- odex_location.resize(dot_index + 1);
- odex_location += "odex";
- return odex_location;
-}
-
bool IsZipMagic(uint32_t magic) {
return (('P' == ((magic >> 0) & 0xff)) &&
('K' == ((magic >> 8) & 0xff)));
diff --git a/runtime/utils.h b/runtime/utils.h
index 9d04d35..9a9f51a 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -173,6 +173,24 @@
return static_cast<uint32_t>(value >> 32);
}
+// Traits class providing an unsigned integer type of (byte) size `n`.
+template <size_t n>
+struct UnsignedIntegerType {
+ // No defined `type`.
+};
+
+template <>
+struct UnsignedIntegerType<1> { typedef uint8_t type; };
+
+template <>
+struct UnsignedIntegerType<2> { typedef uint16_t type; };
+
+template <>
+struct UnsignedIntegerType<4> { typedef uint32_t type; };
+
+template <>
+struct UnsignedIntegerType<8> { typedef uint64_t type; };
+
// Type identity.
template <typename T>
struct TypeIdentity {
@@ -271,6 +289,12 @@
}
template<typename T>
+static inline int WhichPowerOf2(T x) {
+ DCHECK((x != 0) && IsPowerOfTwo(x));
+ return CTZ(x);
+}
+
+template<typename T>
static constexpr int POPCOUNT(T x) {
return (sizeof(T) == sizeof(uint32_t))
? __builtin_popcount(x)
@@ -309,7 +333,7 @@
// Tests whether 's' starts with 'prefix'.
bool StartsWith(const std::string& s, const char* prefix);
-// Tests whether 's' starts with 'suffix'.
+// Tests whether 's' ends with 'suffix'.
bool EndsWith(const std::string& s, const char* suffix);
// Used to implement PrettyClass, PrettyField, PrettyMethod, and PrettyTypeOf,
@@ -516,12 +540,6 @@
// Returns the system location for an image
std::string GetSystemImageFilename(const char* location, InstructionSet isa);
-// Returns an .odex file name adjacent to the dex location.
-// For example, for "/foo/bar/baz.jar", return "/foo/bar/<isa>/baz.odex".
-// The dex location must include a directory component and have an extension.
-// Note: does not support multidex location strings.
-std::string DexFilenameToOdexFilename(const std::string& location, InstructionSet isa);
-
// Check whether the given magic matches a known file type.
bool IsZipMagic(uint32_t magic);
bool IsDexMagic(uint32_t magic);
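
The new UnsignedIntegerType trait and WhichPowerOf2 helper compose as follows; a small usage sketch assuming a 32- or 64-bit target (the trait definitions are repeated so the snippet stands alone):

    #include <cstddef>
    #include <cstdint>

    template <size_t n> struct UnsignedIntegerType {};  // As in utils.h.
    template <> struct UnsignedIntegerType<4> { typedef uint32_t type; };
    template <> struct UnsignedIntegerType<8> { typedef uint64_t type; };

    // Pick a word-sized unsigned type for the current target.
    typedef UnsignedIntegerType<sizeof(void*)>::type word_t;
    static_assert(sizeof(word_t) == sizeof(void*), "word_t mirrors pointers");

    // WhichPowerOf2 is CTZ restricted to powers of two, e.g.
    // WhichPowerOf2(1u) == 0, WhichPowerOf2(8u) == 3, WhichPowerOf2(0x100u) == 8.
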
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index 5465762..6b36c19 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -371,13 +371,6 @@
GetSystemImageFilename("/system/framework/boot.art", kArm).c_str());
}
-TEST_F(UtilsTest, DexFilenameToOdexFilename) {
- EXPECT_STREQ("/foo/bar/arm/baz.odex",
- DexFilenameToOdexFilename("/foo/bar/baz.jar", kArm).c_str());
- EXPECT_STREQ("/foo/bar/arm/baz.odex",
- DexFilenameToOdexFilename("/foo/bar/baz.funnyext", kArm).c_str());
-}
-
TEST_F(UtilsTest, ExecSuccess) {
std::vector<std::string> command;
if (kIsTargetBuild) {
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 87a29ed..b3f686d 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -513,7 +513,7 @@
}
const Instruction* inst = Instruction::At(code_item_->insns_ + dex_pc);
const bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
- return GetQuickInvokedMethod(inst, register_line, is_range);
+ return GetQuickInvokedMethod(inst, register_line, is_range, false);
}
bool MethodVerifier::Verify() {
@@ -572,6 +572,17 @@
// If we fail again at runtime, mark that this instruction would throw and force this
// method to be executed using the interpreter with checks.
have_pending_runtime_throw_failure_ = true;
+
+ // We need to save the work_line if the instruction wasn't throwing before. Otherwise we'll
+ // try to merge garbage.
+ // Note: this assumes that Fail is called before we do any work_line modifications.
+ const uint16_t* insns = code_item_->insns_ + work_insn_idx_;
+ const Instruction* inst = Instruction::At(insns);
+ int opcode_flags = Instruction::FlagsOf(inst->Opcode());
+
+ if ((opcode_flags & Instruction::kThrow) == 0 && CurrentInsnFlags()->IsInTry()) {
+ saved_line_->CopyFromLine(work_line_.get());
+ }
}
break;
// Indication that verification should be retried at runtime.
@@ -3431,10 +3442,14 @@
}
mirror::ArtMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst,
- RegisterLine* reg_line, bool is_range) {
- DCHECK(inst->Opcode() == Instruction::INVOKE_VIRTUAL_QUICK ||
- inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
- const RegType& actual_arg_type = reg_line->GetInvocationThis(this, inst, is_range);
+ RegisterLine* reg_line, bool is_range,
+ bool allow_failure) {
+ if (is_range) {
+ DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
+ } else {
+ DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_VIRTUAL_QUICK);
+ }
+ const RegType& actual_arg_type = reg_line->GetInvocationThis(this, inst, is_range, allow_failure);
if (!actual_arg_type.HasClass()) {
VLOG(verifier) << "Failed to get mirror::Class* from '" << actual_arg_type << "'";
return nullptr;
@@ -3445,29 +3460,29 @@
// Derive Object.class from Class.class.getSuperclass().
mirror::Class* object_klass = klass->GetClass()->GetSuperClass();
if (FailOrAbort(this, object_klass->IsObjectClass(),
- "Failed to find Object class in quickened invoke receiver",
- work_insn_idx_)) {
+ "Failed to find Object class in quickened invoke receiver", work_insn_idx_)) {
return nullptr;
}
dispatch_class = object_klass;
} else {
dispatch_class = klass;
}
- if (FailOrAbort(this, dispatch_class->HasVTable(),
- "Receiver class has no vtable for quickened invoke at ",
- work_insn_idx_)) {
+ if (!dispatch_class->HasVTable()) {
+ FailOrAbort(this, allow_failure, "Receiver class has no vtable for quickened invoke at ",
+ work_insn_idx_);
return nullptr;
}
uint16_t vtable_index = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
- if (FailOrAbort(this, static_cast<int32_t>(vtable_index) < dispatch_class->GetVTableLength(),
- "Receiver class has not enough vtable slots for quickened invoke at ",
- work_insn_idx_)) {
+ if (static_cast<int32_t>(vtable_index) >= dispatch_class->GetVTableLength()) {
+ FailOrAbort(this, allow_failure,
+ "Receiver class has not enough vtable slots for quickened invoke at ",
+ work_insn_idx_);
return nullptr;
}
mirror::ArtMethod* res_method = dispatch_class->GetVTableEntry(vtable_index);
- if (FailOrAbort(this, !self_->IsExceptionPending(),
- "Unexpected exception pending for quickened invoke at ",
- work_insn_idx_)) {
+ if (self_->IsExceptionPending()) {
+ FailOrAbort(this, allow_failure, "Unexpected exception pending for quickened invoke at ",
+ work_insn_idx_);
return nullptr;
}
return res_method;
@@ -3478,8 +3493,7 @@
DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_)
<< PrettyMethod(dex_method_idx_, *dex_file_, true) << "@" << work_insn_idx_;
- mirror::ArtMethod* res_method = GetQuickInvokedMethod(inst, work_line_.get(),
- is_range);
+ mirror::ArtMethod* res_method = GetQuickInvokedMethod(inst, work_line_.get(), is_range, false);
if (res_method == nullptr) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer method from " << inst->Name();
return nullptr;
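
The saved_line_ snapshot added in Verify() exists because an instruction may become throwing only at verification time while sitting inside a try block: the catch edge must merge the pre-instruction register state, not a half-updated work line. Condensed into a sketch (Line and the flags are stand-ins for the verifier's types):

    #include <vector>

    // Minimal stand-in for a verifier register line.
    struct Line {
      std::vector<int> regs;
      void CopyFrom(const Line& other) { regs = other.regs; }
    };

    // Before an instruction is treated as throwing for the first time,
    // snapshot the pre-instruction state so exception edges merge sane values.
    void OnNewRuntimeThrow(Line* saved, const Line& work,
                           bool was_throwing, bool in_try_block) {
      if (!was_throwing && in_try_block) {
        saved->CopyFrom(work);  // Otherwise the handler would merge garbage.
      }
    }
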
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index bdd6259..d7c2071 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -244,7 +244,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns the method of a quick invoke or nullptr if it cannot be found.
mirror::ArtMethod* GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line,
- bool is_range)
+ bool is_range, bool allow_failure)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns the access field of a quick field access (iget/iput-quick) or nullptr
// if it cannot be found.
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 3b09871..ed588fc 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -81,18 +81,23 @@
}
const RegType& RegisterLine::GetInvocationThis(MethodVerifier* verifier, const Instruction* inst,
- bool is_range) {
+ bool is_range, bool allow_failure) {
const size_t args_count = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
if (args_count < 1) {
- verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke lacks 'this'";
+ if (!allow_failure) {
+ verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke lacks 'this'";
+ }
return verifier->GetRegTypeCache()->Conflict();
}
/* Get the element type of the array held in vsrc */
const uint32_t this_reg = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
const RegType& this_type = GetRegisterType(verifier, this_reg);
if (!this_type.IsReferenceTypes()) {
- verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "tried to get class from non-reference register v"
- << this_reg << " (type=" << this_type << ")";
+ if (!allow_failure) {
+ verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "tried to get class from non-reference register v" << this_reg
+ << " (type=" << this_type << ")";
+ }
return verifier->GetRegTypeCache()->Conflict();
}
return this_type;
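
The allow_failure flag threaded through GetInvocationThis and GetQuickInvokedMethod follows a common verifier pattern: one lookup serves both strict verification, which records a hard failure, and speculative queries, which quietly accept the Conflict() sentinel. A self-contained sketch of the shape (all types hypothetical):

    #include <cstddef>
    #include <iostream>
    #include <string>

    struct RegType { bool is_conflict; };

    struct Verifier {
      RegType conflict_{true};
      void Fail(const std::string& msg) { std::cerr << "VFY: " << msg << "\n"; }
      const RegType& Conflict() { return conflict_; }
    };

    // One lookup, two callers: strict verification records a hard failure,
    // while speculative callers (allow_failure == true) just get the sentinel.
    const RegType& GetThisSketch(Verifier& v, size_t args_count,
                                 bool allow_failure) {
      static const RegType ok{false};
      if (args_count < 1) {
        if (!allow_failure) {
          v.Fail("invoke lacks 'this'");
        }
        return v.Conflict();
      }
      return ok;
    }
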
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index ca61a0b..376dbf1 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -188,9 +188,11 @@
*
* The argument count is in vA, and the first argument is in vC, for both "simple" and "range"
* versions. We just need to make sure vA is >= 1 and then return vC.
+ * If allow_failure is true, Conflict() is returned instead of flagging a verification
+ * failure when an error occurs.
*/
const RegType& GetInvocationThis(MethodVerifier* verifier, const Instruction* inst,
- bool is_range)
+ bool is_range, bool allow_failure = false)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
diff --git a/test/080-oom-throw-with-finalizer/src/Main.java b/test/080-oom-throw-with-finalizer/src/Main.java
index 57e9721..61a1b75 100644
--- a/test/080-oom-throw-with-finalizer/src/Main.java
+++ b/test/080-oom-throw-with-finalizer/src/Main.java
@@ -59,13 +59,22 @@
// Keep holder alive to make instance OOM happen faster.
holder = new char[128 * 1024][];
if (!triggerArrayOOM(holder)) {
+ // The test failed here. To avoid potential OOME during println,
+ // make holder unreachable.
+ holder = null;
System.out.println("NEW_ARRAY did not throw OOME");
}
if (!triggerInstanceFinalizerOOM()) {
+ // The test failed here. To avoid potential OOME during println,
+ // make holder unreachable.
+ holder = null;
System.out.println("NEW_INSTANCE (finalize) did not throw OOME");
}
+ // Make holder unreachable here so that the Sentinel
+ // allocation in runFinalization() won't fail.
+ holder = null;
System.runFinalization();
}
}
diff --git a/test/135-MirandaDispatch/expected.txt b/test/135-MirandaDispatch/expected.txt
new file mode 100644
index 0000000..134d8d0
--- /dev/null
+++ b/test/135-MirandaDispatch/expected.txt
@@ -0,0 +1 @@
+Finishing
diff --git a/test/135-MirandaDispatch/info.txt b/test/135-MirandaDispatch/info.txt
new file mode 100644
index 0000000..22d2777
--- /dev/null
+++ b/test/135-MirandaDispatch/info.txt
@@ -0,0 +1,6 @@
+Regression test for JIT-related incompatible class changes caused by miranda methods.
+E.g.
+java.lang.IncompatibleClassChangeError: The method 'void Main$TheInterface.m()' was expected to be of type virtual but instead was found to be of type interface (declaration of 'java.lang.reflect.ArtMethod' appears in out/host/linux-x86/framework/core-libart-hostdex.jar)
+ at Main.DoStuff(Main.java:37)
+ at Main.main(Main.java:44)
+
diff --git a/test/135-MirandaDispatch/src/Main.java b/test/135-MirandaDispatch/src/Main.java
new file mode 100644
index 0000000..bb005b0
--- /dev/null
+++ b/test/135-MirandaDispatch/src/Main.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ // Enough to trigger JIT.
+ static final int loopIterations = 5000;
+ static int counter = 0;
+
+ static interface TheInterface {
+ public void m();
+ }
+
+ static abstract class AbstractClass implements TheInterface {
+ }
+
+ static class ConcreteClass extends AbstractClass {
+ public void m() {
+ ++counter;
+ }
+ }
+
+ static void doStuff(AbstractClass c) {
+ for (int i = 0; i < loopIterations; ++i) {
+ c.m();
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ ConcreteClass o = new ConcreteClass();
+ for (int i = 0; i < loopIterations; ++i) {
+ doStuff(o);
+ }
+ if (counter != loopIterations * loopIterations) {
+ System.out.println("Expected " + loopIterations * loopIterations + " got " + counter);
+ }
+ System.out.println("Finishing");
+ }
+}
diff --git a/test/442-checker-constant-folding/src/Main.java b/test/442-checker-constant-folding/src/Main.java
index de2c5c7..6b21fed 100644
--- a/test/442-checker-constant-folding/src/Main.java
+++ b/test/442-checker-constant-folding/src/Main.java
@@ -16,6 +16,18 @@
public class Main {
+ public static void assertIntEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertLongEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
/**
* Tiny three-register program exercising int constant folding
* on negation.
@@ -219,41 +231,203 @@
return c;
}
+ /**
+ * Test optimizations of arithmetic identities yielding a constant result.
+ */
+
+ // CHECK-START: int Main.And0(int) constant_folding (before)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: [[And:i\d+]] And [ [[Arg]] [[Const0]] ]
+ // CHECK-DAG: Return [ [[And]] ]
+
+ // CHECK-START: int Main.And0(int) constant_folding (after)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-NOT: And
+ // CHECK-DAG: Return [ [[Const0]] ]
+
+ public static int And0(int arg) {
+ return arg & 0;
+ }
+
+ // CHECK-START: long Main.Mul0(long) constant_folding (before)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
+ // CHECK-DAG: [[Mul:j\d+]] Mul [ [[Arg]] [[Const0]] ]
+ // CHECK-DAG: Return [ [[Mul]] ]
+
+ // CHECK-START: long Main.Mul0(long) constant_folding (after)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
+ // CHECK-NOT: Mul
+ // CHECK-DAG: Return [ [[Const0]] ]
+
+ public static long Mul0(long arg) {
+ return arg * 0;
+ }
+
+ // CHECK-START: int Main.OrAllOnes(int) constant_folding (before)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[ConstF:i\d+]] IntConstant -1
+ // CHECK-DAG: [[Or:i\d+]] Or [ [[Arg]] [[ConstF]] ]
+ // CHECK-DAG: Return [ [[Or]] ]
+
+ // CHECK-START: int Main.OrAllOnes(int) constant_folding (after)
+ // CHECK-DAG: [[ConstF:i\d+]] IntConstant -1
+ // CHECK-NOT: Or
+ // CHECK-DAG: Return [ [[ConstF]] ]
+
+ public static int OrAllOnes(int arg) {
+ return arg | -1;
+ }
+
+ // CHECK-START: long Main.Rem0(long) constant_folding (before)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
+ // CHECK-DAG: [[DivZeroCheck:j\d+]] DivZeroCheck [ [[Arg]] ]
+ // CHECK-DAG: [[Rem:j\d+]] Rem [ [[Const0]] [[DivZeroCheck]] ]
+ // CHECK-DAG: Return [ [[Rem]] ]
+
+ // CHECK-START: long Main.Rem0(long) constant_folding (after)
+ // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
+ // CHECK-NOT: Rem
+ // CHECK-DAG: Return [ [[Const0]] ]
+
+ public static long Rem0(long arg) {
+ return 0 % arg;
+ }
+
+ // CHECK-START: int Main.Rem1(int) constant_folding (before)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
+ // CHECK-DAG: [[Rem:i\d+]] Rem [ [[Arg]] [[Const1]] ]
+ // CHECK-DAG: Return [ [[Rem]] ]
+
+ // CHECK-START: int Main.Rem1(int) constant_folding (after)
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-NOT: Rem
+ // CHECK-DAG: Return [ [[Const0]] ]
+
+ public static int Rem1(int arg) {
+ return arg % 1;
+ }
+
+ // CHECK-START: long Main.RemN1(long) constant_folding (before)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-DAG: [[ConstN1:j\d+]] LongConstant -1
+ // CHECK-DAG: [[DivZeroCheck:j\d+]] DivZeroCheck [ [[Arg]] ]
+ // CHECK-DAG: [[Rem:j\d+]] Rem [ [[Arg]] [[DivZeroCheck]] ]
+ // CHECK-DAG: Return [ [[Rem]] ]
+
+ // CHECK-START: long Main.RemN1(long) constant_folding (after)
+ // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
+ // CHECK-NOT: Rem
+ // CHECK-DAG: Return [ [[Const0]] ]
+
+ public static long RemN1(long arg) {
+ return arg % -1;
+ }
+
+ // CHECK-START: int Main.Shl0(int) constant_folding (before)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: [[Shl:i\d+]] Shl [ [[Const0]] [[Arg]] ]
+ // CHECK-DAG: Return [ [[Shl]] ]
+
+ // CHECK-START: int Main.Shl0(int) constant_folding (after)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-NOT: Shl
+ // CHECK-DAG: Return [ [[Const0]] ]
+
+ public static int Shl0(int arg) {
+ return 0 << arg;
+ }
+
+ // CHECK-START: long Main.Shr0(int) constant_folding (before)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
+ // CHECK-DAG: [[Shr:j\d+]] Shr [ [[Const0]] [[Arg]] ]
+ // CHECK-DAG: Return [ [[Shr]] ]
+
+ // CHECK-START: long Main.Shr0(int) constant_folding (after)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
+ // CHECK-NOT: Shr
+ // CHECK-DAG: Return [ [[Const0]] ]
+
+ public static long Shr0(int arg) {
+ return (long)0 >> arg;
+ }
+
+ // CHECK-START: long Main.SubSameLong(long) constant_folding (before)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-DAG: [[Sub:j\d+]] Sub [ [[Arg]] [[Arg]] ]
+ // CHECK-DAG: Return [ [[Sub]] ]
+
+ // CHECK-START: long Main.SubSameLong(long) constant_folding (after)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
+ // CHECK-NOT: Sub
+ // CHECK-DAG: Return [ [[Const0]] ]
+
+ public static long SubSameLong(long arg) {
+ return arg - arg;
+ }
+
+ // CHECK-START: int Main.UShr0(int) constant_folding (before)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: [[UShr:i\d+]] UShr [ [[Const0]] [[Arg]] ]
+ // CHECK-DAG: Return [ [[UShr]] ]
+
+ // CHECK-START: int Main.UShr0(int) constant_folding (after)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-NOT: UShr
+ // CHECK-DAG: Return [ [[Const0]] ]
+
+ public static int UShr0(int arg) {
+ return 0 >>> arg;
+ }
+
+ // CHECK-START: int Main.XorSameInt(int) constant_folding (before)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Xor:i\d+]] Xor [ [[Arg]] [[Arg]] ]
+ // CHECK-DAG: Return [ [[Xor]] ]
+
+ // CHECK-START: int Main.XorSameInt(int) constant_folding (after)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-NOT: Xor
+ // CHECK-DAG: Return [ [[Const0]] ]
+
+ public static int XorSameInt(int arg) {
+ return arg ^ arg;
+ }
+
public static void main(String[] args) {
- if (IntNegation() != -42) {
- throw new Error();
- }
-
- if (IntAddition1() != 3) {
- throw new Error();
- }
-
- if (IntAddition2() != 14) {
- throw new Error();
- }
-
- if (IntSubtraction() != 4) {
- throw new Error();
- }
-
- if (LongAddition() != 3L) {
- throw new Error();
- }
-
- if (LongSubtraction() != 4L) {
- throw new Error();
- }
-
- if (StaticCondition() != 5) {
- throw new Error();
- }
-
- if (JumpsAndConditionals(true) != 7) {
- throw new Error();
- }
-
- if (JumpsAndConditionals(false) != 3) {
- throw new Error();
- }
+ assertIntEquals(-42, IntNegation());
+ assertIntEquals(3, IntAddition1());
+ assertIntEquals(14, IntAddition2());
+ assertIntEquals(4, IntSubtraction());
+ assertLongEquals(3L, LongAddition());
+ assertLongEquals(4L, LongSubtraction());
+ assertIntEquals(5, StaticCondition());
+ assertIntEquals(7, JumpsAndConditionals(true));
+ assertIntEquals(3, JumpsAndConditionals(false));
+ int random = 123456; // Chosen randomly.
+ assertIntEquals(0, And0(random));
+ assertLongEquals(0L, Mul0(random));
+ assertIntEquals(-1, OrAllOnes(random));
+ assertLongEquals(0L, Rem0(random));
+ assertIntEquals(0, Rem1(random));
+ assertLongEquals(0L, RemN1(random));
+ assertIntEquals(0, Shl0(random));
+ assertLongEquals(0L, Shr0(random));
+ assertLongEquals(0L, SubSameLong(random));
+ assertIntEquals(0, UShr0(random));
+ assertIntEquals(0, XorSameInt(random));
}
}
diff --git a/test/457-regs/expected.txt b/test/457-regs/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/457-regs/expected.txt
diff --git a/test/457-regs/info.txt b/test/457-regs/info.txt
new file mode 100644
index 0000000..d950003
--- /dev/null
+++ b/test/457-regs/info.txt
@@ -0,0 +1 @@
+Tests debuggability of DEX registers.
diff --git a/test/457-regs/regs_jni.cc b/test/457-regs/regs_jni.cc
new file mode 100644
index 0000000..ce701e8
--- /dev/null
+++ b/test/457-regs/regs_jni.cc
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "arch/context.h"
+#include "jni.h"
+#include "mirror/art_method-inl.h"
+#include "scoped_thread_state_change.h"
+#include "stack.h"
+#include "thread.h"
+
+namespace art {
+
+namespace {
+
+class TestVisitor : public StackVisitor {
+ public:
+ TestVisitor(Thread* thread, Context* context)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : StackVisitor(thread, context) {}
+
+ bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* m = GetMethod();
+ std::string m_name(m->GetName());
+
+ if (m_name.compare("mergeOk") == 0) {
+ uint32_t value = 0;
+
+ CHECK(GetVReg(m, 0, kIntVReg, &value));
+ CHECK_EQ(value, 0u);
+
+ CHECK(GetVReg(m, 1, kIntVReg, &value));
+ CHECK_EQ(value, 1u);
+
+ CHECK(GetVReg(m, 2, kIntVReg, &value));
+ CHECK_EQ(value, 2u);
+
+ CHECK(GetVReg(m, 3, kIntVReg, &value));
+ CHECK_EQ(value, 1u);
+
+ CHECK(GetVReg(m, 4, kIntVReg, &value));
+ CHECK_EQ(value, 2u);
+ did_check_ = true;
+ } else if (m_name.compare("mergeNotOk") == 0) {
+ uint32_t value = 0;
+
+ CHECK(GetVReg(m, 0, kIntVReg, &value));
+ CHECK_EQ(value, 0u);
+
+ CHECK(GetVReg(m, 1, kIntVReg, &value));
+ CHECK_EQ(value, 1u);
+
+ bool success = GetVReg(m, 2, kIntVReg, &value);
+ if (m->IsOptimized(sizeof(void*))) CHECK(!success);
+
+ CHECK(GetVReg(m, 3, kReferenceVReg, &value));
+ CHECK_EQ(value, 1u);
+
+ CHECK(GetVReg(m, 4, kFloatVReg, &value));
+ uint32_t cast = bit_cast<float, uint32_t>(4.0f);
+ CHECK_EQ(value, cast);
+ did_check_ = true;
+ } else if (m_name.compare("phiEquivalent") == 0) {
+ uint32_t value = 0;
+
+ CHECK(GetVReg(m, 0, kIntVReg, &value));
+ // Quick doesn't like this one on x64.
+ CHECK_EQ(value, 0u);
+
+ CHECK(GetVReg(m, 1, kIntVReg, &value));
+ CHECK_EQ(value, 1u);
+
+ CHECK(GetVReg(m, 2, kFloatVReg, &value));
+ CHECK_EQ(value, 1u);
+
+ did_check_ = true;
+ } else if (m_name.compare("mergeReferences") == 0) {
+ uint32_t value = 0;
+
+ CHECK(GetVReg(m, 0, kIntVReg, &value));
+ CHECK_EQ(value, 0u);
+
+ CHECK(GetVReg(m, 1, kIntVReg, &value));
+ CHECK_EQ(value, 1u);
+
+ CHECK(GetVReg(m, 2, kReferenceVReg, &value));
+ CHECK_EQ(value, 0u);
+
+ CHECK(GetVReg(m, 3, kReferenceVReg, &value));
+ CHECK_NE(value, 0u);
+
+ did_check_ = true;
+ } else if (m_name.compare("phiAllEquivalents") == 0) {
+ uint32_t value = 0;
+
+ CHECK(GetVReg(m, 0, kIntVReg, &value));
+ CHECK_EQ(value, 0u);
+
+ CHECK(GetVReg(m, 1, kIntVReg, &value));
+ CHECK_EQ(value, 1u);
+
+ CHECK(GetVReg(m, 2, kReferenceVReg, &value));
+ CHECK_EQ(value, 0u);
+
+ did_check_ = true;
+ }
+
+ return true;
+ }
+
+ bool did_check_ = false;
+};
+
+extern "C" JNIEXPORT void JNICALL Java_PhiLiveness_regsNativeCall(
+ JNIEnv*, jclass value ATTRIBUTE_UNUSED) {
+ ScopedObjectAccess soa(Thread::Current());
+ std::unique_ptr<Context> context(Context::Create());
+ TestVisitor visitor(soa.Self(), context.get());
+ visitor.WalkStack();
+ CHECK(visitor.did_check_);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_PhiLiveness_regsNativeCallWithParameters(
+ JNIEnv*, jclass value ATTRIBUTE_UNUSED, jobject main, jint int_value, jfloat float_value) {
+ ScopedObjectAccess soa(Thread::Current());
+ std::unique_ptr<Context> context(Context::Create());
+ CHECK(soa.Decode<mirror::Object*>(main) == nullptr);
+ CHECK_EQ(int_value, 0);
+ int32_t cast = bit_cast<float, int32_t>(float_value);
+ CHECK_EQ(cast, 0);
+ TestVisitor visitor(soa.Self(), context.get());
+ visitor.WalkStack();
+ CHECK(visitor.did_check_);
+}
+
+} // namespace
+
+} // namespace art
diff --git a/test/457-regs/smali/PhiLiveness.smali b/test/457-regs/smali/PhiLiveness.smali
new file mode 100644
index 0000000..c8a6773
--- /dev/null
+++ b/test/457-regs/smali/PhiLiveness.smali
@@ -0,0 +1,82 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LPhiLiveness;
+
+.super Ljava/lang/Object;
+
+.method public static mergeOk(ZB)V
+ .registers 5
+ const/4 v0, 0x0
+ const/4 v1, 0x1
+ move v2, v3
+ if-eq v1, v0, :else
+ move v2, v4
+ :else
+ invoke-static {}, LPhiLiveness;->regsNativeCall()V
+ return-void
+.end method
+
+.method public static mergeNotOk(ZF)V
+ .registers 5
+ const/4 v0, 0x0
+ const/4 v1, 0x1
+ move v2, v3
+ if-eq v1, v0, :else
+ move v2, v4
+ :else
+ invoke-static {}, LPhiLiveness;->regsNativeCall()V
+ return-void
+.end method
+
+.method public static mergeReferences(LMain;)V
+ .registers 4
+ const/4 v0, 0x0
+ const/4 v1, 0x1
+ move-object v2, p0
+ if-eq v1, v0, :else
+ move v2, v0
+ :else
+ invoke-static {}, LPhiLiveness;->regsNativeCall()V
+ return-void
+.end method
+
+.method public static phiEquivalent()F
+ .registers 5
+ const/4 v0, 0x0
+ const/4 v1, 0x1
+ move v2, v0
+ if-eq v1, v0, :else
+ move v2, v1
+ :else
+ invoke-static {}, LPhiLiveness;->regsNativeCall()V
+ return v2
+.end method
+
+.method public static phiAllEquivalents(LMain;)V
+ .registers 4
+ const/4 v0, 0x0
+ const/4 v1, 0x1
+ move v2, v0
+ if-eq v1, v0, :else
+ move v2, v0
+ :else
+ invoke-static {v2, v2, v2}, LPhiLiveness;->regsNativeCallWithParameters(LMain;IF)V
+ return-void
+.end method
+
+.method public static native regsNativeCall()V
+.end method
+.method public static native regsNativeCallWithParameters(LMain;IF)V
+.end method
diff --git a/test/457-regs/src/Main.java b/test/457-regs/src/Main.java
new file mode 100644
index 0000000..0d82033
--- /dev/null
+++ b/test/457-regs/src/Main.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+
+ // Workaround for b/18051191.
+ class InnerClass {}
+
+ public static void main(String[] args) throws Exception {
+ Class<?> c = Class.forName("PhiLiveness");
+ Method m = c.getMethod("mergeOk", boolean.class, byte.class);
+ m.invoke(null, new Boolean(true), new Byte((byte)2));
+
+ m = c.getMethod("mergeNotOk", boolean.class, float.class);
+ m.invoke(null, new Boolean(true), new Float(4.0f));
+
+ m = c.getMethod("mergeReferences", Main.class);
+ m.invoke(null, new Main());
+
+ m = c.getMethod("phiEquivalent");
+ m.invoke(null);
+
+ m = c.getMethod("phiAllEquivalents", Main.class);
+ m.invoke(null, new Main());
+ }
+
+ static {
+ System.loadLibrary("arttest");
+ }
+}
diff --git a/test/458-checker-instruction-simplification/expected.txt b/test/458-checker-instruction-simplification/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/458-checker-instruction-simplification/expected.txt
diff --git a/test/458-checker-instruction-simplification/info.txt b/test/458-checker-instruction-simplification/info.txt
new file mode 100644
index 0000000..09da84b
--- /dev/null
+++ b/test/458-checker-instruction-simplification/info.txt
@@ -0,0 +1 @@
+Tests optimizations of arithmetic identities in the optimizing compiler.
diff --git a/test/458-checker-instruction-simplification/src/Main.java b/test/458-checker-instruction-simplification/src/Main.java
new file mode 100644
index 0000000..ef6428d
--- /dev/null
+++ b/test/458-checker-instruction-simplification/src/Main.java
@@ -0,0 +1,300 @@
+/*
+* Copyright (C) 2015 The Android Open Source Project
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+public class Main {
+
+ public static void assertIntEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertLongEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ /**
+ * Tiny programs exercising optimizations of arithmetic identities.
+ */
+
+ // CHECK-START: long Main.Add0(long) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
+ // CHECK-DAG: [[Add:j\d+]] Add [ [[Const0]] [[Arg]] ]
+ // CHECK-DAG: Return [ [[Add]] ]
+
+ // CHECK-START: long Main.Add0(long) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-NOT: Add
+ // CHECK-DAG: Return [ [[Arg]] ]
+
+ public static long Add0(long arg) {
+ return 0 + arg;
+ }
+
+ // CHECK-START: int Main.AndAllOnes(int) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[ConstF:i\d+]] IntConstant -1
+ // CHECK-DAG: [[And:i\d+]] And [ [[Arg]] [[ConstF]] ]
+ // CHECK-DAG: Return [ [[And]] ]
+
+ // CHECK-START: int Main.AndAllOnes(int) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-NOT: And
+ // CHECK-DAG: Return [ [[Arg]] ]
+
+ public static int AndAllOnes(int arg) {
+ return arg & -1;
+ }
+
+ // CHECK-START: long Main.Div1(long) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-DAG: [[Const1:j\d+]] LongConstant 1
+ // CHECK-DAG: [[Div:j\d+]] Div [ [[Arg]] [[Const1]] ]
+ // CHECK-DAG: Return [ [[Div]] ]
+
+ // CHECK-START: long Main.Div1(long) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-NOT: Div
+ // CHECK-DAG: Return [ [[Arg]] ]
+
+ public static long Div1(long arg) {
+ return arg / 1;
+ }
+
+ // CHECK-START: int Main.DivN1(int) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[ConstN1:i\d+]] IntConstant -1
+ // CHECK-DAG: [[Div:i\d+]] Div [ [[Arg]] [[ConstN1]] ]
+ // CHECK-DAG: Return [ [[Div]] ]
+
+ // CHECK-START: int Main.DivN1(int) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Neg:i\d+]] Neg [ [[Arg]] ]
+ // CHECK-NOT: Div
+ // CHECK-DAG: Return [ [[Neg]] ]
+
+ public static int DivN1(int arg) {
+ return arg / -1;
+ }
+
+ // CHECK-START: long Main.Mul1(long) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-DAG: [[Const1:j\d+]] LongConstant 1
+ // CHECK-DAG: [[Mul:j\d+]] Mul [ [[Arg]] [[Const1]] ]
+ // CHECK-DAG: Return [ [[Mul]] ]
+
+ // CHECK-START: long Main.Mul1(long) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-NOT: Mul
+ // CHECK-DAG: Return [ [[Arg]] ]
+
+ public static long Mul1(long arg) {
+ return arg * 1;
+ }
+
+ // CHECK-START: int Main.MulN1(int) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[ConstN1:i\d+]] IntConstant -1
+ // CHECK-DAG: [[Mul:i\d+]] Mul [ [[Arg]] [[ConstN1]] ]
+ // CHECK-DAG: Return [ [[Mul]] ]
+
+ // CHECK-START: int Main.MulN1(int) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Neg:i\d+]] Neg [ [[Arg]] ]
+ // CHECK-NOT: Mul
+ // CHECK-DAG: Return [ [[Neg]] ]
+
+ public static int MulN1(int arg) {
+ return arg * -1;
+ }
+
+ // CHECK-START: long Main.MulPowerOfTwo128(long) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-DAG: [[Const128:j\d+]] LongConstant 128
+ // CHECK-DAG: [[Mul:j\d+]] Mul [ [[Arg]] [[Const128]] ]
+ // CHECK-DAG: Return [ [[Mul]] ]
+
+ // CHECK-START: long Main.MulPowerOfTwo128(long) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-DAG: [[Const7:i\d+]] IntConstant 7
+ // CHECK-DAG: [[Shl:j\d+]] Shl [ [[Arg]] [[Const7]] ]
+ // CHECK-NOT: Mul
+ // CHECK-DAG: Return [ [[Shl]] ]
+
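+ // 128 == 1 << 7, so the simplifier replaces the multiplication with a left shift by 7.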
+ public static long MulPowerOfTwo128(long arg) {
+ return arg * 128;
+ }
+
+ // CHECK-START: int Main.Or0(int) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: [[Or:i\d+]] Or [ [[Arg]] [[Const0]] ]
+ // CHECK-DAG: Return [ [[Or]] ]
+
+ // CHECK-START: int Main.Or0(int) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-NOT: Or
+ // CHECK-DAG: Return [ [[Arg]] ]
+
+ public static int Or0(int arg) {
+ return arg | 0;
+ }
+
+ // CHECK-START: long Main.OrSame(long) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-DAG: [[Or:j\d+]] Or [ [[Arg]] [[Arg]] ]
+ // CHECK-DAG: Return [ [[Or]] ]
+
+ // CHECK-START: long Main.OrSame(long) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-NOT: Or
+ // CHECK-DAG: Return [ [[Arg]] ]
+
+ public static long OrSame(long arg) {
+ return arg | arg;
+ }
+
+ // CHECK-START: int Main.Shl0(int) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: [[Shl:i\d+]] Shl [ [[Arg]] [[Const0]] ]
+ // CHECK-DAG: Return [ [[Shl]] ]
+
+ // CHECK-START: int Main.Shl0(int) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-NOT: Shl
+ // CHECK-DAG: Return [ [[Arg]] ]
+
+ public static int Shl0(int arg) {
+ return arg << 0;
+ }
+
+ // CHECK-START: long Main.Shr0(long) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: [[Shr:j\d+]] Shr [ [[Arg]] [[Const0]] ]
+ // CHECK-DAG: Return [ [[Shr]] ]
+
+ // CHECK-START: long Main.Shr0(long) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-NOT: Shr
+ // CHECK-DAG: Return [ [[Arg]] ]
+
+ public static long Shr0(long arg) {
+ return arg >> 0;
+ }
+
+ // CHECK-START: long Main.Sub0(long) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
+ // CHECK-DAG: [[Sub:j\d+]] Sub [ [[Arg]] [[Const0]] ]
+ // CHECK-DAG: Return [ [[Sub]] ]
+
+ // CHECK-START: long Main.Sub0(long) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-NOT: Sub
+ // CHECK-DAG: Return [ [[Arg]] ]
+
+ public static long Sub0(long arg) {
+ return arg - 0;
+ }
+
+ // CHECK-START: int Main.SubAliasNeg(int) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: [[Sub:i\d+]] Sub [ [[Const0]] [[Arg]] ]
+ // CHECK-DAG: Return [ [[Sub]] ]
+
+ // CHECK-START: int Main.SubAliasNeg(int) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Neg:i\d+]] Neg [ [[Arg]] ]
+ // CHECK-NOT: Sub
+ // CHECK-DAG: Return [ [[Neg]] ]
+
+ public static int SubAliasNeg(int arg) {
+ return 0 - arg;
+ }
+
+ // CHECK-START: long Main.UShr0(long) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: [[UShr:j\d+]] UShr [ [[Arg]] [[Const0]] ]
+ // CHECK-DAG: Return [ [[UShr]] ]
+
+ // CHECK-START: long Main.UShr0(long) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-NOT: UShr
+ // CHECK-DAG: Return [ [[Arg]] ]
+
+ public static long UShr0(long arg) {
+ return arg >>> 0;
+ }
+
+ // CHECK-START: int Main.Xor0(int) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: [[Xor:i\d+]] Xor [ [[Arg]] [[Const0]] ]
+ // CHECK-DAG: Return [ [[Xor]] ]
+
+ // CHECK-START: int Main.Xor0(int) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-NOT: Xor
+ // CHECK-DAG: Return [ [[Arg]] ]
+
+ public static int Xor0(int arg) {
+ return arg ^ 0;
+ }
+
+ // CHECK-START: int Main.XorAllOnes(int) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[ConstF:i\d+]] IntConstant -1
+ // CHECK-DAG: [[Xor:i\d+]] Xor [ [[Arg]] [[ConstF]] ]
+ // CHECK-DAG: Return [ [[Xor]] ]
+
+ // CHECK-START: int Main.XorAllOnes(int) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Not:i\d+]] Not [ [[Arg]] ]
+ // CHECK-NOT: Xor
+ // CHECK-DAG: Return [ [[Not]] ]
+
+ public static int XorAllOnes(int arg) {
+ return arg ^ -1;
+ }
+
+ public static void main(String[] args) {
+ int arg = 123456;
+
+ assertLongEquals(arg, Add0(arg));
+ assertIntEquals(arg, AndAllOnes(arg));
+ assertLongEquals(arg, Div1(arg));
+ assertIntEquals(-arg, DivN1(arg));
+ assertLongEquals(arg, Mul1(arg));
+ assertIntEquals(-arg, MulN1(arg));
+ assertLongEquals((128 * arg), MulPowerOfTwo128(arg));
+ assertIntEquals(arg, Or0(arg));
+ assertLongEquals(arg, OrSame(arg));
+ assertIntEquals(arg, Shl0(arg));
+ assertLongEquals(arg, Shr0(arg));
+ assertLongEquals(arg, Sub0(arg));
+ assertIntEquals(-arg, SubAliasNeg(arg));
+ assertLongEquals(arg, UShr0(arg));
+ assertIntEquals(arg, Xor0(arg));
+ assertIntEquals(~arg, XorAllOnes(arg));
+ }
+}
diff --git a/test/458-long-to-fpu/expected.txt b/test/458-long-to-fpu/expected.txt
new file mode 100644
index 0000000..daaac9e
--- /dev/null
+++ b/test/458-long-to-fpu/expected.txt
@@ -0,0 +1,2 @@
+42
+42
diff --git a/test/458-long-to-fpu/info.txt b/test/458-long-to-fpu/info.txt
new file mode 100644
index 0000000..7459cfb
--- /dev/null
+++ b/test/458-long-to-fpu/info.txt
@@ -0,0 +1,2 @@
+Regression test for x86's code generator, which had a bug in
+the long-to-float and long-to-double implementations.
diff --git a/test/458-long-to-fpu/src/Main.java b/test/458-long-to-fpu/src/Main.java
new file mode 100644
index 0000000..a8b6e78
--- /dev/null
+++ b/test/458-long-to-fpu/src/Main.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ System.out.println(floatConvert(false));
+ System.out.println(doubleConvert(false));
+ }
+
+ public static long floatConvert(boolean flag) {
+ if (flag) {
+ // Try defeating inlining.
+ floatConvert(false);
+ }
+ long l = myLong;
+ myFloat = (float)l;
+ return l;
+ }
+
+ public static long doubleConvert(boolean flag) {
+ if (flag) {
+ // Try defeating inlining.
+ doubleConvert(false);
+ }
+ long l = myLong;
+ myDouble = (double)l;
+ return l;
+ }
+
+ public static long myLong = 42;
+ public static float myFloat = 2.0f;
+ public static double myDouble = 4.0d;
+}
diff --git a/test/459-dead-phi/expected.txt b/test/459-dead-phi/expected.txt
new file mode 100644
index 0000000..ba66466
--- /dev/null
+++ b/test/459-dead-phi/expected.txt
@@ -0,0 +1 @@
+0.0
diff --git a/test/459-dead-phi/info.txt b/test/459-dead-phi/info.txt
new file mode 100644
index 0000000..3f82ecb
--- /dev/null
+++ b/test/459-dead-phi/info.txt
@@ -0,0 +1 @@
+Regression test for the optimizing compiler when building the SSA form.
diff --git a/test/459-dead-phi/smali/EquivalentPhi.smali b/test/459-dead-phi/smali/EquivalentPhi.smali
new file mode 100644
index 0000000..4fa88a9
--- /dev/null
+++ b/test/459-dead-phi/smali/EquivalentPhi.smali
@@ -0,0 +1,41 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LEquivalentPhi;
+
+.super Ljava/lang/Object;
+
+.method public static equivalentPhi([F)F
+ .registers 5
+ const/4 v0, 0x0
+ # aget is initially expected to be an int, but will
+ # rightly become a float after type propagation.
+ aget v1, p0, v0
+ move v2, v1
+ if-eq v0, v0, :else
+ move v2, v0
+ :else
+ # v2 will be a phi with (int, int) as input
+ move v3, v2
+ if-eq v0, v0, :else2
+ move v3, v0
+ # v3 will be a phi with (int, int) as input.
+ :else2
+ # This instruction will lead to creating a phi equivalent
+ # for v3 with float type, which in turn will lead to creating
+ # a phi equivalent for v2 of type float. We used to forget to
+ # delete the old phi, which ends up having incompatible input
+ # types.
+ return v3
+.end method
diff --git a/test/459-dead-phi/src/Main.java b/test/459-dead-phi/src/Main.java
new file mode 100644
index 0000000..0ecc0bd
--- /dev/null
+++ b/test/459-dead-phi/src/Main.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+
+ // Workaround for b/18051191.
+ class InnerClass {}
+
+ public static void main(String[] args) throws Exception {
+ Class<?> c = Class.forName("EquivalentPhi");
+ Method m = c.getMethod("equivalentPhi", float[].class);
+ System.out.println(m.invoke(null, new float[] { 0.0f }));
+ }
+}
diff --git a/test/460-multiple-returns3/expected.txt b/test/460-multiple-returns3/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/460-multiple-returns3/expected.txt
diff --git a/test/460-multiple-returns3/info.txt b/test/460-multiple-returns3/info.txt
new file mode 100644
index 0000000..cdd354b
--- /dev/null
+++ b/test/460-multiple-returns3/info.txt
@@ -0,0 +1,2 @@
+Tests inlining of a pattern not generated by DX: multiple
+returns in a single method.
diff --git a/test/460-multiple-returns3/smali/MultipleReturns.smali b/test/460-multiple-returns3/smali/MultipleReturns.smali
new file mode 100644
index 0000000..38569a7
--- /dev/null
+++ b/test/460-multiple-returns3/smali/MultipleReturns.smali
@@ -0,0 +1,40 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LMultipleReturns;
+
+.super Ljava/lang/Object;
+
+.method public static caller()S
+ .registers 1
+ invoke-static {}, LMultipleReturns;->$opt$CalleeReturnShort()S
+ move-result v0
+ return v0
+.end method
+
+.method public static $opt$CalleeReturnShort()S
+ .registers 2
+ const/4 v0, 0x0
+ const/4 v1, 0x1
+ if-eq v1, v0, :else
+ if-eq v1, v0, :else2
+ const/4 v0, 0x4
+ :else2
+ return v0
+ :else
+ if-eq v1, v0, :else3
+ const/4 v1, 0x1
+ :else3
+ return v1
+.end method
diff --git a/test/460-multiple-returns3/src/Main.java b/test/460-multiple-returns3/src/Main.java
new file mode 100644
index 0000000..fb8a115
--- /dev/null
+++ b/test/460-multiple-returns3/src/Main.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+
+ // Workaround for b/18051191.
+ class InnerClass {}
+
+ public static void main(String[] args) throws Exception {
+ Class<?> c = Class.forName("MultipleReturns");
+ Method m = c.getMethod("caller");
+ short result = (Short) m.invoke(null);
+ if (result != 4) {
+ throw new Error("Expected 4, got " + result);
+ }
+ }
+}
diff --git a/test/461-get-reference-vreg/expected.txt b/test/461-get-reference-vreg/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/461-get-reference-vreg/expected.txt
diff --git a/test/461-get-reference-vreg/get_reference_vreg_jni.cc b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
new file mode 100644
index 0000000..f0b78e1
--- /dev/null
+++ b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "arch/context.h"
+#include "jni.h"
+#include "mirror/art_method-inl.h"
+#include "scoped_thread_state_change.h"
+#include "stack.h"
+#include "thread.h"
+
+namespace art {
+
+namespace {
+
+class TestVisitor : public StackVisitor {
+ public:
+ TestVisitor(Thread* thread, Context* context, mirror::Object* this_value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : StackVisitor(thread, context), this_value_(this_value), found_method_index_(0) {}
+
+ bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* m = GetMethod();
+ std::string m_name(m->GetName());
+
+ if (m_name.compare("testThisWithInstanceCall") == 0) {
+ found_method_index_ = 1;
+ uint32_t value = 0;
+ CHECK(GetVReg(m, 1, kReferenceVReg, &value));
+ CHECK_EQ(reinterpret_cast<mirror::Object*>(value), this_value_);
+ CHECK_EQ(GetThisObject(), this_value_);
+ } else if (m_name.compare("testThisWithStaticCall") == 0) {
+ found_method_index_ = 2;
+ uint32_t value = 0;
+ CHECK(GetVReg(m, 1, kReferenceVReg, &value));
+ } else if (m_name.compare("testParameter") == 0) {
+ found_method_index_ = 3;
+ uint32_t value = 0;
+ CHECK(GetVReg(m, 1, kReferenceVReg, &value));
+ } else if (m_name.compare("testObjectInScope") == 0) {
+ found_method_index_ = 4;
+ uint32_t value = 0;
+ CHECK(GetVReg(m, 0, kReferenceVReg, &value));
+ }
+
+ return true;
+ }
+
+ mirror::Object* this_value_;
+
+ // Value returned to Java to ensure the methods testThisWithInstanceCall,
+ // testThisWithStaticCall, testParameter and testObjectInScope have been
+ // found and tested.
+ jint found_method_index_;
+};
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_doNativeCallRef(JNIEnv*, jobject value) {
+ ScopedObjectAccess soa(Thread::Current());
+ std::unique_ptr<Context> context(Context::Create());
+ TestVisitor visitor(soa.Self(), context.get(), soa.Decode<mirror::Object*>(value));
+ visitor.WalkStack();
+ return visitor.found_method_index_;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_doStaticNativeCallRef(JNIEnv*, jclass) {
+ ScopedObjectAccess soa(Thread::Current());
+ std::unique_ptr<Context> context(Context::Create());
+ TestVisitor visitor(soa.Self(), context.get(), nullptr);
+ visitor.WalkStack();
+ return visitor.found_method_index_;
+}
+
+} // namespace
+
+} // namespace art
diff --git a/test/461-get-reference-vreg/info.txt b/test/461-get-reference-vreg/info.txt
new file mode 100644
index 0000000..1e5e971
--- /dev/null
+++ b/test/461-get-reference-vreg/info.txt
@@ -0,0 +1 @@
+Tests for inspecting DEX registers holding references.
diff --git a/test/461-get-reference-vreg/src/Main.java b/test/461-get-reference-vreg/src/Main.java
new file mode 100644
index 0000000..a94c6fb
--- /dev/null
+++ b/test/461-get-reference-vreg/src/Main.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public Main() {
+ }
+
+ int testThisWithInstanceCall() {
+ return doNativeCallRef();
+ }
+
+ int testThisWithStaticCall() {
+ return doStaticNativeCallRef();
+ }
+
+ static int testParameter(Object a) {
+ return doStaticNativeCallRef();
+ }
+
+ static int testObjectInScope() {
+ Object a = array[0];
+ return doStaticNativeCallRef();
+ }
+
+ native int doNativeCallRef();
+ static native int doStaticNativeCallRef();
+
+ static {
+ System.loadLibrary("arttest");
+ }
+
+ public static void main(String[] args) {
+ Main rm = new Main();
+ if (rm.testThisWithInstanceCall() != 1) {
+ throw new Error("Expected 1");
+ }
+
+ if (rm.testThisWithStaticCall() != 2) {
+ throw new Error("Expected 2");
+ }
+
+ if (testParameter(new Object()) != 3) {
+ throw new Error("Expected 3");
+ }
+
+ if (testObjectInScope() != 4) {
+ throw new Error("Expected 4");
+ }
+ }
+
+ static Object[] array = new Object[] { new Object() };
+}
diff --git a/test/703-floating-point-div/src/Main.java b/test/703-floating-point-div/src/Main.java
index 9990a54..2303702 100644
--- a/test/703-floating-point-div/src/Main.java
+++ b/test/703-floating-point-div/src/Main.java
@@ -41,7 +41,7 @@
double d7 = -0.0;
double d8 = Double.MAX_VALUE;
double d9 = Double.MIN_VALUE;
- double d0 = Double.NaN;
+ double dNaN = Double.NaN;
expectEquals(Double.doubleToRawLongBits(dPi/d1), 0x1921fb54442d18L);
expectEquals(Double.doubleToRawLongBits(dPi/d2), 0xbff921fb54442d18L);
@@ -53,7 +53,10 @@
expectEquals(Double.doubleToRawLongBits(dPi/d8), 0xc90fdaa22168cL);
expectEquals(Double.doubleToRawLongBits(dPi/d9), 0x7ff0000000000000L);
- expectEquals(Double.doubleToRawLongBits(dPi/d0), 0x7ff8000000000000L);
+
+ // Not-a-number computation. Use doubleToLongBits to get canonical NaN. The literal value
+ // is the canonical NaN (see Double.doubleToLongBits).
+ expectEquals(Double.doubleToLongBits(dPi/dNaN), 0x7ff8000000000000L);
}
public static void divFloatTest() {
@@ -66,7 +69,7 @@
float f7 = -0.0f;
float f8 = Float.MAX_VALUE;
float f9 = Float.MIN_VALUE;
- float f0 = Float.NaN;
+ float fNaN = Float.NaN;
expectEquals(Float.floatToRawIntBits(fPi/f1), 0xc90fdb);
expectEquals(Float.floatToRawIntBits(fPi/f2), 0xbfc90fdb);
@@ -78,7 +81,10 @@
expectEquals(Float.floatToRawIntBits(fPi/f8), 0x6487ee);
expectEquals(Float.floatToRawIntBits(fPi/f9), 0x7f800000);
- expectEquals(Float.floatToRawIntBits(fPi/f0), 0x7fc00000);
+
+ // Not-a-number computation. Use floatToIntBits to get canonical NaN. The literal value
+ // is the canonical NaN (see Float.floatToIntBits).
+ expectEquals(Float.floatToIntBits(fPi/fNaN), 0x7fc00000);
}
public static void main(String[] args) {
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index 75c5d72..0cafb06 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -29,7 +29,9 @@
117-nopatchoat/nopatchoat.cc \
118-noimage-dex2oat/noimage-dex2oat.cc \
454-get-vreg/get_vreg_jni.cc \
- 455-set-vreg/set_vreg_jni.cc
+ 455-set-vreg/set_vreg_jni.cc \
+ 457-regs/regs_jni.cc \
+ 461-get-reference-vreg/get_reference_vreg_jni.cc
ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
ifdef TARGET_2ND_ARCH
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 10c422e..d4eaf4c 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -130,6 +130,10 @@
ifeq ($(ART_TEST_RUN_TEST_NDEBUG),true)
RUN_TYPES += ndebug
endif
+DEBUGGABLE_TYPES := nondebuggable
+ifeq ($(ART_TEST_RUN_TEST_DEBUGGABLE),true)
+DEBUGGABLE_TYPES += debuggable
+endif
ADDRESS_SIZES_TARGET := $(ART_PHONY_TEST_TARGET_SUFFIX)
ADDRESS_SIZES_HOST := $(ART_PHONY_TEST_HOST_SUFFIX)
ifeq ($(ART_TEST_RUN_TEST_2ND_ARCH),true)
@@ -150,16 +154,17 @@
$(foreach jni, $(8), \
$(foreach image, $(9), \
$(foreach pictest, $(10), \
- $(foreach test, $(11), \
- $(foreach address_size, $(12), \
- test-art-$(target)-run-test-$(run-type)-$(prebuild)-$(compiler)-$(relocate)-$(trace)-$(gc)-$(jni)-$(image)-$(pictest)-$(test)$(address_size) \
- ))))))))))))
+ $(foreach debuggable, $(11), \
+ $(foreach test, $(12), \
+ $(foreach address_size, $(13), \
+ test-art-$(target)-run-test-$(run-type)-$(prebuild)-$(compiler)-$(relocate)-$(trace)-$(gc)-$(jni)-$(image)-$(pictest)-$(debuggable)-$(test)$(address_size) \
+ )))))))))))))
endef # all-run-test-names
# To generate a full list of tests:
# $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES),$(COMPILER_TYPES), \
# $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
-# $(TEST_ART_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+# $(DEBUGGABLE_TYPES), $(TEST_ART_RUN_TESTS), $(ALL_ADDRESS_SIZES))
# Convert's a rule name to the form used in variables, e.g. no-relocate to NO_RELOCATE
define name-to-var
@@ -176,7 +181,7 @@
ifdef dist_goal
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+ $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(ALL_ADDRESS_SIZES))
endif
TEST_ART_TIMING_SENSITIVE_RUN_TESTS :=
@@ -190,7 +195,7 @@
ifneq (,$(filter prebuild,$(PREBUILD_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),prebuild, \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(TEST_ART_BROKEN_PREBUILD_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+ $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_PREBUILD_RUN_TESTS), $(ALL_ADDRESS_SIZES))
endif
TEST_ART_BROKEN_PREBUILD_RUN_TESTS :=
@@ -201,7 +206,7 @@
ifneq (,$(filter no-prebuild,$(PREBUILD_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),no-prebuild, \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(TEST_ART_BROKEN_NO_PREBUILD_TESTS), $(ALL_ADDRESS_SIZES))
+ $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_NO_PREBUILD_TESTS), $(ALL_ADDRESS_SIZES))
endif
TEST_ART_BROKEN_NO_PREBUILD_TESTS :=
@@ -216,7 +221,7 @@
ifneq (,$(filter no-relocate,$(RELOCATE_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES), no-relocate,$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(TEST_ART_BROKEN_NO_RELOCATE_TESTS), $(ALL_ADDRESS_SIZES))
+ $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_NO_RELOCATE_TESTS), $(ALL_ADDRESS_SIZES))
endif
TEST_ART_BROKEN_NO_RELOCATE_TESTS :=
@@ -227,14 +232,14 @@
ifneq (,$(filter gcstress,$(GC_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),gcstress,$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+ $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(ALL_ADDRESS_SIZES))
endif
TEST_ART_BROKEN_GCSTRESS_RUN_TESTS :=
# 115-native-bridge setup is complicated. Need to implement it correctly for the target.
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES),$(COMPILER_TYPES), \
- $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),115-native-bridge, \
+ $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), 115-native-bridge, \
$(ALL_ADDRESS_SIZES))
# 130-hprof dumps the heap and runs hprof-conv to check whether the file is somewhat readable. This
@@ -243,7 +248,7 @@
# very hard to write here, as (for a complete test) JDWP must be set up.
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
- $(PICTEST_TYPES),130-hprof,$(ALL_ADDRESS_SIZES))
+ $(PICTEST_TYPES),$(DEBUGGABLE_TYPES),130-hprof,$(ALL_ADDRESS_SIZES))
# All these tests check that we have sane behavior if we don't have a patchoat or dex2oat.
# Therefore we shouldn't run them in situations where we actually don't have these since they
@@ -257,20 +262,20 @@
ifneq (,$(filter no-dex2oat,$(PREBUILD_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),no-dex2oat, \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
- $(PICTEST_TYPES),$(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+ $(PICTEST_TYPES),$(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
ifneq (,$(filter no-image,$(IMAGE_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),no-image, \
- $(PICTEST_TYPES), $(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+ $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
ifneq (,$(filter relocate-no-patchoat,$(RELOCATE_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES), relocate-no-patchoat,$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),$(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+ $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
TEST_ART_BROKEN_FALLBACK_RUN_TESTS :=
@@ -283,7 +288,7 @@
ifneq (,$(filter trace,$(TRACE_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES),$(RELOCATE_TYPES),trace,$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
- $(PICTEST_TYPES),$(TEST_ART_BROKEN_TRACING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+ $(PICTEST_TYPES),$(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_TRACING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
TEST_ART_BROKEN_TRACING_RUN_TESTS :=
@@ -306,47 +311,36 @@
131-structural-change \
454-get-vreg \
455-set-vreg \
+ 457-regs \
+ 461-get-reference-vreg \
ifneq (,$(filter ndebug,$(RUN_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),ndebug,$(PREBUILD_TYPES), \
$(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
- $(PICTEST_TYPES),$(TEST_ART_BROKEN_NDEBUG_TESTS),$(ALL_ADDRESS_SIZES))
+ $(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_NDEBUG_TESTS),$(ALL_ADDRESS_SIZES))
endif
TEST_ART_BROKEN_NDEBUG_TESTS :=
# Known broken tests for the default compiler (Quick).
-TEST_ART_BROKEN_DEFAULT_RUN_TESTS :=
+TEST_ART_BROKEN_DEFAULT_RUN_TESTS := \
+ 457-regs
ifneq (,$(filter default,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
default,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),$(TEST_ART_BROKEN_DEFAULT_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+ $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_DEFAULT_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
TEST_ART_BROKEN_DEFAULT_RUN_TESTS :=
-# Tests known to be broken for the optimizing compiler on 32-bit targets due to
-# inability to allocate registers for methods with long values.
-TEST_ART_BROKEN_OPTIMIZING_32_RUN_TESTS := \
- 441-checker-inliner \
- 442-checker-constant-folding \
-
-ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_32_RUN_TESTS),32)
-endif
-
-TEST_ART_BROKEN_OPTIMIZING_32_RUN_TESTS :=
-
# Known broken tests for the arm64 optimizing compiler backend.
TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS :=
ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS),64)
+ $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS),64)
endif
TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS :=
@@ -359,7 +353,7 @@
ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+ $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
# If ART_USE_OPTIMIZING_COMPILER is set to true, then the default core.art has been
@@ -367,11 +361,25 @@
ifeq ($(ART_USE_OPTIMIZING_COMPILER),true)
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
default,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+ $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS :=
+# Tests that should fail when the optimizing compiler compiles them non-debuggable.
+TEST_ART_BROKEN_OPTIMIZING_NONDEBUGGABLE_RUN_TESTS := \
+ 454-get-vreg \
+ 455-set-vreg \
+ 457-regs \
+
+ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES),$(PICTEST_TYPES),nondebuggable,$(TEST_ART_BROKEN_OPTIMIZING_NONDEBUGGABLE_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+endif
+
+TEST_ART_BROKEN_OPTIMIZING_NONDEBUGGABLE_RUN_TESTS :=
+
# Clear variables ahead of appending to them when defining tests.
$(foreach target, $(TARGET_TYPES), $(eval ART_RUN_TEST_$(call name-to-var,$(target))_RULES :=))
@@ -405,6 +413,9 @@
$(foreach target, $(TARGET_TYPES), \
$(foreach run_type, $(RUN_TYPES), \
$(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(run_type))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
+ $(foreach debuggable_type, $(DEBUGGABLE_TYPES), \
+ $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(debuggable_type))_RULES :=)))
# We need dex2oat and dalvikvm on the target as well as the core images (all images as we sync
# only once).
@@ -441,7 +452,8 @@
# test-art-{1: host or target}-run-test-{2: debug ndebug}-{3: prebuild no-prebuild no-dex2oat}-
# {4: interpreter default optimizing jit}-{5: relocate no-relocate relocate-no-patchoat}-
# {6: trace or no-trace}-{7: gcstress gcverify cms}-{8: forcecopy checkjni jni}-
-# {9: no-image image picimage}-{10: pictest nopictest}-{11: test name}{12: 32 or 64}
+# {9: no-image image picimage}-{10: pictest nopictest}-
+# {11: nondebuggable debuggable}-{12: test name}{13: 32 or 64}
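+# For example, one generated rule name (illustrative, using the existing
+# 001-HelloWorld test) is:
+#   test-art-host-run-test-debug-prebuild-optimizing-relocate-no-trace-cms-checkjni-image-nopictest-nondebuggable-001-HelloWorld32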
define define-test-art-run-test
run_test_options :=
prereq_rule :=
@@ -582,27 +594,27 @@
run_test_options += --no-image
# Add the core dependency. This is required for pre-building.
ifeq ($(1),host)
- prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_no-pic_$(12))
+ prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_no-pic_$(13))
else
- prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_no-pic_$(12))
+ prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_no-pic_$(13))
endif
else
ifeq ($(9),image)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_IMAGE_RULES
# Add the core dependency.
ifeq ($(1),host)
- prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_no-pic_$(12))
+ prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_no-pic_$(13))
else
- prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_no-pic_$(12))
+ prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_no-pic_$(13))
endif
else
ifeq ($(9),picimage)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_PICIMAGE_RULES
run_test_options += --pic-image
ifeq ($(1),host)
- prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_pic_$(12))
+ prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_pic_$(13))
else
- prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_pic_$(12))
+ prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_pic_$(13))
endif
else
$$(error found $(9) expected $(IMAGE_TYPES))
@@ -618,19 +630,30 @@
$$(error found $(10) expected $(PICTEST_TYPES))
endif
endif
- # $(11) is the test name
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_$(call name-to-var,$(11))_RULES
- ifeq ($(12),64)
+ ifeq ($(11),debuggable)
+ test_groups += ART_RUN_TEST_$$(uc_host_or_target)_DEBUGGABLE_RULES
+ run_test_options += --debuggable
+ else
+ ifeq ($(11),nondebuggable)
+ test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NONDEBUGGABLE_RULES
+ # Nothing to be done.
+ else
+ $$(error found $(11) expected $(DEBUGGABLE_TYPES))
+ endif
+ endif
+ # $(12) is the test name.
+ test_groups += ART_RUN_TEST_$$(uc_host_or_target)_$(call name-to-var,$(12))_RULES
+ ifeq ($(13),64)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_64_RULES
run_test_options += --64
else
- ifeq ($(12),32)
+ ifeq ($(13),32)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_32_RULES
else
- $$(error found $(12) expected $(ALL_ADDRESS_SIZES))
+ $$(error found $(13) expected $(ALL_ADDRESS_SIZES))
endif
endif
- run_test_rule_name := test-art-$(1)-run-test-$(2)-$(3)-$(4)-$(5)-$(6)-$(7)-$(8)-$(9)-$(10)-$(11)$(12)
+ run_test_rule_name := test-art-$(1)-run-test-$(2)-$(3)-$(4)-$(5)-$(6)-$(7)-$(8)-$(9)-$(10)-$(11)-$(12)$(13)
run_test_options := --output-path $(ART_HOST_TEST_DIR)/run-test-output/$$(run_test_rule_name) \
$$(run_test_options)
ifneq ($(ART_TEST_ANDROID_ROOT),)
@@ -643,7 +666,7 @@
DX=$(abspath $(DX)) JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) \
SMALI=$(abspath $(HOST_OUT_EXECUTABLES)/smali) \
DXMERGER=$(abspath $(HOST_OUT_EXECUTABLES)/dexmerger) \
- art/test/run-test $$(PRIVATE_RUN_TEST_OPTIONS) $(11) \
+ art/test/run-test $$(PRIVATE_RUN_TEST_OPTIONS) $(12) \
&& $$(call ART_TEST_PASSED,$$@) || $$(call ART_TEST_FAILED,$$@)
$$(hide) (echo $(MAKECMDGOALS) | grep -q $$@ && \
echo "run-test run as top-level target, removing test directory $(ART_HOST_TEST_DIR)" && \
@@ -671,8 +694,9 @@
$(foreach jni, $(JNI_TYPES), \
$(foreach image, $(IMAGE_TYPES), \
$(foreach pictest, $(PICTEST_TYPES), \
- $(eval $(call define-test-art-run-test,$(target),$(run_type),$(prebuild),$(compiler),$(relocate),$(trace),$(gc),$(jni),$(image),$(pictest),$(test),$(address_size))) \
- ))))))))))))
+ $(foreach debuggable, $(DEBUGGABLE_TYPES), \
+ $(eval $(call define-test-art-run-test,$(target),$(run_type),$(prebuild),$(compiler),$(relocate),$(trace),$(gc),$(jni),$(image),$(pictest),$(debuggable),$(test),$(address_size))) \
+ )))))))))))))
define-test-art-run-test :=
# Define a phony rule whose purpose is to test its prerequisites.
@@ -710,6 +734,9 @@
$(foreach jni, $(JNI_TYPES), $(eval \
$(call define-test-art-run-test-group,test-art-$(target)-run-test-$(jni),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(jni))_RULES)))))
$(foreach target, $(TARGET_TYPES), \
+ $(foreach debuggable, $(DEBUGGABLE_TYPES), $(eval \
+ $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(debuggable),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(debuggable))_RULES)))))
+$(foreach target, $(TARGET_TYPES), \
$(foreach image, $(IMAGE_TYPES), $(eval \
$(call define-test-art-run-test-group,test-art-$(target)-run-test-$(image),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(image))_RULES)))))
$(foreach target, $(TARGET_TYPES), \
@@ -740,6 +767,9 @@
$(foreach jni, $(JNI_TYPES), \
$(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(jni))_RULES :=)))
$(foreach target, $(TARGET_TYPES), \
+ $(foreach debuggable, $(DEBUGGABLE_TYPES), \
+ $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(debuggable))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
$(foreach image, $(IMAGE_TYPES), \
$(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(image))_RULES :=)))
$(foreach target, $(TARGET_TYPES), \
@@ -764,6 +794,7 @@
ADDRESS_SIZES_HOST :=
ALL_ADDRESS_SIZES :=
RUN_TYPES :=
+DEBUGGABLE_TYPES :=
include $(LOCAL_PATH)/Android.libarttest.mk
include art/test/Android.libnativebridgetest.mk
diff --git a/test/MultiDex/Main.java b/test/MultiDex/Main.java
new file mode 100644
index 0000000..659dba9
--- /dev/null
+++ b/test/MultiDex/Main.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Main {
+ public static void main(String args[]) {
+ Second second = new Second();
+ System.out.println(second.getSecond());
+ }
+}
diff --git a/test/MultiDex/Second.java b/test/MultiDex/Second.java
new file mode 100644
index 0000000..540aedb
--- /dev/null
+++ b/test/MultiDex/Second.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Second {
+ public String getSecond() {
+ return "I Second That.";
+ }
+}
diff --git a/test/MultiDex/main.jpp b/test/MultiDex/main.jpp
new file mode 100644
index 0000000..a5d7a6c
--- /dev/null
+++ b/test/MultiDex/main.jpp
@@ -0,0 +1,3 @@
+main:
+ @@com.android.jack.annotations.ForceInMainDex
+ class Main
diff --git a/test/MultiDex/main.list b/test/MultiDex/main.list
new file mode 100644
index 0000000..44ba78e
--- /dev/null
+++ b/test/MultiDex/main.list
@@ -0,0 +1 @@
+Main.class
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 7a2ad1c..671d56d 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -265,7 +265,7 @@
fi
if [ "$JIT" = "y" ]; then
- INT_OPTS="-Xjit"
+ INT_OPTS="-Xusejit:true"
if [ "$VERIFY" = "y" ] ; then
COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=interpret-only"
else
diff --git a/test/run-test b/test/run-test
index 52f5e0c..2950af1 100755
--- a/test/run-test
+++ b/test/run-test
@@ -147,6 +147,9 @@
run_args="${run_args} --prebuild"
prebuild_mode="yes"
shift;
+ elif [ "x$1" = "x--debuggable" ]; then
+ run_args="${run_args} -Xcompiler-option --debuggable"
+ shift;
elif [ "x$1" = "x--no-prebuild" ]; then
run_args="${run_args} --no-prebuild"
prebuild_mode="no"
@@ -431,6 +434,7 @@
echo " -Xcompiler-option Pass an option to the compiler."
echo " --runtime-option Pass an option to the runtime."
echo " --debug Wait for a debugger to attach."
+ echo " --debuggable Whether to compile Java code for a debugger."
echo " --gdb Run under gdb; incompatible with some tests."
echo " --build-only Build test files only (off by default)."
echo " --interpreter Enable interpreter only mode (off by default)."
@@ -464,7 +468,7 @@
echo " --gcverify Run with gc verification"
echo " --always-clean Delete the test files even if the test fails."
echo " --android-root [path] The path on target for the android root. (/system by default)."
- echo " --dex2oat-swap Use a dex2oat swap file."
+ echo " --dex2oat-swap Use a dex2oat swap file."
) 1>&2
exit 1
fi
diff --git a/tools/analyze-init-failures.py b/tools/analyze-init-failures.py
index f803ea3..cca05e1 100755
--- a/tools/analyze-init-failures.py
+++ b/tools/analyze-init-failures.py
@@ -40,6 +40,8 @@
class_fail_class = {}
class_fail_method = {}
+ class_fail_load_library = {}
+ class_fail_get_property = {}
root_failures = set()
root_errors = {}
@@ -82,6 +84,8 @@
immediate_class = root_err_class
immediate_method = root_method_name
root_errors[root_err_class] = error
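+ # Track whether a loadLibrary()/getProperty() frame is seen while walking up
+ # the stack, so a class's initialization failure can be attributed to native
+ # library loading or system property lookups when coloring the graph below.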
+ was_load_library = False
+ was_get_property = False
# Now go "up" the stack.
while True:
raw_line = it.next()
@@ -95,8 +99,19 @@
# A class initializer is on the stack...
class_fail_class[err_class] = immediate_class
class_fail_method[err_class] = immediate_method
+ class_fail_load_library[err_class] = was_load_library
immediate_class = err_class
immediate_method = err_method_name
+ class_fail_get_property[err_class] = was_get_property
+ was_get_property = False
+ was_load_library = err_method_name == "loadLibrary"
+ was_get_property = was_get_property or err_method_name == "getProperty"
+ failed_clazz_norm = re.sub(r"^L", "", failed_clazz)
+ failed_clazz_norm = re.sub(r";$", "", failed_clazz_norm)
+ failed_clazz_norm = re.sub(r"/", "", failed_clazz_norm)
+ if immediate_class != failed_clazz_norm:
+ class_fail_class[failed_clazz_norm] = immediate_class
+ class_fail_method[failed_clazz_norm] = immediate_method
except StopIteration:
# print('Done')
break # Done
@@ -114,7 +129,11 @@
for (r_class, r_id) in class_index.items():
error_string = ''
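+ # Color the dot-graph node by failure cause: red for a root failure, bisque
+ # when loadLibrary() was on the stack, dark sea green when getProperty() was.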
if r_class in root_failures:
- error_string = ',color=Red,tooltip="' + root_errors[r_class] + '",URL="' + root_errors[r_class] + '"'
+ error_string = ',style=filled,fillcolor=Red,tooltip="' + root_errors[r_class] + '",URL="' + root_errors[r_class] + '"'
+ elif r_class in class_fail_load_library and class_fail_load_library[r_class] == True:
+ error_string = error_string + ',style=filled,fillcolor=Bisque'
+ elif r_class in class_fail_get_property and class_fail_get_property[r_class] == True:
+ error_string = error_string + ',style=filled,fillcolor=Darkseagreen'
print(' n%d [shape=box,label="%s"%s];' % (r_id, r_class, error_string))
# Some space.
diff --git a/tools/art b/tools/art
index 2408f9f..6c89a60 100644
--- a/tools/art
+++ b/tools/art
@@ -45,7 +45,7 @@
while true; do
if [ "$1" = "--invoke-with" ]; then
shift
- invoke_with="$1"
+ invoke_with="$invoke_with $1"
shift
elif [ "$1" = "-d" ]; then
LIBART="libartd.so"