Merge "Host dex2oat and patchoat require libcutils."
diff --git a/Android.mk b/Android.mk
index c740a0d..9360355 100644
--- a/Android.mk
+++ b/Android.mk
@@ -42,27 +42,7 @@
.PHONY: clean-oat-host
clean-oat-host:
- rm -f $(HOST_CORE_IMG_OUTS)
- rm -f $(HOST_CORE_OAT_OUTS)
- rm -f $(HOST_OUT_JAVA_LIBRARIES)/$(ART_HOST_ARCH)/*.odex
-ifneq ($(HOST_PREFER_32_BIT),true)
- rm -f $(HOST_OUT_JAVA_LIBRARIES)/$(2ND_ART_HOST_ARCH)/*.odex
-endif
- rm -f $(TARGET_CORE_IMG_OUTS)
- rm -f $(TARGET_CORE_OAT_OUTS)
- rm -rf $(DEXPREOPT_PRODUCT_DIR_FULL_PATH)
- rm -f $(TARGET_OUT_UNSTRIPPED)/system/framework/*.odex
- rm -f $(TARGET_OUT_UNSTRIPPED)/system/framework/*/*.oat
- rm -f $(TARGET_OUT_UNSTRIPPED)/system/framework/*/*.art
- rm -f $(TARGET_OUT)/framework/*/*.oat
- rm -f $(TARGET_OUT)/framework/*/*.art
- rm -f $(TARGET_OUT_APPS)/*.odex
- rm -f $(TARGET_OUT_INTERMEDIATES)/JAVA_LIBRARIES/*_intermediates/javalib.odex
- rm -f $(TARGET_OUT_INTERMEDIATES)/APPS/*_intermediates/*.odex
-ifdef TARGET_2ND_ARCH
- rm -f $(2ND_TARGET_OUT_INTERMEDIATES)/JAVA_LIBRARIES/*_intermediates/javalib.odex
- rm -f $(2ND_TARGET_OUT_INTERMEDIATES)/APPS/*_intermediates/*.odex
-endif
+ find $(OUT_DIR) -name "*.oat" -o -name "*.odex" -o -name "*.art" | xargs rm -f
ifneq ($(TMPDIR),)
rm -rf $(TMPDIR)/$(USER)/test-*/dalvik-cache/*
rm -rf $(TMPDIR)/android-data/dalvik-cache/*
@@ -360,6 +340,7 @@
--boot-image=$(DEFAULT_DEX_PREOPT_BUILT_IMAGE) --dex-file=$(PRODUCT_OUT)/$(1) \
--dex-location=/$(1) --oat-file=$$@ \
--instruction-set=$(DEX2OAT_TARGET_ARCH) \
+ --instruction-set-variant=$(DEX2OAT_TARGET_CPU_VARIANT) \
--instruction-set-features=$(DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
--android-root=$(PRODUCT_OUT)/system --include-patch-information \
--runtime-arg -Xnorelocate
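
The new --instruction-set-variant flag mirrors the one added to the core-image rule in build/Android.oat.mk below. As a rough sketch of how such a flag is typically consumed (hypothetical names, not dex2oat's actual option parser), the value is split off its prefix and later handed to a CPU-feature factory:

    #include <string>

    // Hypothetical helper: extract "krait" from "--instruction-set-variant=krait".
    // The real dex2oat wires the result into its instruction-set-features selection.
    bool ParseVariantOption(const std::string& option, std::string* variant) {
      static const char kPrefix[] = "--instruction-set-variant=";
      const size_t prefix_len = sizeof(kPrefix) - 1;
      if (option.compare(0, prefix_len, kPrefix) != 0) {
        return false;  // Not our flag.
      }
      *variant = option.substr(prefix_len);
      return true;
    }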
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 08b4ec2..8f00298 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -219,6 +219,10 @@
art_cflags += -DART_USE_READ_BARRIER=1
endif
+ifeq ($(ART_USE_TLAB),true)
+ art_cflags += -DART_USE_TLAB=1
+endif
+
# Cflags for non-debug ART and ART tools.
art_non_debug_cflags := \
-O3
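
The new ART_USE_TLAB block threads a preprocessor define through art_cflags. A minimal sketch of how such a -D flag usually gates code at compile time (kUseTlab is illustrative; the real constant lives in the runtime sources):

    // Turn the build-time define into a compile-time constant so the
    // optimizer can drop the dead branch.
    #ifdef ART_USE_TLAB
    static constexpr bool kUseTlab = true;
    #else
    static constexpr bool kUseTlab = false;
    #endif

    const char* AllocPathName() {
      return kUseTlab ? "thread-local allocation buffer" : "shared allocation path";
    }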
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 6b6a9e0..c5669c0 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -60,7 +60,7 @@
$(call dexpreopt-remove-classes.dex,$@)
# Dex file dependencies for each gtest.
-ART_GTEST_class_linker_test_DEX_DEPS := Interfaces MyClass Nested Statics StaticsFromCode
+ART_GTEST_class_linker_test_DEX_DEPS := Interfaces MultiDex MyClass Nested Statics StaticsFromCode
ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod
ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
@@ -159,6 +159,7 @@
runtime/intern_table_test.cc \
runtime/interpreter/safe_math_test.cc \
runtime/java_vm_ext_test.cc \
+ runtime/jit/jit_code_cache_test.cc \
runtime/leb128_test.cc \
runtime/mem_map_test.cc \
runtime/memory_region_test.cc \
@@ -166,6 +167,7 @@
runtime/mirror/object_test.cc \
runtime/monitor_pool_test.cc \
runtime/monitor_test.cc \
+ runtime/oat_file_test.cc \
runtime/oat_file_assistant_test.cc \
runtime/parsed_options_test.cc \
runtime/reference_table_test.cc \
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 4d2fa41..710b130 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -230,6 +230,7 @@
$$(addprefix --dex-location=,$$(TARGET_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
--oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
--base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) --instruction-set=$$($(3)TARGET_ARCH) \
+ --instruction-set-variant=$$($(3)DEX2OAT_TARGET_CPU_VARIANT) \
--instruction-set-features=$$($(3)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
--android-root=$$(PRODUCT_OUT)/system --include-patch-information \
$$(PRIVATE_CORE_COMPILE_OPTIONS) || (rm $$(PRIVATE_CORE_OAT_NAME); exit 1)
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 0906753..904f117 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -48,12 +48,6 @@
dex/quick/mips/int_mips.cc \
dex/quick/mips/target_mips.cc \
dex/quick/mips/utility_mips.cc \
- dex/quick/mips64/assemble_mips64.cc \
- dex/quick/mips64/call_mips64.cc \
- dex/quick/mips64/fp_mips64.cc \
- dex/quick/mips64/int_mips64.cc \
- dex/quick/mips64/target_mips64.cc \
- dex/quick/mips64/utility_mips64.cc \
dex/quick/mir_to_lir.cc \
dex/quick/quick_compiler.cc \
dex/quick/ralloc_util.cc \
@@ -94,6 +88,7 @@
jni/quick/x86_64/calling_convention_x86_64.cc \
jni/quick/calling_convention.cc \
jni/quick/jni_compiler.cc \
+ optimizing/boolean_simplifier.cc \
optimizing/builder.cc \
optimizing/bounds_check_elimination.cc \
optimizing/code_generator.cc \
@@ -111,6 +106,7 @@
optimizing/intrinsics.cc \
optimizing/intrinsics_arm.cc \
optimizing/intrinsics_arm64.cc \
+ optimizing/intrinsics_x86.cc \
optimizing/intrinsics_x86_64.cc \
optimizing/licm.cc \
optimizing/locations.cc \
@@ -161,7 +157,6 @@
dex/quick/arm/arm_lir.h \
dex/quick/arm64/arm64_lir.h \
dex/quick/mips/mips_lir.h \
- dex/quick/mips64/mips64_lir.h \
dex/quick/resource_mask.h \
dex/compiler_enums.h \
dex/global_value_numbering.h \
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 09be437..1d0aad5 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -178,8 +178,8 @@
verification_results_.reset(new VerificationResults(compiler_options_.get()));
method_inliner_map_.reset(new DexFileToMethodInlinerMap);
callbacks_.reset(new QuickCompilerCallbacks(verification_results_.get(),
- method_inliner_map_.get()));
- options->push_back(std::make_pair("compilercallbacks", callbacks_.get()));
+ method_inliner_map_.get(),
+ CompilerCallbacks::CallbackMode::kCompileApp));
}
void CommonCompilerTest::TearDown() {
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 9cffbc8..d7b210d 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -78,7 +78,6 @@
std::unique_ptr<CompilerOptions> compiler_options_;
std::unique_ptr<VerificationResults> verification_results_;
std::unique_ptr<DexFileToMethodInlinerMap> method_inliner_map_;
- std::unique_ptr<CompilerCallbacks> callbacks_;
std::unique_ptr<CompilerDriver> compiler_driver_;
std::unique_ptr<CumulativeLogger> timer_;
std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index fcefb6f..548b6f8 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -180,22 +180,21 @@
}
void DexCompiler::CompileReturnVoid(Instruction* inst, uint32_t dex_pc) {
- DCHECK(inst->Opcode() == Instruction::RETURN_VOID);
- // Are we compiling a non-clinit constructor?
- if (!unit_.IsConstructor() || unit_.IsStatic()) {
- return;
+ DCHECK_EQ(inst->Opcode(), Instruction::RETURN_VOID);
+ if (unit_.IsConstructor()) {
+ // Are we compiling a non-clinit constructor which needs a barrier?
+ if (!unit_.IsStatic() &&
+ driver_.RequiresConstructorBarrier(Thread::Current(), unit_.GetDexFile(),
+ unit_.GetClassDefIndex())) {
+ return;
+ }
}
- // Do we need a constructor barrier ?
- if (!driver_.RequiresConstructorBarrier(Thread::Current(), unit_.GetDexFile(),
- unit_.GetClassDefIndex())) {
- return;
- }
- // Replace RETURN_VOID by RETURN_VOID_BARRIER.
+ // Replace RETURN_VOID by RETURN_VOID_NO_BARRIER.
VLOG(compiler) << "Replacing " << Instruction::Name(inst->Opcode())
- << " by " << Instruction::Name(Instruction::RETURN_VOID_BARRIER)
+ << " by " << Instruction::Name(Instruction::RETURN_VOID_NO_BARRIER)
<< " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
<< PrettyMethod(unit_.GetDexMethodIndex(), GetDexFile(), true);
- inst->SetOpcode(Instruction::RETURN_VOID_BARRIER);
+ inst->SetOpcode(Instruction::RETURN_VOID_NO_BARRIER);
}
Instruction* DexCompiler::CompileCheckCast(Instruction* inst, uint32_t dex_pc) {
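
The rewritten CompileReturnVoid inverts the old early-return chain: every RETURN_VOID is now quickened to RETURN_VOID_NO_BARRIER unless the method is an instance constructor that actually needs a barrier. A standalone restatement of that predicate (a hypothetical helper for clarity, not code from the patch):

    // True when the quickened RETURN_VOID_NO_BARRIER must NOT be emitted,
    // i.e. the original RETURN_VOID with its barrier semantics is kept.
    bool KeepsBarrier(bool is_constructor, bool is_static, bool requires_barrier) {
      // Only non-static (instance) constructors whose class requires a
      // constructor barrier (e.g. final instance fields) keep the barrier.
      return is_constructor && !is_static && requires_barrier;
    }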
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index a89b250..3d7a640 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -416,7 +416,7 @@
// 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
kAnInvoke | kAnHeavyWeight,
- // 73 RETURN_VOID_BARRIER
+ // 73 RETURN_VOID_NO_BARRIER
kAnBranch,
// 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index dfaff6c..f638b0b 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -374,7 +374,7 @@
// 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
- // 73 RETURN_VOID_BARRIER
+ // 73 RETURN_VOID_NO_BARRIER
DF_NOP,
// 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 266b7c3..c85c3b6 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -671,6 +671,9 @@
}
int dead_true_def = if_true->ssa_rep->defs[0];
raw_use_counts_[dead_true_def] = use_counts_[dead_true_def] = 0;
+ // Update ending vreg->sreg map for GC maps generation.
+ int def_vreg = SRegToVReg(mir->ssa_rep->defs[0]);
+ bb->data_flow_info->vreg_to_ssa_map_exit[def_vreg] = mir->ssa_rep->defs[0];
// We want to remove ft and tk and link bb directly to ft_ft. First, we need
// to update all Phi inputs correctly with UpdatePredecessor(ft->id, bb->id)
// since the live_def above comes from ft->first_mir_insn (if_false).
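
The two added lines keep the block's exit-time vreg-to-sreg map in sync when select folding rewrites a definition, since the precise GC map builder added in codegen_util.cc below reads vreg_to_ssa_map_exit at block boundaries. Schematically (illustrative types, not the real MIRGraph structures):

    #include <vector>

    // After the fold, 'sreg' is the definition that reaches the block exit
    // under virtual register 'vreg'; record it so later passes see it.
    void RecordExitDef(std::vector<int>& vreg_to_ssa_map_exit, int vreg, int sreg) {
      vreg_to_ssa_map_exit[vreg] = sreg;
    }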
diff --git a/compiler/dex/pass_manager.cc b/compiler/dex/pass_manager.cc
index 6d58f65..6377a6c 100644
--- a/compiler/dex/pass_manager.cc
+++ b/compiler/dex/pass_manager.cc
@@ -33,7 +33,7 @@
// Add each pass which isn't disabled into default_pass_list_.
for (const auto* pass : passes_) {
if (options_.GetDisablePassList().find(pass->GetName()) != std::string::npos) {
- LOG(INFO) << "Skipping disabled pass " << pass->GetName();
+ VLOG(compiler) << "Skipping disabled pass " << pass->GetName();
} else {
default_pass_list_.push_back(pass);
}
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index 8833da3..3e69878 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -848,7 +848,7 @@
ENCODING_MAP(kThumb2LdrPcRel12, 0xf8df0000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
kFmtUnused, -1, -1,
- IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD_OFF | NEEDS_FIXUP,
+ IS_BINARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD_OFF | NEEDS_FIXUP,
"ldr", "!0C, [r15pc, #!1d]", 4, kFixupLoad),
ENCODING_MAP(kThumb2BCond, 0xf0008000,
kFmtBrOffset, -1, -1, kFmtBitBlt, 25, 22, kFmtUnused, -1, -1,
@@ -1502,7 +1502,7 @@
break;
}
case kFixupAdr: {
- EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[2]));
+ const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[2]);
LIR* target = lir->target;
int32_t target_disp = (tab_rec != NULL) ? tab_rec->offset + offset_adjustment
: target->offset + ((target->flags.generation == lir->flags.generation) ? 0 :
@@ -1555,8 +1555,8 @@
}
case kFixupMovImmLST: {
// operands[1] should hold disp, [2] has add, [3] has tab_rec
- LIR *addPCInst = reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2]));
- EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
+ const LIR* addPCInst = UnwrapPointer<LIR>(lir->operands[2]);
+ const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[3]);
// If tab_rec is null, this is a literal load. Use target
LIR* target = lir->target;
int32_t target_disp = tab_rec ? tab_rec->offset : target->offset;
@@ -1565,8 +1565,8 @@
}
case kFixupMovImmHST: {
// operands[1] should hold disp, [2] has add, [3] has tab_rec
- LIR *addPCInst = reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2]));
- EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
+ const LIR* addPCInst = UnwrapPointer<LIR>(lir->operands[2]);
+ const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[3]);
// If tab_rec is null, this is a literal load. Use target
LIR* target = lir->target;
int32_t target_disp = tab_rec ? tab_rec->offset : target->offset;
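
The reinterpret_casts go away because UnwrapPointer is now templated on the pointee type. A minimal sketch of such a pointer-interning pair (assumed shape, not the exact Mir2Lir members): pointers are stored type-erased, and only the small index travels in the 32-bit LIR operand:

    #include <cstddef>
    #include <vector>

    class PointerStorage {
     public:
      template <typename T>
      size_t WrapPointer(const T* pointer) {
        storage_.push_back(pointer);   // Type-erased storage.
        return storage_.size() - 1u;   // Index small enough for an int operand.
      }
      template <typename T>
      const T* UnwrapPointer(size_t index) const {
        return static_cast<const T*>(storage_[index]);  // Caller names the type.
      }
     private:
      std::vector<const void*> storage_;
    };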
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 1a9dbea..d46c25a 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -433,7 +433,7 @@
class StackOverflowSlowPath : public LIRSlowPath {
public:
StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, bool restore_lr, size_t sp_displace)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, nullptr), restore_lr_(restore_lr),
+ : LIRSlowPath(m2l, branch), restore_lr_(restore_lr),
sp_displace_(sp_displace) {
}
void Compile() OVERRIDE {
@@ -658,7 +658,7 @@
// NOTE: Method deduplication takes linker patches into account, so we can just pass 0
// as a placeholder for the offset.
LIR* call = RawLIR(current_dalvik_offset_, kThumb2Bl, 0,
- target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
+ target_method_idx, WrapPointer(target_dex_file), type);
AppendLIR(call);
call_method_insns_.push_back(call);
return call;
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 67fabbd..4141bcf 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -200,7 +200,7 @@
void UpdateIT(LIR* it, const char* new_guide);
void OpEndIT(LIR* it);
LIR* OpMem(OpKind op, RegStorage r_base, int disp);
- LIR* OpPcRelLoad(RegStorage reg, LIR* target);
+ void OpPcRelLoad(RegStorage reg, LIR* target);
LIR* OpReg(OpKind op, RegStorage r_dest_src);
void OpRegCopy(RegStorage r_dest, RegStorage r_src);
LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index ef26323..9193e1b 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -1081,9 +1081,10 @@
return true;
}
-LIR* ArmMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+void ArmMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
- return RawLIR(current_dalvik_offset_, kThumb2LdrPcRel12, reg.GetReg(), 0, 0, 0, 0, target);
+ LIR* lir = NewLIR2(kThumb2LdrPcRel12, reg.GetReg(), 0);
+ lir->target = target;
}
LIR* ArmMir2Lir::OpVldm(RegStorage r_base, int count) {
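
The return type changes from LIR* to void because the helper now emits the instruction itself: RawLIR only constructed a LIR node, leaving every caller to AppendLIR() it, while NewLIR2 constructs and appends in one step. A toy model of the distinction (not the real Mir2Lir API):

    #include <vector>

    struct LIR { int opcode, op0, op1; LIR* target = nullptr; };

    class MiniAssembler {
     public:
      // Old style: build only; the caller must append explicitly.
      LIR* RawLIR(int opcode, int op0, int op1) {
        return new LIR{opcode, op0, op1};
      }
      void AppendLIR(LIR* lir) { stream_.push_back(lir); }
      // New style: build and append in one step, as NewLIR2 does.
      LIR* NewLIR2(int opcode, int op0, int op1) {
        LIR* lir = RawLIR(opcode, op0, op1);
        AppendLIR(lir);
        return lir;
      }
     private:
      std::vector<LIR*> stream_;
    };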
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 13f9072..9812d9f 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -455,7 +455,7 @@
case 'T':
snprintf(tbuf, arraysize(tbuf), "%s", PrettyMethod(
static_cast<uint32_t>(lir->operands[1]),
- *reinterpret_cast<const DexFile*>(UnwrapPointer(lir->operands[2]))).c_str());
+ *UnwrapPointer<DexFile>(lir->operands[2])).c_str());
break;
case 'u': {
int offset_1 = lir->operands[0];
@@ -906,9 +906,7 @@
for (LIR* p : call_method_insns_) {
DCHECK_EQ(p->opcode, kThumb2Bl);
uint32_t target_method_idx = p->operands[1];
- const DexFile* target_dex_file =
- reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2]));
-
+ const DexFile* target_dex_file = UnwrapPointer<DexFile>(p->operands[2]);
patches_.push_back(LinkerPatch::RelativeCodePatch(p->offset,
target_dex_file, target_method_idx));
}
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index aa5e5b4..329bb1e 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -1003,7 +1003,7 @@
0 : offset_adjustment) + target_lir->offset;
delta = target_offs - lir->offset;
} else if (lir->operands[2] >= 0) {
- EmbeddedData* tab = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[2]));
+ const EmbeddedData* tab = UnwrapPointer<EmbeddedData>(lir->operands[2]);
delta = tab->offset + offset_adjustment - lir->offset;
} else {
// No fixup: this usage allows to retrieve the current PC.
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 8275162..823cb60 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -351,8 +351,8 @@
if (generate_explicit_stack_overflow_check) {
class StackOverflowSlowPath: public LIRSlowPath {
public:
- StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace) :
- LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, nullptr),
+ StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
+ : LIRSlowPath(m2l, branch),
sp_displace_(sp_displace) {
}
void Compile() OVERRIDE {
@@ -525,7 +525,7 @@
// NOTE: Method deduplication takes linker patches into account, so we can just pass 0
// as a placeholder for the offset.
LIR* call = RawLIR(current_dalvik_offset_, kA64Bl1t, 0,
- target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
+ target_method_idx, WrapPointer(target_dex_file), type);
AppendLIR(call);
call_method_insns_.push_back(call);
return call;
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index d5f0536..54fd46d 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -203,7 +203,7 @@
LIR* OpIT(ConditionCode cond, const char* guide) OVERRIDE;
void OpEndIT(LIR* it) OVERRIDE;
LIR* OpMem(OpKind op, RegStorage r_base, int disp) OVERRIDE;
- LIR* OpPcRelLoad(RegStorage reg, LIR* target) OVERRIDE;
+ void OpPcRelLoad(RegStorage reg, LIR* target) OVERRIDE;
LIR* OpReg(OpKind op, RegStorage r_dest_src) OVERRIDE;
void OpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) OVERRIDE;
diff --git a/compiler/dex/quick/arm64/fp_arm64.cc b/compiler/dex/quick/arm64/fp_arm64.cc
index a8ec6c0..49b15fe 100644
--- a/compiler/dex/quick/arm64/fp_arm64.cc
+++ b/compiler/dex/quick/arm64/fp_arm64.cc
@@ -449,7 +449,7 @@
}
bool Arm64Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
- int32_t encoded_imm = EncodeImmSingle(bit_cast<float, uint32_t>(0.5f));
+ int32_t encoded_imm = EncodeImmSingle(bit_cast<uint32_t, float>(0.5f));
A64Opcode wide = (is_double) ? WIDE(0) : UNWIDE(0);
RegLocation rl_src = info->args[0];
RegLocation rl_dest = (is_double) ? InlineTargetWide(info) : InlineTarget(info);
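
The swapped template arguments follow ART's bit_cast convention of destination type first. A self-contained sketch of that convention using memcpy-based type punning (a sketch only, not ART's actual header):

    #include <cstdint>
    #include <cstring>

    template <typename Dest, typename Source>
    Dest BitCastSketch(const Source& source) {
      static_assert(sizeof(Dest) == sizeof(Source), "sizes must match");
      Dest dest;
      std::memcpy(&dest, &source, sizeof(dest));  // Copy the bits, nothing else.
      return dest;
    }

    // Mirrors the fixed call above: the raw IEEE-754 bits of 0.5f.
    const uint32_t kHalfBits = BitCastSketch<uint32_t, float>(0.5f);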
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 92675f3..2372ccc 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -937,9 +937,10 @@
return true;
}
-LIR* Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+void Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
- return RawLIR(current_dalvik_offset_, kA64Ldr2rp, As32BitReg(reg).GetReg(), 0, 0, 0, 0, target);
+ LIR* lir = NewLIR2(kA64Ldr2rp, As32BitReg(reg).GetReg(), 0);
+ lir->target = target;
}
LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 136be94..09a34bf 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -851,9 +851,7 @@
for (LIR* p : call_method_insns_) {
DCHECK_EQ(p->opcode, kA64Bl1t);
uint32_t target_method_idx = p->operands[1];
- const DexFile* target_dex_file =
- reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2]));
-
+ const DexFile* target_dex_file = UnwrapPointer<DexFile>(p->operands[2]);
patches_.push_back(LinkerPatch::RelativeCodePatch(p->offset,
target_dex_file, target_method_idx));
}
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 029c0ca..df72830 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -16,6 +16,7 @@
#include "mir_to_lir-inl.h"
+#include "base/bit_vector-inl.h"
#include "dex/mir_graph.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
@@ -88,6 +89,8 @@
inst->u.m.def_mask = &kEncodeAll;
LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC);
DCHECK(safepoint_pc->u.m.def_mask->Equals(kEncodeAll));
+ DCHECK(current_mir_ != nullptr || (current_dalvik_offset_ == 0 && safepoints_.empty()));
+ safepoints_.emplace_back(safepoint_pc, current_mir_);
}
void Mir2Lir::MarkSafepointPCAfter(LIR* after) {
@@ -102,6 +105,8 @@
InsertLIRAfter(after, safepoint_pc);
}
DCHECK(safepoint_pc->u.m.def_mask->Equals(kEncodeAll));
+ DCHECK(current_mir_ != nullptr || (current_dalvik_offset_ == 0 && safepoints_.empty()));
+ safepoints_.emplace_back(safepoint_pc, current_mir_);
}
/* Remove a LIR from the list. */
@@ -217,7 +222,7 @@
}
LOG(INFO) << "-------- dalvik offset: 0x" << std::hex
<< lir->dalvik_offset << " @ "
- << reinterpret_cast<char*>(UnwrapPointer(lir->operands[0]));
+ << UnwrapPointer<char>(lir->operands[0]);
break;
case kPseudoExitBlock:
LOG(INFO) << "-------- exit offset: 0x" << std::hex << dest;
@@ -411,7 +416,7 @@
LIR* Mir2Lir::ScanLiteralPoolMethod(LIR* data_target, const MethodReference& method) {
while (data_target) {
if (static_cast<uint32_t>(data_target->operands[0]) == method.dex_method_index &&
- UnwrapPointer(data_target->operands[1]) == method.dex_file) {
+ UnwrapPointer<DexFile>(data_target->operands[1]) == method.dex_file) {
return data_target;
}
data_target = data_target->next;
@@ -423,7 +428,7 @@
LIR* Mir2Lir::ScanLiteralPoolClass(LIR* data_target, const DexFile& dex_file, uint32_t type_idx) {
while (data_target) {
if (static_cast<uint32_t>(data_target->operands[0]) == type_idx &&
- UnwrapPointer(data_target->operands[1]) == &dex_file) {
+ UnwrapPointer<DexFile>(data_target->operands[1]) == &dex_file) {
return data_target;
}
data_target = data_target->next;
@@ -486,8 +491,7 @@
data_lir = code_literal_list_;
while (data_lir != nullptr) {
uint32_t target_method_idx = data_lir->operands[0];
- const DexFile* target_dex_file =
- reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
+ const DexFile* target_dex_file = UnwrapPointer<DexFile>(data_lir->operands[1]);
patches_.push_back(LinkerPatch::CodePatch(code_buffer_.size(),
target_dex_file, target_method_idx));
PushUnpatchedReference(&code_buffer_);
@@ -496,8 +500,7 @@
data_lir = method_literal_list_;
while (data_lir != nullptr) {
uint32_t target_method_idx = data_lir->operands[0];
- const DexFile* target_dex_file =
- reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
+ const DexFile* target_dex_file = UnwrapPointer<DexFile>(data_lir->operands[1]);
patches_.push_back(LinkerPatch::MethodPatch(code_buffer_.size(),
target_dex_file, target_method_idx));
PushUnpatchedReference(&code_buffer_);
@@ -507,8 +510,7 @@
data_lir = class_literal_list_;
while (data_lir != nullptr) {
uint32_t target_type_idx = data_lir->operands[0];
- const DexFile* class_dex_file =
- reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
+ const DexFile* class_dex_file = UnwrapPointer<DexFile>(data_lir->operands[1]);
patches_.push_back(LinkerPatch::TypePatch(code_buffer_.size(),
class_dex_file, target_type_idx));
PushUnpatchedReference(&code_buffer_);
@@ -767,6 +769,71 @@
}
void Mir2Lir::CreateNativeGcMap() {
+ if (UNLIKELY((cu_->disable_opt & (1u << kPromoteRegs)) != 0u)) {
+ // If we're not promoting to physical registers, it's safe to use the verifier's notion of
+ // references. (We disable register promotion when type inference finds a type conflict and
+ // in that case we defer to the verifier to avoid using the compiler's conflicting info.)
+ CreateNativeGcMapWithoutRegisterPromotion();
+ return;
+ }
+
+ ArenaBitVector* references = new (arena_) ArenaBitVector(arena_, mir_graph_->GetNumSSARegs(),
+ false);
+
+ // Calculate max native offset and max reference vreg.
+ MIR* prev_mir = nullptr;
+ int max_ref_vreg = -1;
+ CodeOffset max_native_offset = 0u;
+ for (const auto& entry : safepoints_) {
+ uint32_t native_offset = entry.first->offset;
+ max_native_offset = std::max(max_native_offset, native_offset);
+ MIR* mir = entry.second;
+ UpdateReferenceVRegs(mir, prev_mir, references);
+ max_ref_vreg = std::max(max_ref_vreg, references->GetHighestBitSet());
+ prev_mir = mir;
+ }
+
+#if defined(BYTE_ORDER) && (BYTE_ORDER == LITTLE_ENDIAN)
+ static constexpr bool kLittleEndian = true;
+#else
+ static constexpr bool kLittleEndian = false;
+#endif
+
+ // Build the GC map.
+ uint32_t reg_width = static_cast<uint32_t>((max_ref_vreg + 8) / 8);
+ GcMapBuilder native_gc_map_builder(&native_gc_map_,
+ safepoints_.size(),
+ max_native_offset, reg_width);
+ if (kLittleEndian) {
+ for (const auto& entry : safepoints_) {
+ uint32_t native_offset = entry.first->offset;
+ MIR* mir = entry.second;
+ UpdateReferenceVRegs(mir, prev_mir, references);
+ // For little-endian, the bytes comprising the bit vector's raw storage are what we need.
+ native_gc_map_builder.AddEntry(native_offset,
+ reinterpret_cast<const uint8_t*>(references->GetRawStorage()));
+ prev_mir = mir;
+ }
+ } else {
+ ArenaVector<uint8_t> references_buffer(arena_->Adapter());
+ references_buffer.resize(reg_width);
+ for (const auto& entry : safepoints_) {
+ uint32_t native_offset = entry.first->offset;
+ MIR* mir = entry.second;
+ UpdateReferenceVRegs(mir, prev_mir, references);
+ // Big-endian or unknown endianness, manually translate the bit vector data.
+ const auto* raw_storage = references->GetRawStorage();
+ for (size_t i = 0; i != reg_width; ++i) {
+ references_buffer[i] = static_cast<uint8_t>(
+ raw_storage[i / sizeof(raw_storage[0])] >> (8u * (i % sizeof(raw_storage[0]))));
+ }
+ native_gc_map_builder.AddEntry(native_offset, &references_buffer[0]);
+ prev_mir = mir;
+ }
+ }
+}
+
+void Mir2Lir::CreateNativeGcMapWithoutRegisterPromotion() {
DCHECK(!encoded_mapping_table_.empty());
MappingTable mapping_table(&encoded_mapping_table_[0]);
uint32_t max_native_offset = 0;
@@ -965,6 +1032,7 @@
block_label_list_(nullptr),
promotion_map_(nullptr),
current_dalvik_offset_(0),
+ current_mir_(nullptr),
estimated_native_code_size_(0),
reg_pool_(nullptr),
live_sreg_(0),
@@ -984,6 +1052,7 @@
slow_paths_(arena->Adapter(kArenaAllocSlowPaths)),
mem_ref_type_(ResourceMask::kHeapRef),
mask_cache_(arena),
+ safepoints_(arena->Adapter()),
in_to_reg_storage_mapping_(arena) {
switch_tables_.reserve(4);
fill_array_data_.reserve(4);
@@ -992,7 +1061,7 @@
pointer_storage_.reserve(128);
slow_paths_.reserve(32);
// Reserve pointer id 0 for nullptr.
- size_t null_idx = WrapPointer(nullptr);
+ size_t null_idx = WrapPointer<void>(nullptr);
DCHECK_EQ(null_idx, 0U);
}
@@ -1201,8 +1270,7 @@
data_target->operands[2] = type;
}
// Loads a code pointer. Code from oat file can be mapped anywhere.
- LIR* load_pc_rel = OpPcRelLoad(TargetPtrReg(symbolic_reg), data_target);
- AppendLIR(load_pc_rel);
+ OpPcRelLoad(TargetPtrReg(symbolic_reg), data_target);
DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
DCHECK_NE(cu_->instruction_set, kMips64) << reinterpret_cast<void*>(data_target);
}
@@ -1219,8 +1287,7 @@
data_target->operands[2] = type;
}
// Loads an ArtMethod pointer, which is a reference as it lives in the heap.
- LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg, kRef), data_target);
- AppendLIR(load_pc_rel);
+ OpPcRelLoad(TargetReg(symbolic_reg, kRef), data_target);
DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
DCHECK_NE(cu_->instruction_set, kMips64) << reinterpret_cast<void*>(data_target);
}
@@ -1234,8 +1301,7 @@
data_target->operands[1] = WrapPointer(const_cast<DexFile*>(&dex_file));
}
// Loads a Class pointer, which is a reference as it lives in the heap.
- LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg, kRef), data_target);
- AppendLIR(load_pc_rel);
+ OpPcRelLoad(TargetReg(symbolic_reg, kRef), data_target);
}
std::vector<uint8_t>* Mir2Lir::ReturnFrameDescriptionEntry() {
@@ -1274,4 +1340,102 @@
UNREACHABLE();
}
+void Mir2Lir::InitReferenceVRegs(BasicBlock* bb, BitVector* references) {
+ // Mark the references coming from the first predecessor.
+ DCHECK(bb != nullptr);
+ DCHECK(bb->block_type == kEntryBlock || !bb->predecessors.empty());
+ BasicBlock* first_bb =
+ (bb->block_type == kEntryBlock) ? bb : mir_graph_->GetBasicBlock(bb->predecessors[0]);
+ DCHECK(first_bb != nullptr);
+ DCHECK(first_bb->data_flow_info != nullptr);
+ DCHECK(first_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
+ const int32_t* first_vreg_to_ssa_map = first_bb->data_flow_info->vreg_to_ssa_map_exit;
+ references->ClearAllBits();
+ for (uint32_t vreg = 0, num_vregs = mir_graph_->GetNumOfCodeVRs(); vreg != num_vregs; ++vreg) {
+ int32_t sreg = first_vreg_to_ssa_map[vreg];
+ if (sreg != INVALID_SREG && mir_graph_->reg_location_[sreg].ref &&
+ !mir_graph_->IsConstantNullRef(mir_graph_->reg_location_[sreg])) {
+ references->SetBit(vreg);
+ }
+ }
+ // Unmark the references that are merging with a different value.
+ for (size_t i = 1u, num_pred = bb->predecessors.size(); i < num_pred; ++i) {
+ BasicBlock* pred_bb = mir_graph_->GetBasicBlock(bb->predecessors[i]);
+ DCHECK(pred_bb != nullptr);
+ DCHECK(pred_bb->data_flow_info != nullptr);
+ DCHECK(pred_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
+ const int32_t* pred_vreg_to_ssa_map = pred_bb->data_flow_info->vreg_to_ssa_map_exit;
+ for (uint32_t vreg : references->Indexes()) {
+ if (first_vreg_to_ssa_map[vreg] != pred_vreg_to_ssa_map[vreg]) {
+ // NOTE: The BitVectorSet::IndexIterator will not check the pointed-to bit again,
+ // so clearing the bit has no effect on the iterator.
+ references->ClearBit(vreg);
+ }
+ }
+ }
+ if (bb->block_type != kEntryBlock && bb->first_mir_insn != nullptr &&
+ static_cast<int>(bb->first_mir_insn->dalvikInsn.opcode) == kMirOpCheckPart2) {
+ // In Mir2Lir::MethodBlockCodeGen() we have artificially moved the throwing
+ // instruction to the previous block. However, the MIRGraph data used above
+ // doesn't reflect that, so we still need to process that MIR insn here.
+ MIR* mir = nullptr;
+ BasicBlock* pred_bb = bb;
+ // Traverse empty blocks.
+ while (mir == nullptr && pred_bb->predecessors.size() == 1u) {
+ pred_bb = mir_graph_->GetBasicBlock(pred_bb->predecessors[0]);
+ DCHECK(pred_bb != nullptr);
+ mir = pred_bb->last_mir_insn;
+ }
+ DCHECK(mir != nullptr);
+ UpdateReferenceVRegsLocal(nullptr, mir, references);
+ }
+}
+
+bool Mir2Lir::UpdateReferenceVRegsLocal(MIR* mir, MIR* prev_mir, BitVector* references) {
+ DCHECK(mir == nullptr || mir->bb == prev_mir->bb);
+ DCHECK(prev_mir != nullptr);
+ while (prev_mir != nullptr) {
+ if (prev_mir == mir) {
+ return true;
+ }
+ const size_t num_defs = prev_mir->ssa_rep->num_defs;
+ const int32_t* defs = prev_mir->ssa_rep->defs;
+ if (num_defs == 1u && mir_graph_->reg_location_[defs[0]].ref &&
+ !mir_graph_->IsConstantNullRef(mir_graph_->reg_location_[defs[0]])) {
+ references->SetBit(mir_graph_->SRegToVReg(defs[0]));
+ } else {
+ for (size_t i = 0u; i != num_defs; ++i) {
+ references->ClearBit(mir_graph_->SRegToVReg(defs[i]));
+ }
+ }
+ prev_mir = prev_mir->next;
+ }
+ return false;
+}
+
+void Mir2Lir::UpdateReferenceVRegs(MIR* mir, MIR* prev_mir, BitVector* references) {
+ if (mir == nullptr) {
+ // Safepoint in entry sequence.
+ InitReferenceVRegs(mir_graph_->GetEntryBlock(), references);
+ return;
+ }
+ if (IsInstructionReturn(mir->dalvikInsn.opcode) ||
+ mir->dalvikInsn.opcode == Instruction::RETURN_VOID_NO_BARRIER) {
+ references->ClearAllBits();
+ if (mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT) {
+ references->SetBit(mir_graph_->SRegToVReg(mir->ssa_rep->uses[0]));
+ }
+ return;
+ }
+ if (prev_mir != nullptr && mir->bb == prev_mir->bb &&
+ UpdateReferenceVRegsLocal(mir, prev_mir, references)) {
+ return;
+ }
+ BasicBlock* bb = mir_graph_->GetBasicBlock(mir->bb);
+ DCHECK(bb != nullptr);
+ InitReferenceVRegs(bb, references);
+ bool success = UpdateReferenceVRegsLocal(mir, bb->first_mir_insn, references);
+ DCHECK(success) << "MIR @0x" << std::hex << mir->offset << " not in BB#" << std::dec << mir->bb;
+}
+
} // namespace art
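
The new CreateNativeGcMap writes each safepoint's reference bit vector into the map; on little-endian hosts the raw storage words can be used directly, otherwise the bytes are extracted manually. A standalone sketch of that byte extraction (assuming 32-bit storage words and a little-endian byte layout in the emitted map):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    std::vector<uint8_t> SerializeBits(const uint32_t* raw_storage, size_t num_bytes) {
      std::vector<uint8_t> out(num_bytes);
      for (size_t i = 0; i != num_bytes; ++i) {
        // Byte i of the map holds bits [8*i, 8*i+8), independent of host
        // endianness, matching what a raw copy does on a little-endian machine.
        out[i] = static_cast<uint8_t>(
            raw_storage[i / sizeof(uint32_t)] >> (8u * (i % sizeof(uint32_t))));
      }
      return out;
    }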
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 32a469d..2bcaaca 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -86,7 +86,7 @@
class DivZeroCheckSlowPath : public Mir2Lir::LIRSlowPath {
public:
DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch_in)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in) {
+ : LIRSlowPath(m2l, branch_in) {
}
void Compile() OVERRIDE {
@@ -105,7 +105,7 @@
public:
ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in, RegStorage index_in,
RegStorage length_in)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in),
+ : LIRSlowPath(m2l, branch_in),
index_(index_in), length_(length_in) {
}
@@ -129,7 +129,7 @@
class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
public:
ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in, int index_in, RegStorage length_in)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in),
+ : LIRSlowPath(m2l, branch_in),
index_(index_in), length_(length_in) {
}
@@ -159,7 +159,7 @@
class NullCheckSlowPath : public Mir2Lir::LIRSlowPath {
public:
NullCheckSlowPath(Mir2Lir* m2l, LIR* branch)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
+ : LIRSlowPath(m2l, branch) {
}
void Compile() OVERRIDE {
@@ -581,7 +581,7 @@
// At least one will be non-null here, otherwise we wouldn't generate the slow path.
StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
RegStorage r_base)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved != nullptr ? unresolved : uninit, cont),
+ : LIRSlowPath(m2l, unresolved != nullptr ? unresolved : uninit, cont),
second_branch_(unresolved != nullptr ? uninit : nullptr),
storage_index_(storage_index), r_base_(r_base) {
}
@@ -1052,9 +1052,9 @@
class SlowPath : public LIRSlowPath {
public:
SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont_in, const int type_idx_in,
- const RegLocation& rl_method_in, const RegLocation& rl_result_in) :
- LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont_in),
- type_idx_(type_idx_in), rl_method_(rl_method_in), rl_result_(rl_result_in) {
+ const RegLocation& rl_method_in, const RegLocation& rl_result_in)
+ : LIRSlowPath(m2l, fromfast, cont_in),
+ type_idx_(type_idx_in), rl_method_(rl_method_in), rl_result_(rl_result_in) {
}
void Compile() {
@@ -1120,9 +1120,9 @@
class SlowPath : public LIRSlowPath {
public:
SlowPath(Mir2Lir* m2l, LIR* fromfast_in, LIR* cont_in, RegStorage r_method_in,
- int32_t string_idx_in) :
- LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast_in, cont_in),
- r_method_(r_method_in), string_idx_(string_idx_in) {
+ int32_t string_idx_in)
+ : LIRSlowPath(m2l, fromfast_in, cont_in),
+ r_method_(r_method_in), string_idx_(string_idx_in) {
}
void Compile() {
@@ -1304,7 +1304,7 @@
public:
InitTypeSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont, uint32_t type_idx_in,
RegLocation rl_src_in)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont), type_idx_(type_idx_in),
+ : LIRSlowPath(m2l, branch, cont), type_idx_(type_idx_in),
rl_src_(rl_src_in) {
}
@@ -1453,9 +1453,9 @@
class SlowPath : public LIRSlowPath {
public:
SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont_in, const int type_idx_in,
- const RegStorage class_reg_in) :
- LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont_in),
- type_idx_(type_idx_in), class_reg_(class_reg_in) {
+ const RegStorage class_reg_in)
+ : LIRSlowPath(m2l, fromfast, cont_in),
+ type_idx_(type_idx_in), class_reg_(class_reg_in) {
}
void Compile() {
@@ -1484,8 +1484,8 @@
// to call a helper function to do the check.
class SlowPath : public LIRSlowPath {
public:
- SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load):
- LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), load_(load) {
+ SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load)
+ : LIRSlowPath(m2l, fromfast, cont), load_(load) {
}
void Compile() {
@@ -1874,8 +1874,8 @@
int32_t divisor = mir_graph_->ConstantValue(rl_src2);
if (CanDivideByReciprocalMultiplyFloat(divisor)) {
// Generate multiply by reciprocal instead of div.
- float recip = 1.0f/bit_cast<int32_t, float>(divisor);
- GenMultiplyByConstantFloat(rl_dest, rl_src1, bit_cast<float, int32_t>(recip));
+ float recip = 1.0f/bit_cast<float, int32_t>(divisor);
+ GenMultiplyByConstantFloat(rl_dest, rl_src1, bit_cast<int32_t, float>(recip));
return true;
}
} else {
@@ -1883,7 +1883,7 @@
if (CanDivideByReciprocalMultiplyDouble(divisor)) {
// Generate multiply by reciprocal instead of div.
double recip = 1.0/bit_cast<double, int64_t>(divisor);
- GenMultiplyByConstantDouble(rl_dest, rl_src1, bit_cast<double, int64_t>(recip));
+ GenMultiplyByConstantDouble(rl_dest, rl_src1, bit_cast<int64_t, double>(recip));
return true;
}
}
@@ -2151,9 +2151,6 @@
RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
LoadConstantNoClobber(rl_result.reg, value);
StoreValue(rl_dest, rl_result);
- if (value == 0) {
- Workaround7250540(rl_dest, rl_result.reg);
- }
}
void Mir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
@@ -2179,7 +2176,7 @@
class Mir2Lir::SuspendCheckSlowPath : public Mir2Lir::LIRSlowPath {
public:
SuspendCheckSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont) {
+ : LIRSlowPath(m2l, branch, cont) {
}
void Compile() OVERRIDE {
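
Every slow-path constructor here drops the explicit m2l->GetCurrentDexPc() argument, which suggests the LIRSlowPath base class now captures the current dex pc itself. A sketch of that simplified shape (stub types; the real base class lives in mir_to_lir.h):

    #include <cstdint>

    struct LIR;
    // Stand-in for Mir2Lir, just enough for the sketch.
    struct CodegenStub {
      uint32_t GetCurrentDexPc() const { return current_dex_pc; }
      uint32_t current_dex_pc = 0;
    };

    class SlowPathSketch {
     public:
      SlowPathSketch(CodegenStub* m2l, LIR* fromfast, LIR* cont = nullptr)
          : current_dex_pc_(m2l->GetCurrentDexPc()),  // captured by the base now
            fromfast_(fromfast),
            cont_(cont) {}
     protected:
      const uint32_t current_dex_pc_;
      LIR* const fromfast_;
      LIR* const cont_;
    };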
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 6b553fd..2d41ba1 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -48,7 +48,8 @@
class IntrinsicSlowPathPath : public Mir2Lir::LIRSlowPath {
public:
IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info_in, LIR* branch_in, LIR* resume_in)
- : LIRSlowPath(m2l, info_in->offset, branch_in, resume_in), info_(info_in) {
+ : LIRSlowPath(m2l, branch_in, resume_in), info_(info_in) {
+ DCHECK_EQ(info_in->offset, current_dex_pc_);
}
void Compile() {
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index db844bc..b71691f 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -37,48 +37,6 @@
}
/*
- * Temporary workaround for Issue 7250540. If we're loading a constant zero into a
- * promoted floating point register, also copy a zero into the int/ref identity of
- * that sreg.
- */
-void Mir2Lir::Workaround7250540(RegLocation rl_dest, RegStorage zero_reg) {
- if (rl_dest.fp) {
- int pmap_index = SRegToPMap(rl_dest.s_reg_low);
- const bool is_fp_promoted = promotion_map_[pmap_index].fp_location == kLocPhysReg;
- const bool is_core_promoted = promotion_map_[pmap_index].core_location == kLocPhysReg;
- if (is_fp_promoted || is_core_promoted) {
- // Now, determine if this vreg is ever used as a reference. If not, we're done.
- bool used_as_reference = false;
- int base_vreg = mir_graph_->SRegToVReg(rl_dest.s_reg_low);
- for (int i = 0; !used_as_reference && (i < mir_graph_->GetNumSSARegs()); i++) {
- if (mir_graph_->SRegToVReg(mir_graph_->reg_location_[i].s_reg_low) == base_vreg) {
- used_as_reference |= mir_graph_->reg_location_[i].ref;
- }
- }
- if (!used_as_reference) {
- return;
- }
- RegStorage temp_reg = zero_reg;
- if (!temp_reg.Valid()) {
- temp_reg = AllocTemp();
- LoadConstant(temp_reg, 0);
- }
- if (is_core_promoted) {
- // Promoted - just copy in a zero
- OpRegCopy(RegStorage::Solo32(promotion_map_[pmap_index].core_reg), temp_reg);
- } else {
- // Lives in the frame, need to store.
- ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_dest.s_reg_low), temp_reg, k32, kNotVolatile);
- }
- if (!zero_reg.Valid()) {
- FreeTemp(temp_reg);
- }
- }
- }
-}
-
-/*
* Load a Dalvik register into a physical register. Take care when
* using this routine, as it doesn't perform any bookkeeping regarding
* register liveness. That is the responsibility of the caller.
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index 5c98b10..936ff42 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -77,7 +77,7 @@
*
* [!] escape. To insert "!", use "!!"
*/
-/* NOTE: must be kept in sync with enum MipsOpcode from LIR.h */
+/* NOTE: must be kept in sync with enum MipsOpcode from mips_lir.h */
/*
* TUNING: We're currently punting on the branch delay slots. All branch
* instructions in this map are given a size of 8, which during assembly
@@ -85,6 +85,7 @@
* an assembler pass to fill those slots when possible.
*/
const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = {
+ // The following are common mips32r2, mips32r6 and mips64r6 instructions.
ENCODING_MAP(kMips32BitData, 0x00000000,
kFmtBitBlt, 31, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_UNARY_OP,
@@ -117,7 +118,7 @@
kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
NEEDS_FIXUP, "beq", "!0r,!1r,!2t!0N", 8),
- ENCODING_MAP(kMipsBeqz, 0x10000000, /* same as beq above with t = $zero */
+ ENCODING_MAP(kMipsBeqz, 0x10000000, // Same as beq above with t = $zero.
kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
NEEDS_FIXUP, "beqz", "!0r,!1t!0N", 8),
@@ -137,7 +138,7 @@
kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
NEEDS_FIXUP, "bltz", "!0r,!1t!0N", 8),
- ENCODING_MAP(kMipsBnez, 0x14000000, /* same as bne below with t = $zero */
+ ENCODING_MAP(kMipsBnez, 0x14000000, // Same as bne below with t = $zero.
kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
NEEDS_FIXUP, "bnez", "!0r,!1t!0N", 8),
@@ -145,14 +146,98 @@
kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
NEEDS_FIXUP, "bne", "!0r,!1r,!2t!0N", 8),
- ENCODING_MAP(kMipsDiv, 0x0000001a,
- kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF_HI | REG_DEF_LO | REG_USE01,
- "div", "!0r,!1r", 4),
ENCODING_MAP(kMipsExt, 0x7c000000,
kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 10, 6,
kFmtBitBlt, 15, 11, IS_QUAD_OP | REG_DEF0 | REG_USE1,
"ext", "!0r,!1r,!2d,!3D", 4),
+ ENCODING_MAP(kMipsFaddd, 0x46200000,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "add.d", "!0S,!1S,!2S", 4),
+ ENCODING_MAP(kMipsFadds, 0x46000000,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "add.s", "!0s,!1s,!2s", 4),
+ ENCODING_MAP(kMipsFsubd, 0x46200001,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "sub.d", "!0S,!1S,!2S", 4),
+ ENCODING_MAP(kMipsFsubs, 0x46000001,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "sub.s", "!0s,!1s,!2s", 4),
+ ENCODING_MAP(kMipsFdivd, 0x46200003,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "div.d", "!0S,!1S,!2S", 4),
+ ENCODING_MAP(kMipsFdivs, 0x46000003,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "div.s", "!0s,!1s,!2s", 4),
+ ENCODING_MAP(kMipsFmuld, 0x46200002,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "mul.d", "!0S,!1S,!2S", 4),
+ ENCODING_MAP(kMipsFmuls, 0x46000002,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "mul.s", "!0s,!1s,!2s", 4),
+ ENCODING_MAP(kMipsFcvtsd, 0x46200020,
+ kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.s.d", "!0s,!1S", 4),
+ ENCODING_MAP(kMipsFcvtsw, 0x46800020,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.s.w", "!0s,!1s", 4),
+ ENCODING_MAP(kMipsFcvtds, 0x46000021,
+ kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.d.s", "!0S,!1s", 4),
+ ENCODING_MAP(kMipsFcvtdw, 0x46800021,
+ kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.d.w", "!0S,!1s", 4),
+ ENCODING_MAP(kMipsFcvtwd, 0x46200024,
+ kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.w.d", "!0s,!1S", 4),
+ ENCODING_MAP(kMipsFcvtws, 0x46000024,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.w.s", "!0s,!1s", 4),
+ ENCODING_MAP(kMipsFmovd, 0x46200006,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mov.d", "!0S,!1S", 4),
+ ENCODING_MAP(kMipsFmovs, 0x46000006,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mov.s", "!0s,!1s", 4),
+ ENCODING_MAP(kMipsFnegd, 0x46200007,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "neg.d", "!0S,!1S", 4),
+ ENCODING_MAP(kMipsFnegs, 0x46000007,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "neg.s", "!0s,!1s", 4),
+ ENCODING_MAP(kMipsFldc1, 0xd4000000,
+ kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "ldc1", "!0S,!1d(!2r)", 4),
+ ENCODING_MAP(kMipsFlwc1, 0xc4000000,
+ kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lwc1", "!0s,!1d(!2r)", 4),
+ ENCODING_MAP(kMipsFsdc1, 0xf4000000,
+ kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "sdc1", "!0S,!1d(!2r)", 4),
+ ENCODING_MAP(kMipsFswc1, 0xe4000000,
+ kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "swc1", "!0s,!1d(!2r)", 4),
ENCODING_MAP(kMipsJal, 0x0c000000,
kFmtBitBlt, 25, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
@@ -197,31 +282,31 @@
kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
"lw", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMipsMfhi, 0x00000010,
- kFmtBitBlt, 15, 11, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | REG_DEF0 | REG_USE_HI,
- "mfhi", "!0r", 4),
- ENCODING_MAP(kMipsMflo, 0x00000012,
- kFmtBitBlt, 15, 11, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | REG_DEF0 | REG_USE_LO,
- "mflo", "!0r", 4),
- ENCODING_MAP(kMipsMove, 0x00000025, /* or using zero reg */
+ ENCODING_MAP(kMipsMove, 0x00000025, // Or using zero reg.
kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
"move", "!0r,!1r", 4),
- ENCODING_MAP(kMipsMovz, 0x0000000a,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "movz", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMipsMul, 0x70000002,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "mul", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMipsMfc1, 0x44000000,
+ kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mfc1", "!0r,!1s", 4),
+ ENCODING_MAP(kMipsMtc1, 0x44800000,
+ kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
+ "mtc1", "!0r,!1s", 4),
+ ENCODING_MAP(kMipsMfhc1, 0x44600000,
+ kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mfhc1", "!0r,!1s", 4),
+ ENCODING_MAP(kMipsMthc1, 0x44e00000,
+ kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
+ "mthc1", "!0r,!1s", 4),
ENCODING_MAP(kMipsNop, 0x00000000,
kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, NO_OPERAND,
"nop", ";", 4),
- ENCODING_MAP(kMipsNor, 0x00000027, /* used for "not" too */
+ ENCODING_MAP(kMipsNor, 0x00000027, // Used for "not" too.
kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
"nor", "!0r,!1r,!2r", 4),
@@ -289,7 +374,7 @@
kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
"srlv", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMipsSubu, 0x00000023, /* used for "neg" too */
+ ENCODING_MAP(kMipsSubu, 0x00000023, // Used for "neg" too.
kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
"subu", "!0r,!1r,!2r", 4),
@@ -297,6 +382,10 @@
kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
"sw", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMipsSync, 0x0000000f,
+ kFmtBitBlt, 10, 6, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP,
+ "sync", ";", 4),
ENCODING_MAP(kMipsXor, 0x00000026,
kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
@@ -305,95 +394,143 @@
kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
"xori", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMipsFadds, 0x46000000,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+
+ // The following are mips32r2 instructions.
+ ENCODING_MAP(kMipsR2Div, 0x0000001a,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF_HI | REG_DEF_LO | REG_USE01,
+ "div", "!0r,!1r", 4),
+ ENCODING_MAP(kMipsR2Mul, 0x70000002,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "add.s", "!0s,!1s,!2s", 4),
- ENCODING_MAP(kMipsFsubs, 0x46000001,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ "mul", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMipsR2Mfhi, 0x00000010,
+ kFmtBitBlt, 15, 11, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | REG_DEF0 | REG_USE_HI,
+ "mfhi", "!0r", 4),
+ ENCODING_MAP(kMipsR2Mflo, 0x00000012,
+ kFmtBitBlt, 15, 11, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | REG_DEF0 | REG_USE_LO,
+ "mflo", "!0r", 4),
+ ENCODING_MAP(kMipsR2Movz, 0x0000000a,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "sub.s", "!0s,!1s,!2s", 4),
- ENCODING_MAP(kMipsFmuls, 0x46000002,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ "movz", "!0r,!1r,!2r", 4),
+
+ // The following are mips32r6 and mips64r6 instructions.
+ ENCODING_MAP(kMipsR6Div, 0x0000009a,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "mul.s", "!0s,!1s,!2s", 4),
- ENCODING_MAP(kMipsFdivs, 0x46000003,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ "div", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMipsR6Mod, 0x000000da,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "div.s", "!0s,!1s,!2s", 4),
- ENCODING_MAP(kMipsFaddd, 0x46200000,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ "mod", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMipsR6Mul, 0x00000098,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "add.d", "!0S,!1S,!2S", 4),
- ENCODING_MAP(kMipsFsubd, 0x46200001,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ "mul", "!0r,!1r,!2r", 4),
+
+ // The following are mips64r6 instructions.
+ ENCODING_MAP(kMips64Daddiu, 0x64000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "daddiu", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Daddu, 0x0000002d,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "sub.d", "!0S,!1S,!2S", 4),
- ENCODING_MAP(kMipsFmuld, 0x46200002,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ "daddu", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dahi, 0x04060000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE0,
+ "dahi", "!0r,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMips64Dati, 0x041E0000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE0,
+ "dati", "!0r,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMips64Daui, 0x74000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "daui", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Ddiv, 0x0000009e,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "mul.d", "!0S,!1S,!2S", 4),
- ENCODING_MAP(kMipsFdivd, 0x46200003,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ "ddiv", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dmod, 0x000000de,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "div.d", "!0S,!1S,!2S", 4),
- ENCODING_MAP(kMipsFcvtsd, 0x46200020,
- kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ "dmod", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dmul, 0x0000009c,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dmul", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dmfc1, 0x44200000,
+ kFmtBitBlt, 20, 16, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.s.d", "!0s,!1S", 4),
- ENCODING_MAP(kMipsFcvtsw, 0x46800020,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.s.w", "!0s,!1s", 4),
- ENCODING_MAP(kMipsFcvtds, 0x46000021,
- kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.d.s", "!0S,!1s", 4),
- ENCODING_MAP(kMipsFcvtdw, 0x46800021,
- kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.d.w", "!0S,!1s", 4),
- ENCODING_MAP(kMipsFcvtws, 0x46000024,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.w.s", "!0s,!1s", 4),
- ENCODING_MAP(kMipsFcvtwd, 0x46200024,
- kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.w.d", "!0s,!1S", 4),
- ENCODING_MAP(kMipsFmovs, 0x46000006,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "mov.s", "!0s,!1s", 4),
- ENCODING_MAP(kMipsFmovd, 0x46200006,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "mov.d", "!0S,!1S", 4),
- ENCODING_MAP(kMipsFlwc1, 0xC4000000,
- kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "lwc1", "!0s,!1d(!2r)", 4),
- ENCODING_MAP(kMipsFldc1, 0xD4000000,
- kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "ldc1", "!0S,!1d(!2r)", 4),
- ENCODING_MAP(kMipsFswc1, 0xE4000000,
- kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
- "swc1", "!0s,!1d(!2r)", 4),
- ENCODING_MAP(kMipsFsdc1, 0xF4000000,
- kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
- "sdc1", "!0S,!1d(!2r)", 4),
- ENCODING_MAP(kMipsMfc1, 0x44000000,
- kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "mfc1", "!0r,!1s", 4),
- ENCODING_MAP(kMipsMtc1, 0x44800000,
- kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ "dmfc1", "!0r,!1s", 4),
+ ENCODING_MAP(kMips64Dmtc1, 0x44a00000,
+ kFmtBitBlt, 20, 16, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
- "mtc1", "!0r,!1s", 4),
- ENCODING_MAP(kMipsDelta, 0x27e00000,
+ "dmtc1", "!0r,!1s", 4),
+ ENCODING_MAP(kMips64Drotr32, 0x0000003e | (1 << 21),
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "drotr32", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsll, 0x00000038,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsll", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsll32, 0x0000003c,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsll32", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsrl, 0x0000003a,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsrl", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsrl32, 0x0000003e,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsrl32", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsra, 0x0000003b,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsra", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsra32, 0x0000003f,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsra32", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsllv, 0x00000014,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dsllv", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dsrlv, 0x00000016,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dsrlv", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dsrav, 0x00000017,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dsrav", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dsubu, 0x0000002f,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dsubu", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Ld, 0xdc000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "ld", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Lwu, 0x9c000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lwu", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Sd, 0xfc000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "sd", "!0r,!1d(!2r)", 4),
+
+ // The following are pseudoinstructions.
+  ENCODING_MAP(kMipsDelta, 0x27e00000,  // Implemented as daddiu on mips64.
kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, 15, 0,
kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0 | REG_USE_LR |
NEEDS_FIXUP, "addiu", "!0r,ra,0x!1h(!1d)", 4),
@@ -409,10 +546,6 @@
kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, NO_OPERAND | IS_BRANCH | REG_DEF_LR,
"addiu", "ra,pc,8", 4),
- ENCODING_MAP(kMipsSync, 0x0000000f,
- kFmtBitBlt, 10, 6, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP,
- "sync", ";", 4),
ENCODING_MAP(kMipsUndefined, 0x64000000,
kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, NO_OPERAND,
@@ -515,14 +648,13 @@
*/
AssemblerStatus MipsMir2Lir::AssembleInstructions(CodeOffset start_addr) {
LIR *lir;
- AssemblerStatus res = kSuccess; // Assume success
+ AssemblerStatus res = kSuccess; // Assume success.
for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
if (lir->opcode < 0) {
continue;
}
-
if (lir->flags.is_nop) {
continue;
}
@@ -539,41 +671,49 @@
* and is found in lir->target. If operands[3] is non-NULL,
* then it is a Switch/Data table.
*/
- int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
- EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
+ int offset1 = UnwrapPointer<LIR>(lir->operands[2])->offset;
+ const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[3]);
int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
int delta = offset2 - offset1;
if ((delta & 0xffff) == delta && ((delta & 0x8000) == 0)) {
- // Fits
+ // Fits.
lir->operands[1] = delta;
+ if (cu_->target64) {
+ LIR *new_addiu = RawLIR(lir->dalvik_offset, kMips64Daddiu, lir->operands[0], rRAd,
+ delta);
+ InsertLIRBefore(lir, new_addiu);
+ NopLIR(lir);
+ res = kRetryAll;
+ }
} else {
- // Doesn't fit - must expand to kMipsDelta[Hi|Lo] pair
- LIR *new_delta_hi =
- RawLIR(lir->dalvik_offset, kMipsDeltaHi,
- lir->operands[0], 0, lir->operands[2],
- lir->operands[3], 0, lir->target);
+ // Doesn't fit - must expand to kMipsDelta[Hi|Lo] pair.
+ LIR *new_delta_hi = RawLIR(lir->dalvik_offset, kMipsDeltaHi, lir->operands[0], 0,
+ lir->operands[2], lir->operands[3], 0, lir->target);
InsertLIRBefore(lir, new_delta_hi);
- LIR *new_delta_lo =
- RawLIR(lir->dalvik_offset, kMipsDeltaLo,
- lir->operands[0], 0, lir->operands[2],
- lir->operands[3], 0, lir->target);
+ LIR *new_delta_lo = RawLIR(lir->dalvik_offset, kMipsDeltaLo, lir->operands[0], 0,
+ lir->operands[2], lir->operands[3], 0, lir->target);
InsertLIRBefore(lir, new_delta_lo);
- LIR *new_addu =
- RawLIR(lir->dalvik_offset, kMipsAddu,
- lir->operands[0], lir->operands[0], rRA);
+ LIR *new_addu;
+ if (cu_->target64) {
+ new_addu = RawLIR(lir->dalvik_offset, kMips64Daddu, lir->operands[0], lir->operands[0],
+ rRAd);
+ } else {
+ new_addu = RawLIR(lir->dalvik_offset, kMipsAddu, lir->operands[0], lir->operands[0],
+ rRA);
+ }
InsertLIRBefore(lir, new_addu);
NopLIR(lir);
res = kRetryAll;
}
} else if (lir->opcode == kMipsDeltaLo) {
- int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
- EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
+ int offset1 = UnwrapPointer<LIR>(lir->operands[2])->offset;
+ const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[3]);
int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
int delta = offset2 - offset1;
lir->operands[1] = delta & 0xffff;
} else if (lir->opcode == kMipsDeltaHi) {
- int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
- EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
+ int offset1 = UnwrapPointer<LIR>(lir->operands[2])->offset;
+ const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[3]);
int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
int delta = offset2 - offset1;
lir->operands[1] = (delta >> 16) & 0xffff;
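
For reference, the fit test and the hi/lo split used by the kMipsDelta fixup above reduce to the following standalone sketch (function names are illustrative, not part of the compiler):

    #include <cstdint>

    // The delta fits the 16-bit immediate iff no bits above [15:0] are set
    // and the sign bit is clear, i.e. 0 <= delta < 0x8000.
    bool FitsInUnsignedDelta16(int32_t delta) {
      return (delta & 0xffff) == delta && (delta & 0x8000) == 0;
    }

    // Otherwise the delta is split across a kMipsDeltaHi/kMipsDeltaLo pair.
    void SplitDelta(int32_t delta, int32_t* hi16, int32_t* lo16) {
      *hi16 = (delta >> 16) & 0xffff;  // Patched by the kMipsDeltaHi case.
      *lo16 = delta & 0xffff;          // Patched by the kMipsDeltaLo case.
    }
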
@@ -675,7 +815,9 @@
case kFmtDfp: {
// TODO: do we need to adjust now that we're using 64BitSolo?
DCHECK(RegStorage::IsDouble(operand)) << ", Operand = 0x" << std::hex << operand;
- DCHECK_EQ((operand & 0x1), 0U);
+ if (!cu_->target64) {
+ DCHECK_EQ((operand & 0x1), 0U); // May only use even numbered registers for mips32.
+ }
value = (RegStorage::RegNum(operand) << encoder->field_loc[i].start) &
((1 << (encoder->field_loc[i].end + 1)) - 1);
bits |= value;
@@ -696,7 +838,7 @@
code_buffer_.push_back((bits >> 8) & 0xff);
code_buffer_.push_back((bits >> 16) & 0xff);
code_buffer_.push_back((bits >> 24) & 0xff);
- // TUNING: replace with proper delay slot handling
+ // TUNING: replace with proper delay slot handling.
if (encoder->size == 8) {
DCHECK(!IsPseudoLirOp(lir->opcode));
const MipsEncodingMap *encoder2 = &EncodingMap[kMipsNop];
@@ -735,7 +877,7 @@
lir->operands[0] = 0;
}
}
- /* Pseudo opcodes don't consume space */
+ // Pseudo opcodes don't consume space.
}
return offset;
}
@@ -748,10 +890,10 @@
void MipsMir2Lir::AssignOffsets() {
int offset = AssignInsnOffsets();
- /* Const values have to be word aligned */
+ // Const values have to be word aligned.
offset = RoundUp(offset, 4);
- /* Set up offsets for literals */
+ // Set up offsets for literals.
data_offset_ = offset;
offset = AssignLiteralOffset(offset);
@@ -788,19 +930,19 @@
CodegenDump();
LOG(FATAL) << "Assembler error - too many retries";
}
- // Redo offsets and try again
+ // Redo offsets and try again.
AssignOffsets();
code_buffer_.clear();
}
}
- // Install literals
+ // Install literals.
InstallLiteralPools();
- // Install switch tables
+ // Install switch tables.
InstallSwitchTables();
- // Install fill array data
+ // Install fill array data.
InstallFillArrayData();
// Create the mapping table and native offset to reference map.
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index d9471f6..de66b35 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -68,7 +68,7 @@
*/
void MipsMir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
- // Add the table to the list - we'll process it later
+ // Add the table to the list - we'll process it later.
SwitchTable* tab_rec =
static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
tab_rec->switch_mir = mir;
@@ -77,39 +77,39 @@
int elements = table[1];
switch_tables_.push_back(tab_rec);
- // The table is composed of 8-byte key/disp pairs
+ // The table is composed of 8-byte key/disp pairs.
int byte_size = elements * 8;
int size_hi = byte_size >> 16;
int size_lo = byte_size & 0xffff;
- RegStorage r_end = AllocTemp();
+ RegStorage r_end = AllocPtrSizeTemp();
if (size_hi) {
NewLIR2(kMipsLui, r_end.GetReg(), size_hi);
}
- // Must prevent code motion for the curr pc pair
+ // Must prevent code motion for the curr pc pair.
  GenBarrier();  // Scheduling barrier.
- NewLIR0(kMipsCurrPC); // Really a jal to .+8
- // Now, fill the branch delay slot
+ NewLIR0(kMipsCurrPC); // Really a jal to .+8.
+ // Now, fill the branch delay slot.
if (size_hi) {
NewLIR3(kMipsOri, r_end.GetReg(), r_end.GetReg(), size_lo);
} else {
NewLIR3(kMipsOri, r_end.GetReg(), rZERO, size_lo);
}
- GenBarrier(); // Scheduling barrier
+ GenBarrier(); // Scheduling barrier.
- // Construct BaseLabel and set up table base register
+ // Construct BaseLabel and set up table base register.
LIR* base_label = NewLIR0(kPseudoTargetLabel);
- // Remember base label so offsets can be computed later
+ // Remember base label so offsets can be computed later.
tab_rec->anchor = base_label;
- RegStorage r_base = AllocTemp();
+ RegStorage r_base = AllocPtrSizeTemp();
NewLIR4(kMipsDelta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));
OpRegRegReg(kOpAdd, r_end, r_end, r_base);
- // Grab switch test value
+ // Grab switch test value.
rl_src = LoadValue(rl_src, kCoreReg);
- // Test loop
+ // Test loop.
RegStorage r_key = AllocTemp();
LIR* loop_label = NewLIR0(kPseudoTargetLabel);
LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL);
@@ -118,10 +118,10 @@
OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label);
RegStorage r_disp = AllocTemp();
Load32Disp(r_base, -4, r_disp);
- OpRegRegReg(kOpAdd, rs_rRA, rs_rRA, r_disp);
- OpReg(kOpBx, rs_rRA);
-
- // Loop exit
+ const RegStorage rs_ra = TargetPtrReg(kLr);
+ OpRegRegReg(kOpAdd, rs_ra, rs_ra, r_disp);
+ OpReg(kOpBx, rs_ra);
+ // Loop exit.
LIR* exit_label = NewLIR0(kPseudoTargetLabel);
exit_branch->target = exit_label;
}
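
The loop emitted above amounts to a linear scan over 8-byte (key, displacement) records relative to the anchor label; a hedged C++ model of that scan (the struct and names are assumptions):

    #include <cstdint>

    struct KeyDisp { int32_t key; int32_t disp; };  // 8 bytes per entry.

    uintptr_t SparseSwitchTarget(const KeyDisp* table, int elements,
                                 int32_t value, uintptr_t anchor_pc) {
      for (int i = 0; i < elements; ++i) {
        if (table[i].key == value) {
          return anchor_pc + table[i].disp;  // RA + displacement, then jump.
        }
      }
      return 0;  // No match: execution continues at the loop's exit label.
    }
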
@@ -141,7 +141,7 @@
*/
void MipsMir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
- // Add the table to the list - we'll process it later
+ // Add the table to the list - we'll process it later.
SwitchTable* tab_rec =
static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
tab_rec->switch_mir = mir;
@@ -150,10 +150,10 @@
int size = table[1];
switch_tables_.push_back(tab_rec);
- // Get the switch value
+ // Get the switch value.
rl_src = LoadValue(rl_src, kCoreReg);
- // Prepare the bias. If too big, handle 1st stage here
+ // Prepare the bias. If too big, handle 1st stage here.
int low_key = s4FromSwitchData(&table[2]);
bool large_bias = false;
RegStorage r_key;
@@ -167,10 +167,10 @@
r_key = AllocTemp();
}
- // Must prevent code motion for the curr pc pair
+ // Must prevent code motion for the curr pc pair.
GenBarrier();
- NewLIR0(kMipsCurrPC); // Really a jal to .+8
- // Now, fill the branch delay slot with bias strip
+ NewLIR0(kMipsCurrPC); // Really a jal to .+8.
+ // Now, fill the branch delay slot with bias strip.
if (low_key == 0) {
NewLIR0(kMipsNop);
} else {
@@ -180,51 +180,60 @@
OpRegRegImm(kOpSub, r_key, rl_src.reg, low_key);
}
}
- GenBarrier(); // Scheduling barrier
+ GenBarrier(); // Scheduling barrier.
- // Construct BaseLabel and set up table base register
+ // Construct BaseLabel and set up table base register.
LIR* base_label = NewLIR0(kPseudoTargetLabel);
- // Remember base label so offsets can be computed later
+ // Remember base label so offsets can be computed later.
tab_rec->anchor = base_label;
- // Bounds check - if < 0 or >= size continue following switch
+ // Bounds check - if < 0 or >= size continue following switch.
LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, NULL);
- // Materialize the table base pointer
- RegStorage r_base = AllocTemp();
+ // Materialize the table base pointer.
+ RegStorage r_base = AllocPtrSizeTemp();
NewLIR4(kMipsDelta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));
- // Load the displacement from the switch table
+ // Load the displacement from the switch table.
RegStorage r_disp = AllocTemp();
LoadBaseIndexed(r_base, r_key, r_disp, 2, k32);
- // Add to rAP and go
- OpRegRegReg(kOpAdd, rs_rRA, rs_rRA, r_disp);
- OpReg(kOpBx, rs_rRA);
+ // Add to rRA and go.
+ const RegStorage rs_ra = TargetPtrReg(kLr);
+ OpRegRegReg(kOpAdd, rs_ra, rs_ra, r_disp);
+ OpReg(kOpBx, rs_ra);
- /* branch_over target here */
+  // branch_over target here.
LIR* target = NewLIR0(kPseudoTargetLabel);
branch_over->target = target;
}
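
The packed variant above needs no scan: it strips the bias, does one unsigned bounds check (the kCondHi compare against size-1 covers both "< 0" and ">= size"), and indexes a 4-byte displacement table. A minimal sketch, names assumed:

    #include <cstdint>

    uintptr_t PackedSwitchTarget(const int32_t* disp_table, uint32_t size,
                                 int32_t value, int32_t low_key,
                                 uintptr_t anchor_pc) {
      uint32_t key = static_cast<uint32_t>(value - low_key);  // Bias strip.
      if (key >= size) {
        return 0;  // branch_over: fall through past the switch.
      }
      return anchor_pc + disp_table[key];  // RA + loaded displacement.
    }
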
void MipsMir2Lir::GenMoveException(RegLocation rl_dest) {
- int ex_offset = Thread::ExceptionOffset<4>().Int32Value();
+ int ex_offset = cu_->target64 ? Thread::ExceptionOffset<8>().Int32Value() :
+ Thread::ExceptionOffset<4>().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
RegStorage reset_reg = AllocTempRef();
- LoadRefDisp(rs_rMIPS_SELF, ex_offset, rl_result.reg, kNotVolatile);
+ LoadRefDisp(TargetPtrReg(kSelf), ex_offset, rl_result.reg, kNotVolatile);
LoadConstant(reset_reg, 0);
- StoreRefDisp(rs_rMIPS_SELF, ex_offset, reset_reg, kNotVolatile);
+ StoreRefDisp(TargetPtrReg(kSelf), ex_offset, reset_reg, kNotVolatile);
FreeTemp(reset_reg);
StoreValue(rl_dest, rl_result);
}
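
What the load/store pair above implements, roughly; the field name below is illustrative, and the real offset comes from Thread::ExceptionOffset<4/8>():

    // Hedged sketch: read the pending exception off the current Thread and
    // clear the slot in one sequence.
    struct ThreadModel { void* exception; };  // Illustrative stand-in.

    void* MoveException(ThreadModel* self) {
      void* ex = self->exception;  // LoadRefDisp(TargetPtrReg(kSelf), ex_offset).
      self->exception = nullptr;   // StoreRefDisp of the zeroed temp.
      return ex;                   // Becomes rl_result / rl_dest.
    }
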
void MipsMir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
- RegStorage reg_card_base = AllocTemp();
- RegStorage reg_card_no = AllocTemp();
- // NOTE: native pointer.
- LoadWordDisp(rs_rMIPS_SELF, Thread::CardTableOffset<4>().Int32Value(), reg_card_base);
- OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
- StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
+ RegStorage reg_card_base = AllocPtrSizeTemp();
+ RegStorage reg_card_no = AllocPtrSizeTemp();
+ if (cu_->target64) {
+ // NOTE: native pointer.
+ LoadWordDisp(TargetPtrReg(kSelf), Thread::CardTableOffset<8>().Int32Value(), reg_card_base);
+ OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
+ StoreBaseIndexed(reg_card_base, reg_card_no, As32BitReg(reg_card_base), 0, kUnsignedByte);
+ } else {
+ // NOTE: native pointer.
+ LoadWordDisp(TargetPtrReg(kSelf), Thread::CardTableOffset<4>().Int32Value(), reg_card_base);
+ OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
+ StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
+ }
FreeTemp(reg_card_base);
FreeTemp(reg_card_no);
}
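
The card mark above shifts the target address right by kCardShift and stores a single byte; note that the emitted store reuses reg_card_base as the value, which works because the card table base is biased so its low byte equals the dirty marker. A sketch under that assumption:

    #include <cstdint>

    void MarkCard(uint8_t* biased_card_table, uintptr_t tgt_addr,
                  unsigned card_shift) {
      // StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, ...):
      // the low byte of the biased table base is the "dirty" value.
      biased_card_table[tgt_addr >> card_shift] =
          static_cast<uint8_t>(reinterpret_cast<uintptr_t>(biased_card_table));
    }
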
@@ -232,46 +241,70 @@
void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
int spill_count = num_core_spills_ + num_fp_spills_;
/*
- * On entry, rMIPS_ARG0, rMIPS_ARG1, rMIPS_ARG2 & rMIPS_ARG3 are live. Let the register
- * allocation mechanism know so it doesn't try to use any of them when
- * expanding the frame or flushing. This leaves the utility
- * code with a single temp: r12. This should be enough.
+ * On entry, A0, A1, A2 & A3 are live. On Mips64, A4, A5, A6 & A7 are also live.
+ * Let the register allocation mechanism know so it doesn't try to use any of them when
+ * expanding the frame or flushing.
*/
- LockTemp(rs_rMIPS_ARG0);
- LockTemp(rs_rMIPS_ARG1);
- LockTemp(rs_rMIPS_ARG2);
- LockTemp(rs_rMIPS_ARG3);
+ const RegStorage arg0 = TargetReg(kArg0);
+ const RegStorage arg1 = TargetReg(kArg1);
+ const RegStorage arg2 = TargetReg(kArg2);
+ const RegStorage arg3 = TargetReg(kArg3);
+ const RegStorage arg4 = TargetReg(kArg4);
+ const RegStorage arg5 = TargetReg(kArg5);
+ const RegStorage arg6 = TargetReg(kArg6);
+ const RegStorage arg7 = TargetReg(kArg7);
+
+ LockTemp(arg0);
+ LockTemp(arg1);
+ LockTemp(arg2);
+ LockTemp(arg3);
+ if (cu_->target64) {
+ LockTemp(arg4);
+ LockTemp(arg5);
+ LockTemp(arg6);
+ LockTemp(arg7);
+ }
+
+ bool skip_overflow_check;
+ InstructionSet target = (cu_->target64) ? kMips64 : kMips;
+ int ptr_size = cu_->target64 ? 8 : 4;
/*
* We can safely skip the stack overflow check if we're
* a leaf *and* our frame size < fudge factor.
*/
- bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, kMips);
+
+ skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, target);
NewLIR0(kPseudoMethodEntry);
- RegStorage check_reg = AllocTemp();
- RegStorage new_sp = AllocTemp();
+ RegStorage check_reg = AllocPtrSizeTemp();
+ RegStorage new_sp = AllocPtrSizeTemp();
+ const RegStorage rs_sp = TargetPtrReg(kSp);
if (!skip_overflow_check) {
- /* Load stack limit */
- Load32Disp(rs_rMIPS_SELF, Thread::StackEndOffset<4>().Int32Value(), check_reg);
+ // Load stack limit.
+ if (cu_->target64) {
+ LoadWordDisp(TargetPtrReg(kSelf), Thread::StackEndOffset<8>().Int32Value(), check_reg);
+ } else {
+ Load32Disp(TargetPtrReg(kSelf), Thread::StackEndOffset<4>().Int32Value(), check_reg);
+ }
}
- /* Spill core callee saves */
+ // Spill core callee saves.
SpillCoreRegs();
- /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
+ // NOTE: promotion of FP regs currently unsupported, thus no FP spill.
DCHECK_EQ(num_fp_spills_, 0);
- const int frame_sub = frame_size_ - spill_count * 4;
+ const int frame_sub = frame_size_ - spill_count * ptr_size;
if (!skip_overflow_check) {
class StackOverflowSlowPath : public LIRSlowPath {
public:
StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, nullptr), sp_displace_(sp_displace) {
+ : LIRSlowPath(m2l, branch), sp_displace_(sp_displace) {
}
void Compile() OVERRIDE {
m2l_->ResetRegPool();
m2l_->ResetDefTracking();
GenerateTargetLabel(kPseudoThrowTarget);
- // LR is offset 0 since we push in reverse order.
- m2l_->Load32Disp(rs_rMIPS_SP, 0, rs_rRA);
- m2l_->OpRegImm(kOpAdd, rs_rMIPS_SP, sp_displace_);
+ // RA is offset 0 since we push in reverse order.
+ m2l_->LoadWordDisp(m2l_->TargetPtrReg(kSp), 0, m2l_->TargetPtrReg(kLr));
+ m2l_->OpRegImm(kOpAdd, m2l_->TargetPtrReg(kSp), sp_displace_);
m2l_->ClobberCallerSave();
RegStorage r_tgt = m2l_->CallHelperSetup(kQuickThrowStackOverflow); // Doesn't clobber LR.
m2l_->CallHelper(r_tgt, kQuickThrowStackOverflow, false /* MarkSafepointPC */,
@@ -281,21 +314,27 @@
private:
const size_t sp_displace_;
};
- OpRegRegImm(kOpSub, new_sp, rs_rMIPS_SP, frame_sub);
+ OpRegRegImm(kOpSub, new_sp, rs_sp, frame_sub);
LIR* branch = OpCmpBranch(kCondUlt, new_sp, check_reg, nullptr);
- AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_count * 4));
+ AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_count * ptr_size));
// TODO: avoid copy for small frame sizes.
- OpRegCopy(rs_rMIPS_SP, new_sp); // Establish stack
+ OpRegCopy(rs_sp, new_sp); // Establish stack.
} else {
- OpRegImm(kOpSub, rs_rMIPS_SP, frame_sub);
+ OpRegImm(kOpSub, rs_sp, frame_sub);
}
FlushIns(ArgLocs, rl_method);
- FreeTemp(rs_rMIPS_ARG0);
- FreeTemp(rs_rMIPS_ARG1);
- FreeTemp(rs_rMIPS_ARG2);
- FreeTemp(rs_rMIPS_ARG3);
+ FreeTemp(arg0);
+ FreeTemp(arg1);
+ FreeTemp(arg2);
+ FreeTemp(arg3);
+ if (cu_->target64) {
+ FreeTemp(arg4);
+ FreeTemp(arg5);
+ FreeTemp(arg6);
+ FreeTemp(arg7);
+ }
}
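
Condensed, the explicit overflow check above computes the prospective stack pointer and compares it unsigned (kCondUlt) against the thread's stack end; a sketch:

    #include <cstddef>
    #include <cstdint>

    // Returns false when the StackOverflow slow path must be taken.
    bool FrameFits(uintptr_t sp, uintptr_t stack_end, size_t frame_sub) {
      uintptr_t new_sp = sp - frame_sub;  // OpRegRegImm(kOpSub, new_sp, sp, ...).
      return new_sp >= stack_end;         // Inverse of OpCmpBranch(kCondUlt, ...).
    }
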
void MipsMir2Lir::GenExitSequence() {
@@ -303,58 +342,67 @@
   * In the exit path, RET0/RET1 are live - make sure they aren't
* allocated by the register utilities as temps.
*/
- LockTemp(rs_rMIPS_RET0);
- LockTemp(rs_rMIPS_RET1);
+ LockTemp(TargetPtrReg(kRet0));
+ LockTemp(TargetPtrReg(kRet1));
NewLIR0(kPseudoMethodExit);
UnSpillCoreRegs();
- OpReg(kOpBx, rs_rRA);
+ OpReg(kOpBx, TargetPtrReg(kLr));
}
void MipsMir2Lir::GenSpecialExitSequence() {
- OpReg(kOpBx, rs_rRA);
+ OpReg(kOpBx, TargetPtrReg(kLr));
}
void MipsMir2Lir::GenSpecialEntryForSuspend() {
- // Keep 16-byte stack alignment - push A0, i.e. ArtMethod*, 2 filler words and RA.
- core_spill_mask_ = (1u << rs_rRA.GetRegNum());
+ // Keep 16-byte stack alignment - push A0, i.e. ArtMethod*, 2 filler words and RA for mips32,
+ // but A0 and RA for mips64.
+ core_spill_mask_ = (1u << TargetPtrReg(kLr).GetRegNum());
num_core_spills_ = 1u;
fp_spill_mask_ = 0u;
num_fp_spills_ = 0u;
frame_size_ = 16u;
core_vmap_table_.clear();
fp_vmap_table_.clear();
- OpRegImm(kOpSub, rs_rMIPS_SP, frame_size_);
- Store32Disp(rs_rMIPS_SP, frame_size_ - 4, rs_rRA);
- Store32Disp(rs_rMIPS_SP, 0, rs_rA0);
+ const RegStorage rs_sp = TargetPtrReg(kSp);
+ OpRegImm(kOpSub, rs_sp, frame_size_);
+ StoreWordDisp(rs_sp, frame_size_ - (cu_->target64 ? 8 : 4), TargetPtrReg(kLr));
+ StoreWordDisp(rs_sp, 0, TargetPtrReg(kArg0));
}
void MipsMir2Lir::GenSpecialExitForSuspend() {
// Pop the frame. Don't pop ArtMethod*, it's no longer needed.
- Load32Disp(rs_rMIPS_SP, frame_size_ - 4, rs_rRA);
- OpRegImm(kOpAdd, rs_rMIPS_SP, frame_size_);
+ const RegStorage rs_sp = TargetPtrReg(kSp);
+ LoadWordDisp(rs_sp, frame_size_ - (cu_->target64 ? 8 : 4), TargetPtrReg(kLr));
+ OpRegImm(kOpAdd, rs_sp, frame_size_);
}
/*
* Bit of a hack here - in the absence of a real scheduling pass,
* emit the next instruction in static & direct invoke sequences.
*/
-static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED,
- int state, const MethodReference& target_method,
- uint32_t,
- uintptr_t direct_code, uintptr_t direct_method,
- InvokeType type) {
+static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED, int state,
+ const MethodReference& target_method, uint32_t, uintptr_t direct_code,
+ uintptr_t direct_method, InvokeType type) {
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
if (direct_code != 0 && direct_method != 0) {
switch (state) {
case 0: // Get the current Method* [sets kArg0]
if (direct_code != static_cast<uintptr_t>(-1)) {
- cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ if (cu->target64) {
+ cg->LoadConstantWide(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ } else {
+ cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ }
} else {
cg->LoadCodeAddress(target_method, type, kInvokeTgt);
}
if (direct_method != static_cast<uintptr_t>(-1)) {
- cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
+ if (cu->target64) {
+ cg->LoadConstantWide(cg->TargetReg(kArg0, kRef), direct_method);
+ } else {
+ cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
+ }
} else {
cg->LoadMethodAddress(target_method, type, kArg0);
}
@@ -377,7 +425,11 @@
// Set up direct code if known.
if (direct_code != 0) {
if (direct_code != static_cast<uintptr_t>(-1)) {
- cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ if (cu->target64) {
+ cg->LoadConstantWide(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ } else {
+ cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ }
} else {
CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
cg->LoadCodeAddress(target_method, type, kInvokeTgt);
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index e1b43ca..713264e 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_DEX_QUICK_MIPS_CODEGEN_MIPS_H_
#define ART_COMPILER_DEX_QUICK_MIPS_CODEGEN_MIPS_H_
+#include "dex/compiler_ir.h"
#include "dex/quick/mir_to_lir.h"
#include "mips_lir.h"
@@ -39,207 +40,303 @@
size_t cur_core_reg_;
};
+ class InToRegStorageMips64Mapper : public InToRegStorageMapper {
+ public:
+ explicit InToRegStorageMips64Mapper(Mir2Lir* m2l) : m2l_(m2l), cur_arg_reg_(0) {}
+ virtual RegStorage GetNextReg(ShortyArg arg);
+ virtual void Reset() OVERRIDE {
+ cur_arg_reg_ = 0;
+ }
+ protected:
+ Mir2Lir* m2l_;
+ private:
+ size_t cur_arg_reg_;
+ };
+
+ InToRegStorageMips64Mapper in_to_reg_storage_mips64_mapper_;
InToRegStorageMipsMapper in_to_reg_storage_mips_mapper_;
InToRegStorageMapper* GetResetedInToRegStorageMapper() OVERRIDE {
- in_to_reg_storage_mips_mapper_.Reset();
- return &in_to_reg_storage_mips_mapper_;
+ InToRegStorageMapper* res;
+ if (cu_->target64) {
+ res = &in_to_reg_storage_mips64_mapper_;
+ } else {
+ res = &in_to_reg_storage_mips_mapper_;
+ }
+ res->Reset();
+ return res;
}
- public:
- MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
+ public:
+ MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
- // Required for target - codegen utilities.
- bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
- RegLocation rl_dest, int lit);
- bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
- void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
- int32_t constant) OVERRIDE;
- void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
- int64_t constant) OVERRIDE;
- LIR* CheckSuspendUsingLoad() OVERRIDE;
- RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
- LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size, VolatileKind is_volatile) OVERRIDE;
- LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
- OpSize size) OVERRIDE;
- LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
- LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
- LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size, VolatileKind is_volatile) OVERRIDE;
- LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
- OpSize size) OVERRIDE;
- LIR* GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest);
- LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src);
+ // Required for target - codegen utilities.
+ bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
+ RegLocation rl_dest, int lit);
+ bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+ void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1, int32_t constant)
+ OVERRIDE;
+ void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1, int64_t constant)
+ OVERRIDE;
+ LIR* CheckSuspendUsingLoad() OVERRIDE;
+ RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
+ LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
+ VolatileKind is_volatile) OVERRIDE;
+ LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
+ OpSize size) OVERRIDE;
+ LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
+ LIR* LoadConstantWideNoClobber(RegStorage r_dest, int64_t value);
+ LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
+ LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
+ VolatileKind is_volatile) OVERRIDE;
+ LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
+ OpSize size) OVERRIDE;
+ LIR* GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest);
+ LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src);
- /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
- void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
+ /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
+ void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
- // Required for target - register utilities.
- RegStorage Solo64ToPair64(RegStorage reg);
- RegStorage TargetReg(SpecialTargetRegister reg);
- RegLocation GetReturnAlt();
- RegLocation GetReturnWideAlt();
- RegLocation LocCReturn();
- RegLocation LocCReturnRef();
- RegLocation LocCReturnDouble();
- RegLocation LocCReturnFloat();
- RegLocation LocCReturnWide();
- ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
- void AdjustSpillMask();
- void ClobberCallerSave();
- void FreeCallTemps();
- void LockCallTemps();
- void CompilerInitializeRegAlloc();
+ // Required for target - register utilities.
+ RegStorage Solo64ToPair64(RegStorage reg);
+ RegStorage Fp64ToSolo32(RegStorage reg);
+ RegStorage TargetReg(SpecialTargetRegister reg);
+ RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) OVERRIDE;
+ RegStorage TargetPtrReg(SpecialTargetRegister reg) OVERRIDE {
+ return TargetReg(reg, cu_->target64 ? kWide : kNotWide);
+ }
+ RegLocation GetReturnAlt();
+ RegLocation GetReturnWideAlt();
+ RegLocation LocCReturn();
+ RegLocation LocCReturnRef();
+ RegLocation LocCReturnDouble();
+ RegLocation LocCReturnFloat();
+ RegLocation LocCReturnWide();
+ ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
+ void AdjustSpillMask();
+ void ClobberCallerSave();
+ void FreeCallTemps();
+ void LockCallTemps();
+ void CompilerInitializeRegAlloc();
- // Required for target - miscellaneous.
- void AssembleLIR();
- int AssignInsnOffsets();
- void AssignOffsets();
- AssemblerStatus AssembleInstructions(CodeOffset start_addr);
- void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
- void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
- ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
- const char* GetTargetInstFmt(int opcode);
- const char* GetTargetInstName(int opcode);
- std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
- ResourceMask GetPCUseDefEncoding() const OVERRIDE;
- uint64_t GetTargetInstFlags(int opcode);
- size_t GetInsnSize(LIR* lir) OVERRIDE;
- bool IsUnconditionalBranch(LIR* lir);
+ // Required for target - miscellaneous.
+ void AssembleLIR();
+ int AssignInsnOffsets();
+ void AssignOffsets();
+ AssemblerStatus AssembleInstructions(CodeOffset start_addr);
+ void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
+ void SetupTargetResourceMasks(LIR* lir, uint64_t flags, ResourceMask* use_mask,
+ ResourceMask* def_mask) OVERRIDE;
+ const char* GetTargetInstFmt(int opcode);
+ const char* GetTargetInstName(int opcode);
+ std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+ ResourceMask GetPCUseDefEncoding() const OVERRIDE;
+ uint64_t GetTargetInstFlags(int opcode);
+ size_t GetInsnSize(LIR* lir) OVERRIDE;
+ bool IsUnconditionalBranch(LIR* lir);
- // Get the register class for load/store of a field.
- RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
+ // Get the register class for load/store of a field.
+ RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
- // Required for target - Dalvik-level generators.
- void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2, int flags);
- void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
- RegLocation rl_index, RegLocation rl_dest, int scale);
- void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
- RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark);
- void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_shift, int flags);
- void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
- bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
- bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
- bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
- bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
- bool GenInlinedSqrt(CallInfo* info);
- bool GenInlinedPeek(CallInfo* info, OpSize size);
- bool GenInlinedPoke(CallInfo* info, OpSize size);
- void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, int flags) OVERRIDE;
- RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
- RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
- void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenDivZeroCheckWide(RegStorage reg);
- void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
- void GenExitSequence();
- void GenSpecialExitSequence() OVERRIDE;
- void GenSpecialEntryForSuspend() OVERRIDE;
- void GenSpecialExitForSuspend() OVERRIDE;
- void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
- void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
- void GenSelect(BasicBlock* bb, MIR* mir);
- void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
- int32_t true_val, int32_t false_val, RegStorage rs_dest,
- RegisterClass dest_reg_class) OVERRIDE;
- bool GenMemBarrier(MemBarrierKind barrier_kind);
- void GenMoveException(RegLocation rl_dest);
- void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
- int first_bit, int second_bit);
- void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
- void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
- void GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
- void GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
- bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);
+ // Required for target - Dalvik-level generators.
+ void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation lr_shift);
+ void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, int flags);
+ void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
+ RegLocation rl_dest, int scale);
+ void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
+ RegLocation rl_src, int scale, bool card_mark);
+ void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_shift, int flags);
+ void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
+ bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
+ bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
+ bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
+ bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
+ bool GenInlinedSqrt(CallInfo* info);
+ bool GenInlinedPeek(CallInfo* info, OpSize size);
+ bool GenInlinedPoke(CallInfo* info, OpSize size);
+ void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+ void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, int flags) OVERRIDE;
+ RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
+ RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
+ void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenDivZeroCheckWide(RegStorage reg);
+ void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
+ void GenExitSequence();
+ void GenSpecialExitSequence() OVERRIDE;
+ void GenSpecialEntryForSuspend() OVERRIDE;
+ void GenSpecialExitForSuspend() OVERRIDE;
+ void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
+ void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
+ void GenSelect(BasicBlock* bb, MIR* mir);
+ void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+ int32_t true_val, int32_t false_val, RegStorage rs_dest,
+ RegisterClass dest_reg_class) OVERRIDE;
+ bool GenMemBarrier(MemBarrierKind barrier_kind);
+ void GenMoveException(RegLocation rl_dest);
+ void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
+ int first_bit, int second_bit);
+ void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
+ void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
+ void GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+ void GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+ bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);
- // Required for target - single operation generators.
- LIR* OpUnconditionalBranch(LIR* target);
- LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
- LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
- LIR* OpCondBranch(ConditionCode cc, LIR* target);
- LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
- LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
- LIR* OpIT(ConditionCode cond, const char* guide);
- void OpEndIT(LIR* it);
- LIR* OpMem(OpKind op, RegStorage r_base, int disp);
- LIR* OpPcRelLoad(RegStorage reg, LIR* target);
- LIR* OpReg(OpKind op, RegStorage r_dest_src);
- void OpRegCopy(RegStorage r_dest, RegStorage r_src);
- LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
- LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
- LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
- LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
- LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
- LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
- LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
- LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
- LIR* OpTestSuspend(LIR* target);
- LIR* OpVldm(RegStorage r_base, int count);
- LIR* OpVstm(RegStorage r_base, int count);
- void OpRegCopyWide(RegStorage dest, RegStorage src);
+ // Required for target - single operation generators.
+ LIR* OpUnconditionalBranch(LIR* target);
+ LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
+ LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
+ LIR* OpCondBranch(ConditionCode cc, LIR* target);
+ LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
+ LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
+ LIR* OpIT(ConditionCode cond, const char* guide);
+ void OpEndIT(LIR* it);
+ LIR* OpMem(OpKind op, RegStorage r_base, int disp);
+ void OpPcRelLoad(RegStorage reg, LIR* target);
+ LIR* OpReg(OpKind op, RegStorage r_dest_src);
+ void OpRegCopy(RegStorage r_dest, RegStorage r_src);
+ LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
+ LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
+ LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
+ LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
+ LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
+ LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
+ LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
+ LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
+ LIR* OpTestSuspend(LIR* target);
+ LIR* OpVldm(RegStorage r_base, int count);
+ LIR* OpVstm(RegStorage r_base, int count);
+ void OpRegCopyWide(RegStorage dest, RegStorage src);
- // TODO: collapse r_dest.
- LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size);
- // TODO: collapse r_src.
- LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size);
- void SpillCoreRegs();
- void UnSpillCoreRegs();
- static const MipsEncodingMap EncodingMap[kMipsLast];
- bool InexpensiveConstantInt(int32_t value);
- bool InexpensiveConstantFloat(int32_t value);
- bool InexpensiveConstantLong(int64_t value);
- bool InexpensiveConstantDouble(int64_t value);
+ // TODO: collapse r_dest.
+ LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
+ // TODO: collapse r_src.
+ LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
+ void SpillCoreRegs();
+ void UnSpillCoreRegs();
+ static const MipsEncodingMap EncodingMap[kMipsLast];
+ bool InexpensiveConstantInt(int32_t value);
+ bool InexpensiveConstantFloat(int32_t value);
+ bool InexpensiveConstantLong(int64_t value);
+ bool InexpensiveConstantDouble(int64_t value);
- bool WideGPRsAreAliases() const OVERRIDE {
- return false; // Wide GPRs are formed by pairing.
+ bool WideGPRsAreAliases() const OVERRIDE {
+ return cu_->target64; // Wide GPRs are formed by pairing on mips32.
+ }
+ bool WideFPRsAreAliases() const OVERRIDE {
+ return cu_->target64; // Wide FPRs are formed by pairing on mips32.
+ }
+
+ LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+
+ RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2, bool is_div,
+ int flags) OVERRIDE;
+ RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) OVERRIDE;
+ NextCallInsn GetNextSDCallInsn() OVERRIDE;
+ LIR* GenCallInsn(const MirMethodLoweringInfo& method_info) OVERRIDE;
+
+ // Unimplemented intrinsics.
+ bool GenInlinedCharAt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
+ return false;
+ }
+ bool GenInlinedAbsInt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
+ return false;
+ }
+ bool GenInlinedAbsLong(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
+ return false;
+ }
+ bool GenInlinedIndexOf(CallInfo* info ATTRIBUTE_UNUSED, bool zero_based ATTRIBUTE_UNUSED)
+ OVERRIDE {
+ return false;
+ }
+
+    // True if the ISA is revision 6 (R6).
+ const bool isaIsR6_;
+
+    // True if the floating point unit is 32-bit.
+ const bool fpuIs32Bit_;
+
+ private:
+ void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+ void GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+
+ void ConvertShortToLongBranch(LIR* lir);
+
+ // Mips64 specific long gen methods:
+ void GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
+ void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, bool is_div, int flags);
+ void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src,
+ RegisterClass reg_class);
+ RegStorage AllocPtrSizeTemp(bool required = true);
+
+ /**
+ * @param reg #RegStorage containing a Solo64 input register (e.g. @c a1 or @c d0).
+ * @return A Solo32 with the same register number as the @p reg (e.g. @c a1 or @c f0).
+ * @see As64BitReg
+ */
+ RegStorage As32BitReg(RegStorage reg) {
+ DCHECK(!reg.IsPair());
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Expected 64b register";
+ } else {
+ LOG(WARNING) << "Expected 64b register";
+ return reg;
+ }
}
- bool WideFPRsAreAliases() const OVERRIDE {
- return false; // Wide FPRs are formed by pairing.
+ RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
+ reg.GetRawBits() & RegStorage::kRegTypeMask);
+ DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
+ ->GetReg().GetReg(),
+ ret_val.GetReg());
+ return ret_val;
+ }
+
+ /**
+ * @param reg #RegStorage containing a Solo32 input register (e.g. @c a1 or @c f0).
+ * @return A Solo64 with the same register number as the @p reg (e.g. @c a1 or @c d0).
+ */
+ RegStorage As64BitReg(RegStorage reg) {
+ DCHECK(!reg.IsPair());
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Expected 32b register";
+ } else {
+ LOG(WARNING) << "Expected 32b register";
+ return reg;
+ }
}
+ RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
+ reg.GetRawBits() & RegStorage::kRegTypeMask);
+ DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
+ ->GetReg().GetReg(),
+ ret_val.GetReg());
+ return ret_val;
+ }
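
Both helpers above only swap the shape bits of the RegStorage while preserving the register-number bits under kRegTypeMask; a standalone model with illustrative constants (the real bit layout lives in RegStorage):

    #include <cstdint>

    // Illustrative layout: high bits encode the view, low bits the register.
    enum ShapeBits : uint16_t { k32BitSolo = 0x1000, k64BitSolo = 0x2000 };
    constexpr uint16_t kRegTypeMask = 0x0fff;

    uint16_t As64Bit(uint16_t raw) { return k64BitSolo | (raw & kRegTypeMask); }
    uint16_t As32Bit(uint16_t raw) { return k32BitSolo | (raw & kRegTypeMask); }
    // Both views name the same physical register, e.g. a1 as 32- or 64-bit.
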
- LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
-
- RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags) OVERRIDE;
- RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div)
- OVERRIDE;
-
- NextCallInsn GetNextSDCallInsn() OVERRIDE;
- LIR* GenCallInsn(const MirMethodLoweringInfo& method_info) OVERRIDE;
-
- // Unimplemented intrinsics.
- bool GenInlinedCharAt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
- return false;
+ RegStorage Check64BitReg(RegStorage reg) {
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Checked for 64b register";
+ } else {
+ LOG(WARNING) << "Checked for 64b register";
+ return As64BitReg(reg);
+ }
}
- bool GenInlinedAbsInt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
- return false;
- }
- bool GenInlinedAbsLong(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
- return false;
- }
- bool GenInlinedIndexOf(CallInfo* info ATTRIBUTE_UNUSED, bool zero_based ATTRIBUTE_UNUSED)
- OVERRIDE {
- return false;
- }
-
- private:
- void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
- void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
-
- void ConvertShortToLongBranch(LIR* lir);
+ return reg;
+ }
};
} // namespace art
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index d7ed7ac..45fd1a9 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -23,8 +23,8 @@
namespace art {
-void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode,
- RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2) {
int op = kMipsNop;
RegLocation rl_result;
@@ -51,7 +51,7 @@
break;
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
- FlushAllRegs(); // Send everything to home location
+ FlushAllRegs(); // Send everything to home location.
CallRuntimeHelperRegLocationRegLocation(kQuickFmodf, rl_src1, rl_src2, false);
rl_result = GetReturn(kFPReg);
StoreValue(rl_dest, rl_result);
@@ -69,8 +69,8 @@
StoreValue(rl_dest, rl_result);
}
-void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode,
- RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2) {
int op = kMipsNop;
RegLocation rl_result;
@@ -93,7 +93,7 @@
break;
case Instruction::REM_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE:
- FlushAllRegs(); // Send everything to home location
+ FlushAllRegs(); // Send everything to home location.
CallRuntimeHelperRegLocationRegLocation(kQuickFmod, rl_src1, rl_src2, false);
rl_result = GetReturnWide(kFPReg);
StoreValueWide(rl_dest, rl_result);
@@ -147,22 +147,22 @@
op = kMipsFcvtdw;
break;
case Instruction::FLOAT_TO_INT:
- GenConversionCall(kQuickF2iz, rl_dest, rl_src);
+ GenConversionCall(kQuickF2iz, rl_dest, rl_src, kCoreReg);
return;
case Instruction::DOUBLE_TO_INT:
- GenConversionCall(kQuickD2iz, rl_dest, rl_src);
+ GenConversionCall(kQuickD2iz, rl_dest, rl_src, kCoreReg);
return;
case Instruction::LONG_TO_DOUBLE:
- GenConversionCall(kQuickL2d, rl_dest, rl_src);
+ GenConversionCall(kQuickL2d, rl_dest, rl_src, kFPReg);
return;
case Instruction::FLOAT_TO_LONG:
- GenConversionCall(kQuickF2l, rl_dest, rl_src);
+ GenConversionCall(kQuickF2l, rl_dest, rl_src, kCoreReg);
return;
case Instruction::LONG_TO_FLOAT:
- GenConversionCall(kQuickL2f, rl_dest, rl_src);
+ GenConversionCall(kQuickL2f, rl_dest, rl_src, kFPReg);
return;
case Instruction::DOUBLE_TO_LONG:
- GenConversionCall(kQuickD2l, rl_dest, rl_src);
+ GenConversionCall(kQuickD2l, rl_dest, rl_src, kCoreReg);
return;
default:
LOG(FATAL) << "Unexpected opcode: " << opcode;
@@ -181,8 +181,32 @@
}
}
-void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
+// Get the reg storage for a wide FP. It is either a solo or a pair. Base is Mips-counted,
+// e.g., even values are valid (0, 2).
+static RegStorage GetWideArgFP(bool fpuIs32Bit, size_t base) {
+  // TODO: consider computing this instead of hard-coding it, e.g., rMIPS_FARG0 + base. Right now
+ // inlining should optimize everything.
+ if (fpuIs32Bit) {
+ switch (base) {
+ case 0:
+ return RegStorage(RegStorage::k64BitPair, rFARG0, rFARG1);
+ case 2:
+ return RegStorage(RegStorage::k64BitPair, rFARG2, rFARG3);
+ }
+ } else {
+ switch (base) {
+ case 0:
+ return RegStorage(RegStorage::k64BitSolo, rFARG0);
+ case 2:
+ return RegStorage(RegStorage::k64BitSolo, rFARG2);
+ }
+ }
+ LOG(FATAL) << "Unsupported Mips.GetWideFP: " << fpuIs32Bit << " " << base;
+ UNREACHABLE();
+}
+
+void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) {
bool wide = true;
QuickEntrypointEnum target;
@@ -208,16 +232,23 @@
FlushAllRegs();
LockCallTemps();
if (wide) {
- RegStorage r_tmp1(RegStorage::k64BitPair, rMIPS_FARG0, rMIPS_FARG1);
- RegStorage r_tmp2(RegStorage::k64BitPair, rMIPS_FARG2, rMIPS_FARG3);
+ RegStorage r_tmp1;
+ RegStorage r_tmp2;
+ if (cu_->target64) {
+ r_tmp1 = RegStorage(RegStorage::k64BitSolo, rFARG0);
+ r_tmp2 = RegStorage(RegStorage::k64BitSolo, rFARG1);
+ } else {
+ r_tmp1 = GetWideArgFP(fpuIs32Bit_, 0);
+ r_tmp2 = GetWideArgFP(fpuIs32Bit_, 2);
+ }
LoadValueDirectWideFixed(rl_src1, r_tmp1);
LoadValueDirectWideFixed(rl_src2, r_tmp2);
} else {
- LoadValueDirectFixed(rl_src1, rs_rMIPS_FARG0);
- LoadValueDirectFixed(rl_src2, rs_rMIPS_FARG2);
+ LoadValueDirectFixed(rl_src1, rs_rFARG0);
+ LoadValueDirectFixed(rl_src2, cu_->target64 ? rs_rFARG1 : rs_rFARG2);
}
RegStorage r_tgt = LoadHelper(target);
- // NOTE: not a safepoint
+ // NOTE: not a safepoint.
OpReg(kOpBlx, r_tgt);
RegLocation rl_result = GetReturn(kCoreReg);
StoreValue(rl_dest, rl_result);
@@ -230,18 +261,30 @@
void MipsMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result;
- rl_src = LoadValue(rl_src, kCoreReg);
- rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAdd, rl_result.reg, rl_src.reg, 0x80000000);
+ if (cu_->target64) {
+ rl_src = LoadValue(rl_src, kFPReg);
+ rl_result = EvalLoc(rl_dest, kFPReg, true);
+ NewLIR2(kMipsFnegs, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ } else {
+ rl_src = LoadValue(rl_src, kCoreReg);
+ rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ OpRegRegImm(kOpAdd, rl_result.reg, rl_src.reg, 0x80000000);
+ }
StoreValue(rl_dest, rl_result);
}
void MipsMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result;
- rl_src = LoadValueWide(rl_src, kCoreReg);
- rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAdd, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 0x80000000);
- OpRegCopy(rl_result.reg, rl_src.reg);
+ if (cu_->target64) {
+ rl_src = LoadValueWide(rl_src, kFPReg);
+ rl_result = EvalLocWide(rl_dest, kFPReg, true);
+ NewLIR2(kMipsFnegd, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ } else {
+ rl_src = LoadValueWide(rl_src, kCoreReg);
+ rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ OpRegRegImm(kOpAdd, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 0x80000000);
+ OpRegCopy(rl_result.reg, rl_src.reg);
+ }
StoreValueWide(rl_dest, rl_result);
}
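
On the mips32 path above the value sits in a core register, and adding 0x80000000 flips only bit 31, the IEEE-754 sign bit, because no lower bit can carry into it (GenNegDouble applies the same trick to the high word). A sketch:

    #include <cstdint>
    #include <cstring>

    float NegateViaCoreReg(float f) {
      uint32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));
      bits += 0x80000000u;  // Same effect as bits ^ 0x80000000u here.
      std::memcpy(&f, &bits, sizeof(f));
      return f;
    }
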
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 17ac629..626b36e 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -34,6 +34,7 @@
* x < y return -1
* x > y return 1
*
+ * Mips32 implementation
* slt t0, x.hi, y.hi; # (x.hi < y.hi) ? 1:0
 * sgt   t1,  x.hi, y.hi;  # (x.hi > y.hi) ? 1:0
 * subu  res, t1, t0       # res = -1:1:0 for [ < > = ]
@@ -43,26 +44,40 @@
 * subu  res, t1, t0
* finish:
*
+ * Mips64 implementation
+ * slt temp, x, y; # (x < y) ? 1:0
+ * slt res, y, x; # (x > y) ? 1:0
+ * subu res, res, temp; # res = -1:1:0 for [ < > = ]
+ *
*/
-void MipsMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
+void MipsMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
- RegStorage t0 = AllocTemp();
- RegStorage t1 = AllocTemp();
- RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- NewLIR3(kMipsSlt, t0.GetReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
- NewLIR3(kMipsSlt, t1.GetReg(), rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
- NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
- LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, NULL);
- NewLIR3(kMipsSltu, t0.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
- NewLIR3(kMipsSltu, t1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetLowReg());
- NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
- FreeTemp(t0);
- FreeTemp(t1);
- LIR* target = NewLIR0(kPseudoTargetLabel);
- branch->target = target;
- StoreValue(rl_dest, rl_result);
+ if (cu_->target64) {
+ RegStorage temp = AllocTempWide();
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ NewLIR3(kMipsSlt, temp.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ NewLIR3(kMipsSlt, rl_result.reg.GetReg(), rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
+ NewLIR3(kMipsSubu, rl_result.reg.GetReg(), rl_result.reg.GetReg(), temp.GetReg());
+ FreeTemp(temp);
+ StoreValue(rl_dest, rl_result);
+ } else {
+ RegStorage t0 = AllocTemp();
+ RegStorage t1 = AllocTemp();
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ NewLIR3(kMipsSlt, t0.GetReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+ NewLIR3(kMipsSlt, t1.GetReg(), rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
+ NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
+ LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, NULL);
+ NewLIR3(kMipsSltu, t0.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
+ NewLIR3(kMipsSltu, t1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetLowReg());
+ NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
+ FreeTemp(t0);
+ FreeTemp(t1);
+ LIR* target = NewLIR0(kPseudoTargetLabel);
+ branch->target = target;
+ StoreValue(rl_dest, rl_result);
+ }
}
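
The mips64 branch of GenCmpLong above maps one-to-one onto this branchless C++ (the slt/slt/subu sequence from the comment block):

    #include <cstdint>

    int32_t CmpLong(int64_t x, int64_t y) {
      int32_t lt = (x < y) ? 1 : 0;  // slt temp, x, y
      int32_t gt = (y < x) ? 1 : 0;  // slt res,  y, x
      return gt - lt;                // subu res, res, temp => -1, 0, or 1
    }
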
LIR* MipsMir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
@@ -134,7 +149,7 @@
LIR* MipsMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) {
LIR* branch;
if (check_value != 0) {
- // TUNING: handle s16 & kCondLt/Mi case using slti
+ // TUNING: handle s16 & kCondLt/Mi case using slti.
RegStorage t_reg = AllocTemp();
LoadConstant(t_reg, check_value);
branch = OpCmpBranch(cond, reg, t_reg, target);
@@ -164,17 +179,34 @@
}
LIR* MipsMir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
- // If src or dest is a pair, we'll be using low reg.
- if (r_dest.IsPair()) {
- r_dest = r_dest.GetLow();
+ LIR* res;
+ MipsOpCode opcode;
+
+ if (!cu_->target64) {
+ // If src or dest is a pair, we'll be using low reg.
+ if (r_dest.IsPair()) {
+ r_dest = r_dest.GetLow();
+ }
+ if (r_src.IsPair()) {
+ r_src = r_src.GetLow();
+ }
+ } else {
+ DCHECK(!r_dest.IsPair() && !r_src.IsPair());
}
- if (r_src.IsPair()) {
- r_src = r_src.GetLow();
- }
+
if (r_dest.IsFloat() || r_src.IsFloat())
return OpFpRegCopy(r_dest, r_src);
- LIR* res = RawLIR(current_dalvik_offset_, kMipsMove,
- r_dest.GetReg(), r_src.GetReg());
+ if (cu_->target64) {
+    // TODO: Check that r_src and r_dest are both 32-bit or both 64-bit on Mips64.
+ if (r_dest.Is64Bit() || r_src.Is64Bit()) {
+ opcode = kMipsMove;
+ } else {
+ opcode = kMipsSll;
+ }
+ } else {
+ opcode = kMipsMove;
+ }
+ res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
res->flags.is_nop = true;
}
@@ -189,23 +221,44 @@
}
void MipsMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
+ if (cu_->target64) {
+ OpRegCopy(r_dest, r_src);
+ return;
+ }
if (r_dest != r_src) {
bool dest_fp = r_dest.IsFloat();
bool src_fp = r_src.IsFloat();
if (dest_fp) {
if (src_fp) {
+ // Here if both src and dest are fp registers. OpRegCopy will choose the right copy
+ // (solo or pair).
OpRegCopy(r_dest, r_src);
} else {
- /* note the operands are swapped for the mtc1 instr */
- NewLIR2(kMipsMtc1, r_src.GetLowReg(), r_dest.GetLowReg());
- NewLIR2(kMipsMtc1, r_src.GetHighReg(), r_dest.GetHighReg());
+      // Here if dest is fp reg and src is core reg. Note that the operands are
+      // swapped for the mtc1 and mthc1 instructions.
+ if (fpuIs32Bit_) {
+ NewLIR2(kMipsMtc1, r_src.GetLowReg(), r_dest.GetLowReg());
+ NewLIR2(kMipsMtc1, r_src.GetHighReg(), r_dest.GetHighReg());
+ } else {
+ r_dest = Fp64ToSolo32(r_dest);
+ NewLIR2(kMipsMtc1, r_src.GetLowReg(), r_dest.GetReg());
+ NewLIR2(kMipsMthc1, r_src.GetHighReg(), r_dest.GetReg());
+ }
}
} else {
if (src_fp) {
- NewLIR2(kMipsMfc1, r_dest.GetLowReg(), r_src.GetLowReg());
- NewLIR2(kMipsMfc1, r_dest.GetHighReg(), r_src.GetHighReg());
+ // Here if dest is core reg and src is fp reg.
+ if (fpuIs32Bit_) {
+ NewLIR2(kMipsMfc1, r_dest.GetLowReg(), r_src.GetLowReg());
+ NewLIR2(kMipsMfc1, r_dest.GetHighReg(), r_src.GetHighReg());
+ } else {
+ r_src = Fp64ToSolo32(r_src);
+ NewLIR2(kMipsMfc1, r_dest.GetLowReg(), r_src.GetReg());
+ NewLIR2(kMipsMfhc1, r_dest.GetHighReg(), r_src.GetReg());
+ }
} else {
- // Handle overlap
+ // Here if both src and dest are core registers.
+ // Handle overlap.
if (r_src.GetHighReg() == r_dest.GetLowReg()) {
OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
OpRegCopy(r_dest.GetLow(), r_src.GetLow());
@@ -243,27 +296,21 @@
RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg1, RegStorage reg2,
bool is_div) {
- NewLIR2(kMipsDiv, reg1.GetReg(), reg2.GetReg());
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- if (is_div) {
- NewLIR1(kMipsMflo, rl_result.reg.GetReg());
+
+ if (isaIsR6_) {
+ NewLIR3(is_div ? kMipsR6Div : kMipsR6Mod, rl_result.reg.GetReg(), reg1.GetReg(), reg2.GetReg());
} else {
- NewLIR1(kMipsMfhi, rl_result.reg.GetReg());
+ NewLIR2(kMipsR2Div, reg1.GetReg(), reg2.GetReg());
+ NewLIR1(is_div ? kMipsR2Mflo : kMipsR2Mfhi, rl_result.reg.GetReg());
}
return rl_result;
}
-RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit,
- bool is_div) {
+RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
RegStorage t_reg = AllocTemp();
NewLIR3(kMipsAddiu, t_reg.GetReg(), rZERO, lit);
- NewLIR2(kMipsDiv, reg1.GetReg(), t_reg.GetReg());
- RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- if (is_div) {
- NewLIR1(kMipsMflo, rl_result.reg.GetReg());
- } else {
- NewLIR1(kMipsMfhi, rl_result.reg.GetReg());
- }
+ RegLocation rl_result = GenDivRem(rl_dest, reg1, t_reg, is_div);
FreeTemp(t_reg);
return rl_result;
}
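(Aside: the isaIsR6_ split reflects the two ISA forms. mips32r2 div writes the quotient to LO and the remainder to HI, read back with mflo/mfhi; the r6 div and mod are three-operand and write the destination register directly. A sketch of the semantics, with our own names:)

    #include <cstdint>
    struct HiLo { int32_t hi; int32_t lo; };  // HI = remainder, LO = quotient
    HiLo R2DivSemantics(int32_t s, int32_t t) {
      return { s % t, s / t };  // div s,t ; then mfhi d or mflo d
    }
    int32_t R6DivSemantics(int32_t s, int32_t t) {
      return s / t;  // div d,s,t ; mod d,s,t computes s % t the same way
    }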
@@ -309,10 +356,17 @@
// MIPS supports only aligned access. Defer unaligned access to JNI implementation.
return false;
}
- RegLocation rl_src_address = info->args[0]; // long address
- rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[1]
+ RegLocation rl_src_address = info->args[0]; // Long address.
+ if (!cu_->target64) {
+ rl_src_address = NarrowRegLoc(rl_src_address); // Ignore high half in info->args[1].
+ }
RegLocation rl_dest = InlineTarget(info);
- RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
+ RegLocation rl_address;
+ if (cu_->target64) {
+ rl_address = LoadValueWide(rl_src_address, kCoreReg);
+ } else {
+ rl_address = LoadValue(rl_src_address, kCoreReg);
+ }
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
DCHECK(size == kSignedByte);
LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
@@ -325,17 +379,24 @@
// MIPS supports only aligned access. Defer unaligned access to JNI implementation.
return false;
}
- RegLocation rl_src_address = info->args[0]; // long address
- rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[1]
- RegLocation rl_src_value = info->args[2]; // [size] value
- RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
+ RegLocation rl_src_address = info->args[0]; // Long address.
+ if (!cu_->target64) {
+ rl_src_address = NarrowRegLoc(rl_src_address); // Ignore high half in info->args[1].
+ }
+ RegLocation rl_src_value = info->args[2]; // [size] value.
+ RegLocation rl_address;
+ if (cu_->target64) {
+ rl_address = LoadValueWide(rl_src_address, kCoreReg);
+ } else {
+ rl_address = LoadValue(rl_src_address, kCoreReg);
+ }
DCHECK(size == kSignedByte);
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
return true;
}
-LIR* MipsMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+void MipsMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
UNUSED(reg, target);
LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips";
UNREACHABLE();
@@ -353,8 +414,7 @@
UNREACHABLE();
}
-void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
- RegLocation rl_result, int lit,
+void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
int first_bit, int second_bit) {
UNUSED(lit);
RegStorage t_reg = AllocTemp();
@@ -367,20 +427,24 @@
}
void MipsMir2Lir::GenDivZeroCheckWide(RegStorage reg) {
- DCHECK(reg.IsPair()); // TODO: support k64BitSolo.
- RegStorage t_reg = AllocTemp();
- OpRegRegReg(kOpOr, t_reg, reg.GetLow(), reg.GetHigh());
- GenDivZeroCheck(t_reg);
- FreeTemp(t_reg);
+ if (cu_->target64) {
+ GenDivZeroCheck(reg);
+ } else {
+ DCHECK(reg.IsPair()); // TODO: support k64BitSolo.
+ RegStorage t_reg = AllocTemp();
+ OpRegRegReg(kOpOr, t_reg, reg.GetLow(), reg.GetHigh());
+ GenDivZeroCheck(t_reg);
+ FreeTemp(t_reg);
+ }
}
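(Aside: the mips32 path folds the pair with an OR first, since a 64-bit value held in two registers is zero exactly when the OR of its halves is zero:)

    #include <cstdint>
    bool IsZeroWide(uint32_t lo, uint32_t hi) {
      return (lo | hi) == 0;  // or t_reg, lo, hi ; then the zero check on t_reg
    }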
-// Test suspend flag, return target of taken suspend branch
+// Test suspend flag, return target of taken suspend branch.
LIR* MipsMir2Lir::OpTestSuspend(LIR* target) {
- OpRegImm(kOpSub, rs_rMIPS_SUSPEND, 1);
- return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, rs_rMIPS_SUSPEND, 0, target);
+ OpRegImm(kOpSub, TargetPtrReg(kSuspend), 1);
+ return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, TargetPtrReg(kSuspend), 0, target);
}
-// Decrement register and branch on condition
+// Decrement register and branch on condition.
LIR* MipsMir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
OpRegImm(kOpSub, reg, 1);
return OpCmpImmBranch(c_code, reg, 0, target);
@@ -410,9 +474,7 @@
LOG(FATAL) << "Unexpected use of OpEndIT in Mips";
}
-void MipsMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
- UNUSED(opcode);
+void MipsMir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -427,15 +489,14 @@
OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src2.reg.GetLow(), rl_src1.reg.GetLow());
RegStorage t_reg = AllocTemp();
OpRegRegReg(kOpAdd, t_reg, rl_src2.reg.GetHigh(), rl_src1.reg.GetHigh());
- NewLIR3(kMipsSltu, rl_result.reg.GetHighReg(), rl_result.reg.GetLowReg(), rl_src2.reg.GetLowReg());
+ NewLIR3(kMipsSltu, rl_result.reg.GetHighReg(), rl_result.reg.GetLowReg(),
+ rl_src2.reg.GetLowReg());
OpRegRegReg(kOpAdd, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), t_reg);
FreeTemp(t_reg);
StoreValueWide(rl_dest, rl_result);
}
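(Aside: the sltu in GenAddLong recovers the carry out of the low-word add, because an unsigned add wrapped exactly when the sum is below either addend. The whole sequence, as a C++ sketch:)

    #include <cstdint>
    uint64_t AddLongViaHalves(uint32_t a_lo, uint32_t a_hi, uint32_t b_lo, uint32_t b_hi) {
      uint32_t lo = a_lo + b_lo;             // addu on the low halves
      uint32_t carry = (lo < b_lo) ? 1 : 0;  // sltu: did the low add wrap?
      uint32_t hi = a_hi + b_hi + carry;     // addu t_reg, then addu with the carry
      return (static_cast<uint64_t>(hi) << 32) | lo;
    }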
-void MipsMir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
- UNUSED(opcode);
+void MipsMir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -458,45 +519,134 @@
void MipsMir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2, int flags) {
- switch (opcode) {
- case Instruction::ADD_LONG:
- case Instruction::ADD_LONG_2ADDR:
- GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
- return;
- case Instruction::SUB_LONG:
- case Instruction::SUB_LONG_2ADDR:
- GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
- return;
- case Instruction::NEG_LONG:
- GenNegLong(rl_dest, rl_src2);
- return;
+ if (cu_->target64) {
+ switch (opcode) {
+ case Instruction::NOT_LONG:
+ GenNotLong(rl_dest, rl_src2);
+ return;
+ case Instruction::ADD_LONG:
+ case Instruction::ADD_LONG_2ADDR:
+ GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::SUB_LONG:
+ case Instruction::SUB_LONG_2ADDR:
+ GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::MUL_LONG:
+ case Instruction::MUL_LONG_2ADDR:
+ GenMulLong(rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::DIV_LONG:
+ case Instruction::DIV_LONG_2ADDR:
+ GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true, flags);
+ return;
+ case Instruction::REM_LONG:
+ case Instruction::REM_LONG_2ADDR:
+ GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false, flags);
+ return;
+ case Instruction::AND_LONG:
+ case Instruction::AND_LONG_2ADDR:
+ GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::OR_LONG:
+ case Instruction::OR_LONG_2ADDR:
+ GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::XOR_LONG:
+ case Instruction::XOR_LONG_2ADDR:
+ GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::NEG_LONG:
+ GenNegLong(rl_dest, rl_src2);
+ return;
- default:
- break;
+ default:
+ LOG(FATAL) << "Invalid long arith op";
+ return;
+ }
+ } else {
+ switch (opcode) {
+ case Instruction::ADD_LONG:
+ case Instruction::ADD_LONG_2ADDR:
+ GenAddLong(rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::SUB_LONG:
+ case Instruction::SUB_LONG_2ADDR:
+ GenSubLong(rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::NEG_LONG:
+ GenNegLong(rl_dest, rl_src2);
+ return;
+ default:
+ break;
+ }
+ // Fallback for all other ops.
+ Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
}
+}
- // Fallback for all other ops.
- Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
+void MipsMir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) {
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void MipsMir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
+ rl_src = LoadValueWide(rl_src, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ OpRegReg(kOpMvn, rl_result.reg, rl_src.reg);
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void MipsMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ NewLIR3(kMips64Dmul, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void MipsMir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, bool is_div, int flags) {
+ UNUSED(opcode);
+ // TODO: Implement easy div/rem?
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+ if ((flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
+ GenDivZeroCheckWide(rl_src2.reg);
+ }
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ NewLIR3(is_div ? kMips64Ddiv : kMips64Dmod, rl_result.reg.GetReg(), rl_src1.reg.GetReg(),
+ rl_src2.reg.GetReg());
+ StoreValueWide(rl_dest, rl_result);
}
void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
rl_src = LoadValueWide(rl_src, kCoreReg);
- RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- /*
- * [v1 v0] = -[a1 a0]
- * negu v0,a0
- * negu v1,a1
- * sltu t1,r_zero
- * subu v1,v1,t1
- */
+ RegLocation rl_result;
- OpRegReg(kOpNeg, rl_result.reg.GetLow(), rl_src.reg.GetLow());
- OpRegReg(kOpNeg, rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
- RegStorage t_reg = AllocTemp();
- NewLIR3(kMipsSltu, t_reg.GetReg(), rZERO, rl_result.reg.GetLowReg());
- OpRegRegReg(kOpSub, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), t_reg);
- FreeTemp(t_reg);
- StoreValueWide(rl_dest, rl_result);
+ if (cu_->target64) {
+ rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
+ StoreValueWide(rl_dest, rl_result);
+ } else {
+ rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ // [v1 v0] = -[a1 a0]
+ // negu v0,a0
+ // negu v1,a1
+ // sltu t1,r_zero
+ // subu v1,v1,t1
+ OpRegReg(kOpNeg, rl_result.reg.GetLow(), rl_src.reg.GetLow());
+ OpRegReg(kOpNeg, rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
+ RegStorage t_reg = AllocTemp();
+ NewLIR3(kMipsSltu, t_reg.GetReg(), rZERO, rl_result.reg.GetLowReg());
+ OpRegRegReg(kOpSub, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), t_reg);
+ FreeTemp(t_reg);
+ StoreValueWide(rl_dest, rl_result);
+ }
}
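(Aside: the mips32 branch is negate-with-borrow: negate both halves, then subtract one from the high half whenever the negated low half is non-zero. Equivalent C++:)

    #include <cstdint>
    uint64_t NegLongViaHalves(uint32_t lo, uint32_t hi) {
      uint32_t neg_lo = 0u - lo;                   // negu v0, a0
      uint32_t neg_hi = 0u - hi;                   // negu v1, a1
      uint32_t borrow = (neg_lo != 0u) ? 1u : 0u;  // sltu t1, zero, v0
      return (static_cast<uint64_t>(neg_hi - borrow) << 32) | neg_lo;  // subu v1, v1, t1
    }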
/*
@@ -519,18 +669,18 @@
data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
}
- /* null object? */
+ // Null object?
GenNullCheck(rl_array.reg, opt_flags);
- RegStorage reg_ptr = AllocTemp();
+ RegStorage reg_ptr = (cu_->target64) ? AllocTempRef() : AllocTemp();
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
RegStorage reg_len;
if (needs_range_check) {
reg_len = AllocTemp();
- /* Get len */
+ // Get len.
Load32Disp(rl_array.reg, len_offset, reg_len);
}
- /* reg_ptr -> array data */
+ // reg_ptr -> array data.
OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
FreeTemp(rl_array.reg);
if ((size == k64) || (size == kDouble)) {
@@ -560,7 +710,17 @@
GenArrayBoundsCheck(rl_index.reg, reg_len);
FreeTemp(reg_len);
}
- LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
+
+ if (cu_->target64) {
+ if (rl_result.ref) {
+ LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), As32BitReg(rl_result.reg), scale,
+ kReference);
+ } else {
+ LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
+ }
+ } else {
+ LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
+ }
FreeTemp(reg_ptr);
StoreValue(rl_dest, rl_result);
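(Aside: the address arithmetic in the array-get path above, reduced to a sketch with illustrative names:)

    #include <cstdint>
    uintptr_t ElementAddress(uintptr_t array_base, int32_t data_offset,
                             uint32_t index, int scale) {
      uintptr_t data = array_base + data_offset;               // reg_ptr -> array data
      return data + (static_cast<uintptr_t>(index) << scale);  // what LoadBaseIndexed reads
    }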
@@ -599,7 +759,7 @@
allocated_reg_ptr_temp = true;
}
- /* null object? */
+ // Null object?
GenNullCheck(rl_array.reg, opt_flags);
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
@@ -607,14 +767,14 @@
if (needs_range_check) {
reg_len = AllocTemp();
      // NOTE: max live temps (4) here.
- /* Get len */
+ // Get len.
Load32Disp(rl_array.reg, len_offset, reg_len);
}
- /* reg_ptr -> array data */
+ // reg_ptr -> array data.
OpRegImm(kOpAdd, reg_ptr, data_offset);
- /* at this point, reg_ptr points to array, 2 live temps */
+ // At this point, reg_ptr points to array, 2 live temps.
if ((size == k64) || (size == kDouble)) {
- // TUNING: specific wide routine that can handle fp regs
+ // TUNING: specific wide routine that can handle fp regs.
if (scale) {
RegStorage r_new_index = AllocTemp();
OpRegRegImm(kOpLsl, r_new_index, rl_index.reg, scale);
@@ -647,18 +807,104 @@
}
}
+void MipsMir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_shift) {
+ if (!cu_->target64) {
+ Mir2Lir::GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
+ return;
+ }
+ OpKind op = kOpBkpt;
+ switch (opcode) {
+ case Instruction::SHL_LONG:
+ case Instruction::SHL_LONG_2ADDR:
+ op = kOpLsl;
+ break;
+ case Instruction::SHR_LONG:
+ case Instruction::SHR_LONG_2ADDR:
+ op = kOpAsr;
+ break;
+ case Instruction::USHR_LONG:
+ case Instruction::USHR_LONG_2ADDR:
+ op = kOpLsr;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected case: " << opcode;
+ }
+ rl_shift = LoadValue(rl_shift, kCoreReg);
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
+ StoreValueWide(rl_dest, rl_result);
+}
+
void MipsMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_shift, int flags) {
UNUSED(flags);
- // Default implementation is just to ignore the constant case.
- GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
+ if (!cu_->target64) {
+ // Default implementation is just to ignore the constant case.
+ GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
+ return;
+ }
+ OpKind op = kOpBkpt;
+ // Per spec, we only care about low 6 bits of shift amount.
+ int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ if (shift_amount == 0) {
+ StoreValueWide(rl_dest, rl_src1);
+ return;
+ }
+
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ switch (opcode) {
+ case Instruction::SHL_LONG:
+ case Instruction::SHL_LONG_2ADDR:
+ op = kOpLsl;
+ break;
+ case Instruction::SHR_LONG:
+ case Instruction::SHR_LONG_2ADDR:
+ op = kOpAsr;
+ break;
+ case Instruction::USHR_LONG:
+ case Instruction::USHR_LONG_2ADDR:
+ op = kOpLsr;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected case";
+ }
+ OpRegRegImm(op, rl_result.reg, rl_src1.reg, shift_amount);
+ StoreValueWide(rl_dest, rl_result);
}
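(Aside: the & 0x3f matches the rule that only the low six bits of a 64-bit shift amount are significant, which is also why a constant shift of zero can simply forward the operand:)

    #include <cstdint>
    int64_t ShlLongConst(int64_t value, int32_t shift) {
      int masked = shift & 0x3f;      // low 6 bits only, per the 64-bit shift spec
      if (masked == 0) return value;  // zero shift: no code needs to be emitted
      return static_cast<int64_t>(static_cast<uint64_t>(value) << masked);
    }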
-void MipsMir2Lir::GenArithImmOpLong(Instruction::Code opcode,
- RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
- int flags) {
+void MipsMir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2, int flags) {
// Default - bail to non-const handler.
GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
}
+void MipsMir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
+ if (!cu_->target64) {
+ Mir2Lir::GenIntToLong(rl_dest, rl_src);
+ return;
+ }
+ rl_src = LoadValue(rl_src, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ NewLIR3(kMipsSll, rl_result.reg.GetReg(), As64BitReg(rl_src.reg).GetReg(), 0);
+ StoreValueWide(rl_dest, rl_result);
+}
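(Aside: on mips64 a single sll with shift amount zero performs the int-to-long widening, since 32-bit results are defined to be sign-extended; the net effect is just:)

    #include <cstdint>
    int64_t IntToLong(int32_t v) {
      return static_cast<int64_t>(v);  // what "sll rd, rs, 0" yields on mips64
    }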
+
+void MipsMir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
+ RegLocation rl_src, RegisterClass reg_class) {
+ FlushAllRegs(); // Send everything to home location.
+ CallRuntimeHelperRegLocation(trampoline, rl_src, false);
+  if (rl_dest.wide) {
+    RegLocation rl_result = GetReturnWide(reg_class);
+    StoreValueWide(rl_dest, rl_result);
+  } else {
+    RegLocation rl_result = GetReturn(reg_class);
+    StoreValue(rl_dest, rl_result);
+  }
+}
+
} // namespace art
diff --git a/compiler/dex/quick/mips/mips_lir.h b/compiler/dex/quick/mips/mips_lir.h
index 66e3894..078ac0a 100644
--- a/compiler/dex/quick/mips/mips_lir.h
+++ b/compiler/dex/quick/mips/mips_lir.h
@@ -25,25 +25,29 @@
/*
* Runtime register conventions.
*
- * zero is always the value 0
- * at is scratch (normally used as temp reg by assembler)
- * v0, v1 are scratch (normally hold subroutine return values)
- * a0-a3 are scratch (normally hold subroutine arguments)
- * t0-t8 are scratch
- * t9 is scratch (normally used for function calls)
- * s0 (rMIPS_SUSPEND) is reserved [holds suspend-check counter]
- * s1 (rMIPS_SELF) is reserved [holds current &Thread]
- * s2-s7 are callee save (promotion target)
- * k0, k1 are reserved for use by interrupt handlers
- * gp is reserved for global pointer
- * sp is reserved
- * s8 is callee save (promotion target)
- * ra is scratch (normally holds the return addr)
+ * mips32 | mips64
+ * $0: zero is always the value 0
+ * $1: at is scratch (normally used as temp reg by assembler)
+ * $2,$3: v0, v1 are scratch (normally hold subroutine return values)
+ * $4-$7: a0-a3 are scratch (normally hold subroutine arguments)
+ * $8-$11: t0-t3 are scratch | a4-a7 are scratch (normally hold subroutine arguments)
+ * $12-$15: t4-t7 are scratch | t0-t3 are scratch
+ * $16: s0 (rSUSPEND) is reserved [holds suspend-check counter]
+ * $17: s1 (rSELF) is reserved [holds current &Thread]
+ * $18-$23: s2-s7 are callee save (promotion target)
+ * $24: t8 is scratch
+ * $25: t9 is scratch (normally used for function calls)
+ * $26,$27: k0, k1 are reserved for use by interrupt handlers
+ * $28: gp is reserved for global pointer
+ * $29: sp is reserved
+ * $30: s8 is callee save (promotion target)
+ * $31: ra is scratch (normally holds the return addr)
*
* Preserved across C calls: s0-s8
- * Trashed across C calls: at, v0-v1, a0-a3, t0-t9, gp, ra
+ * Trashed across C calls (mips32): at, v0-v1, a0-a3, t0-t9, gp, ra
+ * Trashed across C calls (mips64): at, v0-v1, a0-a7, t0-t3, t8, t9, gp, ra
*
- * Floating pointer registers
+ * Floating point registers (mips32)
 * NOTE: there are 32 fp registers (16 df pairs), but we currently
 * support only 16 fp registers (8 df pairs).
* f0-f15
@@ -51,14 +55,23 @@
*
* f0-f15 (df0-df7) trashed across C calls
*
+ * Floating point registers (mips64)
+ * NOTE: there are 32 fp registers.
+ * f0-f31
+ *
* For mips32 code use:
* a0-a3 to hold operands
* v0-v1 to hold results
* t0-t9 for temps
*
+ * For mips64 code use:
+ * a0-a7 to hold operands
+ * v0-v1 to hold results
+ * t0-t3, t8-t9 for temps
+ *
 * All jump/branch instructions have a delay slot after them.
*
- * Stack frame diagram (stack grows down, higher addresses at top):
+ * Stack frame diagram (stack grows down, higher addresses at top):
*
* +------------------------+
* | IN[ins-1] | {Note: resides in caller's frame}
@@ -90,18 +103,6 @@
#define LOWORD_OFFSET 0
#define HIWORD_OFFSET 4
-#define rARG0 rA0
-#define rs_rARG0 rs_rA0
-#define rARG1 rA1
-#define rs_rARG1 rs_rA1
-#define rARG2 rA2
-#define rs_rARG2 rs_rA2
-#define rARG3 rA3
-#define rs_rARG3 rs_rA3
-#define rRESULT0 rV0
-#define rs_rRESULT0 rs_rV0
-#define rRESULT1 rV1
-#define rs_rRESULT1 rs_rV1
#define rFARG0 rF12
#define rs_rFARG0 rs_rF12
@@ -111,14 +112,6 @@
#define rs_rFARG2 rs_rF14
#define rFARG3 rF15
#define rs_rFARG3 rs_rF15
-#define rFRESULT0 rF0
-#define rs_rFRESULT0 rs_rF0
-#define rFRESULT1 rF1
-#define rs_rFRESULT1 rs_rF1
-
-// Regs not used for Mips.
-#define rMIPS_LR RegStorage::kInvalidRegVal
-#define rMIPS_PC RegStorage::kInvalidRegVal
enum MipsResourceEncodingPos {
kMipsGPReg0 = 0,
@@ -130,6 +123,10 @@
kMipsRegLO,
kMipsRegPC,
kMipsRegEnd = 51,
+ // Mips64 related:
+ kMips64FPRegEnd = 64,
+ kMips64RegPC = kMips64FPRegEnd,
+ kMips64RegEnd = 65,
};
#define ENCODE_MIPS_REG_LIST(N) (static_cast<uint64_t>(N))
@@ -144,38 +141,78 @@
#define FR_BIT 0
enum MipsNativeRegisterPool { // private marker to avoid generate-operator-out.py from processing.
- rZERO = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
- rAT = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
- rV0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
- rV1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 3,
- rA0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 4,
- rA1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 5,
- rA2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 6,
- rA3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 7,
- rT0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 8,
- rT1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 9,
- rT2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 10,
- rT3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 11,
- rT4 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 12,
- rT5 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 13,
- rT6 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 14,
- rT7 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 15,
- rS0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 16,
- rS1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 17,
- rS2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 18,
- rS3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 19,
- rS4 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 20,
- rS5 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 21,
- rS6 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 22,
- rS7 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 23,
- rT8 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 24,
- rT9 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 25,
- rK0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 26,
- rK1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 27,
- rGP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 28,
- rSP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 29,
- rFP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 30,
- rRA = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 31,
+ rZERO = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
+ rZEROd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 0,
+ rAT = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
+ rATd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 1,
+ rV0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
+ rV0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 2,
+ rV1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 3,
+ rV1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 3,
+ rA0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 4,
+ rA0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 4,
+ rA1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 5,
+ rA1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 5,
+ rA2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 6,
+ rA2d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 6,
+ rA3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 7,
+ rA3d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 7,
+ rT0_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 8,
+ rA4 = rT0_32,
+ rA4d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 8,
+ rT1_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 9,
+ rA5 = rT1_32,
+ rA5d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 9,
+ rT2_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 10,
+ rA6 = rT2_32,
+ rA6d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 10,
+ rT3_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 11,
+ rA7 = rT3_32,
+ rA7d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 11,
+ rT4_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 12,
+ rT0 = rT4_32,
+ rT0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 12,
+ rT5_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 13,
+ rT1 = rT5_32,
+ rT1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 13,
+ rT6_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 14,
+ rT2 = rT6_32,
+ rT2d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 14,
+ rT7_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 15,
+ rT3 = rT7_32,
+ rT3d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 15,
+ rS0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 16,
+ rS0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 16,
+ rS1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 17,
+ rS1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 17,
+ rS2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 18,
+ rS2d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 18,
+ rS3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 19,
+ rS3d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 19,
+ rS4 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 20,
+ rS4d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 20,
+ rS5 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 21,
+ rS5d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 21,
+ rS6 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 22,
+ rS6d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 22,
+ rS7 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 23,
+ rS7d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 23,
+ rT8 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 24,
+ rT8d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 24,
+ rT9 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 25,
+ rT9d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 25,
+ rK0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 26,
+ rK0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 26,
+ rK1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 27,
+ rK1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 27,
+ rGP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 28,
+ rGPd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 28,
+ rSP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 29,
+ rSPd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 29,
+ rFP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 30,
+ rFPd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 30,
+ rRA = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 31,
+ rRAd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 31,
rF0 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 0,
rF1 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 1,
@@ -193,6 +230,24 @@
rF13 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 13,
rF14 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 14,
rF15 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 15,
+
+ rF16 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 16,
+ rF17 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 17,
+ rF18 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 18,
+ rF19 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 19,
+ rF20 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 20,
+ rF21 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 21,
+ rF22 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 22,
+ rF23 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 23,
+ rF24 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 24,
+ rF25 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 25,
+ rF26 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 26,
+ rF27 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 27,
+ rF28 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 28,
+ rF29 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 29,
+ rF30 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 30,
+ rF31 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 31,
+
#if 0
/*
* TODO: The shared resource mask doesn't have enough bit positions to describe all
@@ -236,23 +291,56 @@
#endif
// Double precision registers where the FPU is in 64-bit mode.
rD0_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 0,
- rD1_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 1,
- rD2_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
- rD3_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 3,
- rD4_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 4,
- rD5_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 5,
- rD6_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
- rD7_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 7,
+ rD1_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
+ rD2_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 4,
+ rD3_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
+ rD4_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 8,
+ rD5_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
+ rD6_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
+ rD7_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
#if 0 // TODO: expand resource mask to enable use of all MIPS fp registers.
- rD8_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 8,
- rD9_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 9,
- rD10_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
- rD11_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
- rD12_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
- rD13_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
- rD14_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
- rD15_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,
+ rD8_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 16,
+ rD9_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 18,
+ rD10_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 20,
+ rD11_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 22,
+ rD12_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 24,
+ rD13_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 26,
+ rD14_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 28,
+ rD15_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 30,
#endif
+
+ rD0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 0,
+ rD1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 1,
+ rD2 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
+ rD3 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 3,
+ rD4 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 4,
+ rD5 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 5,
+ rD6 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
+ rD7 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 7,
+ rD8 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 8,
+ rD9 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 9,
+ rD10 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
+ rD11 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
+ rD12 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
+ rD13 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
+ rD14 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
+ rD15 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,
+ rD16 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 16,
+ rD17 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 17,
+ rD18 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 18,
+ rD19 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 19,
+ rD20 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 20,
+ rD21 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 21,
+ rD22 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 22,
+ rD23 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 23,
+ rD24 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 24,
+ rD25 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 25,
+ rD26 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 26,
+ rD27 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 27,
+ rD28 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 28,
+ rD29 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 29,
+ rD30 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 30,
+ rD31 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 31,
};
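(Aside: each enumerator above ORs a storage shape, a register class, and the hardware register number into one value; suffixed names like rA0d are the 64-bit views of the same physical registers. A sketch of the pattern with hypothetical mask values, not the real RegStorage constants:)

    #include <cstdint>
    constexpr uint16_t kShape32Solo = 0x0100;  // hypothetical shape bit
    constexpr uint16_t kShape64Solo = 0x0200;  // hypothetical shape bit
    constexpr uint16_t kKindCore = 0x0000, kKindFloat = 0x0400;  // hypothetical class bits
    constexpr uint16_t MakeReg(uint16_t shape, uint16_t kind, uint16_t num) {
      return shape | kind | num;  // e.g. rA0 ~ MakeReg(kShape32Solo, kKindCore, 4)
    }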
constexpr RegStorage rs_rZERO(RegStorage::kValid | rZERO);
@@ -263,14 +351,22 @@
constexpr RegStorage rs_rA1(RegStorage::kValid | rA1);
constexpr RegStorage rs_rA2(RegStorage::kValid | rA2);
constexpr RegStorage rs_rA3(RegStorage::kValid | rA3);
-constexpr RegStorage rs_rT0(RegStorage::kValid | rT0);
-constexpr RegStorage rs_rT1(RegStorage::kValid | rT1);
-constexpr RegStorage rs_rT2(RegStorage::kValid | rT2);
-constexpr RegStorage rs_rT3(RegStorage::kValid | rT3);
-constexpr RegStorage rs_rT4(RegStorage::kValid | rT4);
-constexpr RegStorage rs_rT5(RegStorage::kValid | rT5);
-constexpr RegStorage rs_rT6(RegStorage::kValid | rT6);
-constexpr RegStorage rs_rT7(RegStorage::kValid | rT7);
+constexpr RegStorage rs_rT0_32(RegStorage::kValid | rT0_32);
+constexpr RegStorage rs_rA4 = rs_rT0_32;
+constexpr RegStorage rs_rT1_32(RegStorage::kValid | rT1_32);
+constexpr RegStorage rs_rA5 = rs_rT1_32;
+constexpr RegStorage rs_rT2_32(RegStorage::kValid | rT2_32);
+constexpr RegStorage rs_rA6 = rs_rT2_32;
+constexpr RegStorage rs_rT3_32(RegStorage::kValid | rT3_32);
+constexpr RegStorage rs_rA7 = rs_rT3_32;
+constexpr RegStorage rs_rT4_32(RegStorage::kValid | rT4_32);
+constexpr RegStorage rs_rT0 = rs_rT4_32;
+constexpr RegStorage rs_rT5_32(RegStorage::kValid | rT5_32);
+constexpr RegStorage rs_rT1 = rs_rT5_32;
+constexpr RegStorage rs_rT6_32(RegStorage::kValid | rT6_32);
+constexpr RegStorage rs_rT2 = rs_rT6_32;
+constexpr RegStorage rs_rT7_32(RegStorage::kValid | rT7_32);
+constexpr RegStorage rs_rT3 = rs_rT7_32;
constexpr RegStorage rs_rS0(RegStorage::kValid | rS0);
constexpr RegStorage rs_rS1(RegStorage::kValid | rS1);
constexpr RegStorage rs_rS2(RegStorage::kValid | rS2);
@@ -288,9 +384,38 @@
constexpr RegStorage rs_rFP(RegStorage::kValid | rFP);
constexpr RegStorage rs_rRA(RegStorage::kValid | rRA);
-constexpr RegStorage rs_rMIPS_LR(RegStorage::kInvalid); // Not used for MIPS.
-constexpr RegStorage rs_rMIPS_PC(RegStorage::kInvalid); // Not used for MIPS.
-constexpr RegStorage rs_rMIPS_COUNT(RegStorage::kInvalid); // Not used for MIPS.
+constexpr RegStorage rs_rZEROd(RegStorage::kValid | rZEROd);
+constexpr RegStorage rs_rATd(RegStorage::kValid | rATd);
+constexpr RegStorage rs_rV0d(RegStorage::kValid | rV0d);
+constexpr RegStorage rs_rV1d(RegStorage::kValid | rV1d);
+constexpr RegStorage rs_rA0d(RegStorage::kValid | rA0d);
+constexpr RegStorage rs_rA1d(RegStorage::kValid | rA1d);
+constexpr RegStorage rs_rA2d(RegStorage::kValid | rA2d);
+constexpr RegStorage rs_rA3d(RegStorage::kValid | rA3d);
+constexpr RegStorage rs_rA4d(RegStorage::kValid | rA4d);
+constexpr RegStorage rs_rA5d(RegStorage::kValid | rA5d);
+constexpr RegStorage rs_rA6d(RegStorage::kValid | rA6d);
+constexpr RegStorage rs_rA7d(RegStorage::kValid | rA7d);
+constexpr RegStorage rs_rT0d(RegStorage::kValid | rT0d);
+constexpr RegStorage rs_rT1d(RegStorage::kValid | rT1d);
+constexpr RegStorage rs_rT2d(RegStorage::kValid | rT2d);
+constexpr RegStorage rs_rT3d(RegStorage::kValid | rT3d);
+constexpr RegStorage rs_rS0d(RegStorage::kValid | rS0d);
+constexpr RegStorage rs_rS1d(RegStorage::kValid | rS1d);
+constexpr RegStorage rs_rS2d(RegStorage::kValid | rS2d);
+constexpr RegStorage rs_rS3d(RegStorage::kValid | rS3d);
+constexpr RegStorage rs_rS4d(RegStorage::kValid | rS4d);
+constexpr RegStorage rs_rS5d(RegStorage::kValid | rS5d);
+constexpr RegStorage rs_rS6d(RegStorage::kValid | rS6d);
+constexpr RegStorage rs_rS7d(RegStorage::kValid | rS7d);
+constexpr RegStorage rs_rT8d(RegStorage::kValid | rT8d);
+constexpr RegStorage rs_rT9d(RegStorage::kValid | rT9d);
+constexpr RegStorage rs_rK0d(RegStorage::kValid | rK0d);
+constexpr RegStorage rs_rK1d(RegStorage::kValid | rK1d);
+constexpr RegStorage rs_rGPd(RegStorage::kValid | rGPd);
+constexpr RegStorage rs_rSPd(RegStorage::kValid | rSPd);
+constexpr RegStorage rs_rFPd(RegStorage::kValid | rFPd);
+constexpr RegStorage rs_rRAd(RegStorage::kValid | rRAd);
constexpr RegStorage rs_rF0(RegStorage::kValid | rF0);
constexpr RegStorage rs_rF1(RegStorage::kValid | rF1);
@@ -309,6 +434,23 @@
constexpr RegStorage rs_rF14(RegStorage::kValid | rF14);
constexpr RegStorage rs_rF15(RegStorage::kValid | rF15);
+constexpr RegStorage rs_rF16(RegStorage::kValid | rF16);
+constexpr RegStorage rs_rF17(RegStorage::kValid | rF17);
+constexpr RegStorage rs_rF18(RegStorage::kValid | rF18);
+constexpr RegStorage rs_rF19(RegStorage::kValid | rF19);
+constexpr RegStorage rs_rF20(RegStorage::kValid | rF20);
+constexpr RegStorage rs_rF21(RegStorage::kValid | rF21);
+constexpr RegStorage rs_rF22(RegStorage::kValid | rF22);
+constexpr RegStorage rs_rF23(RegStorage::kValid | rF23);
+constexpr RegStorage rs_rF24(RegStorage::kValid | rF24);
+constexpr RegStorage rs_rF25(RegStorage::kValid | rF25);
+constexpr RegStorage rs_rF26(RegStorage::kValid | rF26);
+constexpr RegStorage rs_rF27(RegStorage::kValid | rF27);
+constexpr RegStorage rs_rF28(RegStorage::kValid | rF28);
+constexpr RegStorage rs_rF29(RegStorage::kValid | rF29);
+constexpr RegStorage rs_rF30(RegStorage::kValid | rF30);
+constexpr RegStorage rs_rF31(RegStorage::kValid | rF31);
+
constexpr RegStorage rs_rD0_fr0(RegStorage::kValid | rD0_fr0);
constexpr RegStorage rs_rD1_fr0(RegStorage::kValid | rD1_fr0);
constexpr RegStorage rs_rD2_fr0(RegStorage::kValid | rD2_fr0);
@@ -327,51 +469,65 @@
constexpr RegStorage rs_rD6_fr1(RegStorage::kValid | rD6_fr1);
constexpr RegStorage rs_rD7_fr1(RegStorage::kValid | rD7_fr1);
-// TODO: reduce/eliminate use of these.
-#define rMIPS_SUSPEND rS0
-#define rs_rMIPS_SUSPEND rs_rS0
-#define rMIPS_SELF rS1
-#define rs_rMIPS_SELF rs_rS1
-#define rMIPS_SP rSP
-#define rs_rMIPS_SP rs_rSP
-#define rMIPS_ARG0 rARG0
-#define rs_rMIPS_ARG0 rs_rARG0
-#define rMIPS_ARG1 rARG1
-#define rs_rMIPS_ARG1 rs_rARG1
-#define rMIPS_ARG2 rARG2
-#define rs_rMIPS_ARG2 rs_rARG2
-#define rMIPS_ARG3 rARG3
-#define rs_rMIPS_ARG3 rs_rARG3
-#define rMIPS_FARG0 rFARG0
-#define rs_rMIPS_FARG0 rs_rFARG0
-#define rMIPS_FARG1 rFARG1
-#define rs_rMIPS_FARG1 rs_rFARG1
-#define rMIPS_FARG2 rFARG2
-#define rs_rMIPS_FARG2 rs_rFARG2
-#define rMIPS_FARG3 rFARG3
-#define rs_rMIPS_FARG3 rs_rFARG3
-#define rMIPS_RET0 rRESULT0
-#define rs_rMIPS_RET0 rs_rRESULT0
-#define rMIPS_RET1 rRESULT1
-#define rs_rMIPS_RET1 rs_rRESULT1
-#define rMIPS_INVOKE_TGT rT9
-#define rs_rMIPS_INVOKE_TGT rs_rT9
-#define rMIPS_COUNT RegStorage::kInvalidRegVal
+constexpr RegStorage rs_rD0(RegStorage::kValid | rD0);
+constexpr RegStorage rs_rD1(RegStorage::kValid | rD1);
+constexpr RegStorage rs_rD2(RegStorage::kValid | rD2);
+constexpr RegStorage rs_rD3(RegStorage::kValid | rD3);
+constexpr RegStorage rs_rD4(RegStorage::kValid | rD4);
+constexpr RegStorage rs_rD5(RegStorage::kValid | rD5);
+constexpr RegStorage rs_rD6(RegStorage::kValid | rD6);
+constexpr RegStorage rs_rD7(RegStorage::kValid | rD7);
+constexpr RegStorage rs_rD8(RegStorage::kValid | rD8);
+constexpr RegStorage rs_rD9(RegStorage::kValid | rD9);
+constexpr RegStorage rs_rD10(RegStorage::kValid | rD10);
+constexpr RegStorage rs_rD11(RegStorage::kValid | rD11);
+constexpr RegStorage rs_rD12(RegStorage::kValid | rD12);
+constexpr RegStorage rs_rD13(RegStorage::kValid | rD13);
+constexpr RegStorage rs_rD14(RegStorage::kValid | rD14);
+constexpr RegStorage rs_rD15(RegStorage::kValid | rD15);
+constexpr RegStorage rs_rD16(RegStorage::kValid | rD16);
+constexpr RegStorage rs_rD17(RegStorage::kValid | rD17);
+constexpr RegStorage rs_rD18(RegStorage::kValid | rD18);
+constexpr RegStorage rs_rD19(RegStorage::kValid | rD19);
+constexpr RegStorage rs_rD20(RegStorage::kValid | rD20);
+constexpr RegStorage rs_rD21(RegStorage::kValid | rD21);
+constexpr RegStorage rs_rD22(RegStorage::kValid | rD22);
+constexpr RegStorage rs_rD23(RegStorage::kValid | rD23);
+constexpr RegStorage rs_rD24(RegStorage::kValid | rD24);
+constexpr RegStorage rs_rD25(RegStorage::kValid | rD25);
+constexpr RegStorage rs_rD26(RegStorage::kValid | rD26);
+constexpr RegStorage rs_rD27(RegStorage::kValid | rD27);
+constexpr RegStorage rs_rD28(RegStorage::kValid | rD28);
+constexpr RegStorage rs_rD29(RegStorage::kValid | rD29);
+constexpr RegStorage rs_rD30(RegStorage::kValid | rD30);
+constexpr RegStorage rs_rD31(RegStorage::kValid | rD31);
// RegisterLocation templates return values (r_V0, or r_V0/r_V1).
const RegLocation mips_loc_c_return
{kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1,
RegStorage(RegStorage::k32BitSolo, rV0), INVALID_SREG, INVALID_SREG};
+const RegLocation mips64_loc_c_return_ref
+ {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1,
+ RegStorage(RegStorage::k64BitSolo, rV0d), INVALID_SREG, INVALID_SREG};
const RegLocation mips_loc_c_return_wide
{kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
RegStorage(RegStorage::k64BitPair, rV0, rV1), INVALID_SREG, INVALID_SREG};
+const RegLocation mips64_loc_c_return_wide
+ {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
+ RegStorage(RegStorage::k64BitSolo, rV0d), INVALID_SREG, INVALID_SREG};
const RegLocation mips_loc_c_return_float
{kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1,
RegStorage(RegStorage::k32BitSolo, rF0), INVALID_SREG, INVALID_SREG};
// FIXME: move MIPS to k64BitSolo for doubles
-const RegLocation mips_loc_c_return_double
+const RegLocation mips_loc_c_return_double_fr0
{kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
RegStorage(RegStorage::k64BitPair, rF0, rF1), INVALID_SREG, INVALID_SREG};
+const RegLocation mips_loc_c_return_double_fr1
+ {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
+ RegStorage(RegStorage::k64BitSolo, rF0), INVALID_SREG, INVALID_SREG};
+const RegLocation mips64_loc_c_return_double
+ {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
+ RegStorage(RegStorage::k64BitSolo, rD0), INVALID_SREG, INVALID_SREG};
enum MipsShiftEncodings {
kMipsLsl = 0x0,
@@ -393,96 +549,136 @@
#define kSY kSYNC0
/*
- * The following enum defines the list of supported Thumb instructions by the
+ * The following enum defines the list of mips instructions supported by the
* assembler. Their corresponding EncodingMap positions will be defined in
- * Assemble.cc.
+ * assemble_mips.cc.
*/
enum MipsOpCode {
kMipsFirst = 0,
+ // The following are common mips32r2, mips32r6 and mips64r6 instructions.
kMips32BitData = kMipsFirst, // data [31..0].
- kMipsAddiu, // addiu t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
- kMipsAddu, // add d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100001].
- kMipsAnd, // and d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100100].
- kMipsAndi, // andi t,s,imm16 [001100] s[25..21] t[20..16] imm16[15..0].
- kMipsB, // b o [0001000000000000] o[15..0].
- kMipsBal, // bal o [0000010000010001] o[15..0].
- // NOTE: the code tests the range kMipsBeq thru kMipsBne, so adding an instruction in this
- // range may require updates.
- kMipsBeq, // beq s,t,o [000100] s[25..21] t[20..16] o[15..0].
- kMipsBeqz, // beqz s,o [000100] s[25..21] [00000] o[15..0].
- kMipsBgez, // bgez s,o [000001] s[25..21] [00001] o[15..0].
- kMipsBgtz, // bgtz s,o [000111] s[25..21] [00000] o[15..0].
- kMipsBlez, // blez s,o [000110] s[25..21] [00000] o[15..0].
- kMipsBltz, // bltz s,o [000001] s[25..21] [00000] o[15..0].
- kMipsBnez, // bnez s,o [000101] s[25..21] [00000] o[15..0].
- kMipsBne, // bne s,t,o [000101] s[25..21] t[20..16] o[15..0].
- kMipsDiv, // div s,t [000000] s[25..21] t[20..16] [0000000000011010].
- kMipsExt, // ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000].
- kMipsJal, // jal t [000011] t[25..0].
- kMipsJalr, // jalr d,s [000000] s[25..21] [00000] d[15..11] hint[10..6] [001001].
- kMipsJr, // jr s [000000] s[25..21] [0000000000] hint[10..6] [001000].
- kMipsLahi, // lui t,imm16 [00111100000] t[20..16] imm16[15..0] load addr hi.
- kMipsLalo, // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] load addr lo.
- kMipsLui, // lui t,imm16 [00111100000] t[20..16] imm16[15..0].
- kMipsLb, // lb t,o(b) [100000] b[25..21] t[20..16] o[15..0].
- kMipsLbu, // lbu t,o(b) [100100] b[25..21] t[20..16] o[15..0].
- kMipsLh, // lh t,o(b) [100001] b[25..21] t[20..16] o[15..0].
- kMipsLhu, // lhu t,o(b) [100101] b[25..21] t[20..16] o[15..0].
- kMipsLw, // lw t,o(b) [100011] b[25..21] t[20..16] o[15..0].
- kMipsMfhi, // mfhi d [0000000000000000] d[15..11] [00000010000].
- kMipsMflo, // mflo d [0000000000000000] d[15..11] [00000010010].
- kMipsMove, // move d,s [000000] s[25..21] [00000] d[15..11] [00000100101].
- kMipsMovz, // movz d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000001010].
- kMipsMul, // mul d,s,t [011100] s[25..21] t[20..16] d[15..11] [00000000010].
- kMipsNop, // nop [00000000000000000000000000000000].
- kMipsNor, // nor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100111].
- kMipsOr, // or d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100101].
- kMipsOri, // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
- kMipsPref, // pref h,o(b) [101011] b[25..21] h[20..16] o[15..0].
- kMipsSb, // sb t,o(b) [101000] b[25..21] t[20..16] o[15..0].
- kMipsSeb, // seb d,t [01111100000] t[20..16] d[15..11] [10000100000].
- kMipsSeh, // seh d,t [01111100000] t[20..16] d[15..11] [11000100000].
- kMipsSh, // sh t,o(b) [101001] b[25..21] t[20..16] o[15..0].
- kMipsSll, // sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000].
- kMipsSllv, // sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100].
- kMipsSlt, // slt d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101010].
- kMipsSlti, // slti t,s,imm16 [001010] s[25..21] t[20..16] imm16[15..0].
- kMipsSltu, // sltu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101011].
- kMipsSra, // sra d,s,imm5 [00000000000] t[20..16] d[15..11] imm5[10..6] [000011].
- kMipsSrav, // srav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000111].
- kMipsSrl, // srl d,t,a [00000000000] t[20..16] d[20..16] a[10..6] [000010].
- kMipsSrlv, // srlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000110].
- kMipsSubu, // subu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100011].
- kMipsSw, // sw t,o(b) [101011] b[25..21] t[20..16] o[15..0].
- kMipsXor, // xor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100110].
- kMipsXori, // xori t,s,imm16 [001110] s[25..21] t[20..16] imm16[15..0].
- kMipsFadds, // add.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000000].
- kMipsFsubs, // sub.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000001].
- kMipsFmuls, // mul.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000010].
- kMipsFdivs, // div.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000011].
- kMipsFaddd, // add.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000000].
- kMipsFsubd, // sub.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000001].
- kMipsFmuld, // mul.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000010].
- kMipsFdivd, // div.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000011].
- kMipsFcvtsd, // cvt.s.d d,s [01000110001] [00000] s[15..11] d[10..6] [100000].
- kMipsFcvtsw, // cvt.s.w d,s [01000110100] [00000] s[15..11] d[10..6] [100000].
- kMipsFcvtds, // cvt.d.s d,s [01000110000] [00000] s[15..11] d[10..6] [100001].
- kMipsFcvtdw, // cvt.d.w d,s [01000110100] [00000] s[15..11] d[10..6] [100001].
- kMipsFcvtws, // cvt.w.d d,s [01000110000] [00000] s[15..11] d[10..6] [100100].
- kMipsFcvtwd, // cvt.w.d d,s [01000110001] [00000] s[15..11] d[10..6] [100100].
- kMipsFmovs, // mov.s d,s [01000110000] [00000] s[15..11] d[10..6] [000110].
- kMipsFmovd, // mov.d d,s [01000110001] [00000] s[15..11] d[10..6] [000110].
- kMipsFlwc1, // lwc1 t,o(b) [110001] b[25..21] t[20..16] o[15..0].
- kMipsFldc1, // ldc1 t,o(b) [110101] b[25..21] t[20..16] o[15..0].
- kMipsFswc1, // swc1 t,o(b) [111001] b[25..21] t[20..16] o[15..0].
- kMipsFsdc1, // sdc1 t,o(b) [111101] b[25..21] t[20..16] o[15..0].
- kMipsMfc1, // mfc1 t,s [01000100000] t[20..16] s[15..11] [00000000000].
- kMipsMtc1, // mtc1 t,s [01000100100] t[20..16] s[15..11] [00000000000].
- kMipsDelta, // Psuedo for ori t, s, <label>-<label>.
- kMipsDeltaHi, // Pseudo for lui t, high16(<label>-<label>).
- kMipsDeltaLo, // Pseudo for ori t, s, low16(<label>-<label>).
- kMipsCurrPC, // jal to .+8 to materialize pc.
- kMipsSync, // sync kind [000000] [0000000000000000] s[10..6] [001111].
+ kMipsAddiu, // addiu t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
+ kMipsAddu, // add d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100001].
+ kMipsAnd, // and d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100100].
+ kMipsAndi, // andi t,s,imm16 [001100] s[25..21] t[20..16] imm16[15..0].
+ kMipsB, // b o [0001000000000000] o[15..0].
+ kMipsBal, // bal o [0000010000010001] o[15..0].
+  // NOTE: the code tests the range kMipsBeq thru kMipsBne, so adding an instruction in this
+ // range may require updates.
+ kMipsBeq, // beq s,t,o [000100] s[25..21] t[20..16] o[15..0].
+ kMipsBeqz, // beqz s,o [000100] s[25..21] [00000] o[15..0].
+ kMipsBgez, // bgez s,o [000001] s[25..21] [00001] o[15..0].
+ kMipsBgtz, // bgtz s,o [000111] s[25..21] [00000] o[15..0].
+ kMipsBlez, // blez s,o [000110] s[25..21] [00000] o[15..0].
+ kMipsBltz, // bltz s,o [000001] s[25..21] [00000] o[15..0].
+ kMipsBnez, // bnez s,o [000101] s[25..21] [00000] o[15..0].
+ kMipsBne, // bne s,t,o [000101] s[25..21] t[20..16] o[15..0].
+ kMipsExt, // ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000].
+ kMipsFaddd, // add.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000000].
+ kMipsFadds, // add.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000000].
+ kMipsFsubd, // sub.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000001].
+ kMipsFsubs, // sub.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000001].
+ kMipsFdivd, // div.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000011].
+ kMipsFdivs, // div.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000011].
+  kMipsFmuld,  // mul.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000010].
+  kMipsFmuls,  // mul.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000010].
+ kMipsFcvtsd, // cvt.s.d d,s [01000110001] [00000] s[15..11] d[10..6] [100000].
+ kMipsFcvtsw, // cvt.s.w d,s [01000110100] [00000] s[15..11] d[10..6] [100000].
+ kMipsFcvtds, // cvt.d.s d,s [01000110000] [00000] s[15..11] d[10..6] [100001].
+ kMipsFcvtdw, // cvt.d.w d,s [01000110100] [00000] s[15..11] d[10..6] [100001].
+ kMipsFcvtwd, // cvt.w.d d,s [01000110001] [00000] s[15..11] d[10..6] [100100].
+ kMipsFcvtws, // cvt.w.s d,s [01000110000] [00000] s[15..11] d[10..6] [100100].
+ kMipsFmovd, // mov.d d,s [01000110001] [00000] s[15..11] d[10..6] [000110].
+ kMipsFmovs, // mov.s d,s [01000110000] [00000] s[15..11] d[10..6] [000110].
+ kMipsFnegd, // neg.d d,s [01000110001] [00000] s[15..11] d[10..6] [000111].
+ kMipsFnegs, // neg.s d,s [01000110000] [00000] s[15..11] d[10..6] [000111].
+ kMipsFldc1, // ldc1 t,o(b) [110101] b[25..21] t[20..16] o[15..0].
+ kMipsFlwc1, // lwc1 t,o(b) [110001] b[25..21] t[20..16] o[15..0].
+ kMipsFsdc1, // sdc1 t,o(b) [111101] b[25..21] t[20..16] o[15..0].
+ kMipsFswc1, // swc1 t,o(b) [111001] b[25..21] t[20..16] o[15..0].
+ kMipsJal, // jal t [000011] t[25..0].
+ kMipsJalr, // jalr d,s [000000] s[25..21] [00000] d[15..11] hint[10..6] [001001].
+ kMipsJr, // jr s [000000] s[25..21] [0000000000] hint[10..6] [001000].
+ kMipsLahi, // lui t,imm16 [00111100000] t[20..16] imm16[15..0] load addr hi.
+ kMipsLalo, // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] load addr lo.
+ kMipsLui, // lui t,imm16 [00111100000] t[20..16] imm16[15..0].
+ kMipsLb, // lb t,o(b) [100000] b[25..21] t[20..16] o[15..0].
+ kMipsLbu, // lbu t,o(b) [100100] b[25..21] t[20..16] o[15..0].
+ kMipsLh, // lh t,o(b) [100001] b[25..21] t[20..16] o[15..0].
+ kMipsLhu, // lhu t,o(b) [100101] b[25..21] t[20..16] o[15..0].
+ kMipsLw, // lw t,o(b) [100011] b[25..21] t[20..16] o[15..0].
+ kMipsMove, // move d,s [000000] s[25..21] [00000] d[15..11] [00000100101].
+ kMipsMfc1, // mfc1 t,s [01000100000] t[20..16] s[15..11] [00000000000].
+ kMipsMtc1, // mtc1 t,s [01000100100] t[20..16] s[15..11] [00000000000].
+ kMipsMfhc1, // mfhc1 t,s [01000100011] t[20..16] s[15..11] [00000000000].
+ kMipsMthc1, // mthc1 t,s [01000100111] t[20..16] s[15..11] [00000000000].
+ kMipsNop, // nop [00000000000000000000000000000000].
+ kMipsNor, // nor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100111].
+ kMipsOr, // or d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100101].
+ kMipsOri, // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
+ kMipsPref, // pref h,o(b) [101011] b[25..21] h[20..16] o[15..0].
+ kMipsSb, // sb t,o(b) [101000] b[25..21] t[20..16] o[15..0].
+ kMipsSeb, // seb d,t [01111100000] t[20..16] d[15..11] [10000100000].
+ kMipsSeh, // seh d,t [01111100000] t[20..16] d[15..11] [11000100000].
+ kMipsSh, // sh t,o(b) [101001] b[25..21] t[20..16] o[15..0].
+ kMipsSll, // sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000].
+ kMipsSllv, // sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100].
+ kMipsSlt, // slt d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101010].
+ kMipsSlti, // slti t,s,imm16 [001010] s[25..21] t[20..16] imm16[15..0].
+ kMipsSltu, // sltu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101011].
+ kMipsSra, // sra d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000011].
+ kMipsSrav, // srav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000111].
+ kMipsSrl, // srl d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000010].
+ kMipsSrlv, // srlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000110].
+ kMipsSubu, // subu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100011].
+ kMipsSw, // sw t,o(b) [101011] b[25..21] t[20..16] o[15..0].
+ kMipsSync, // sync kind [000000] [0000000000000000] s[10..6] [001111].
+ kMipsXor, // xor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100110].
+ kMipsXori, // xori t,s,imm16 [001110] s[25..21] t[20..16] imm16[15..0].
+
+ // The following are mips32r2 instructions.
+ kMipsR2Div, // div s,t [000000] s[25..21] t[20..16] [0000000000011010].
+ kMipsR2Mul, // mul d,s,t [011100] s[25..21] t[20..16] d[15..11] [00000000010].
+ kMipsR2Mfhi, // mfhi d [0000000000000000] d[15..11] [00000010000].
+ kMipsR2Mflo, // mflo d [0000000000000000] d[15..11] [00000010010].
+ kMipsR2Movz, // movz d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000001010].
+
+ // The following are mips32r6 and mips64r6 instructions.
+ kMipsR6Div, // div d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011010].
+ kMipsR6Mod, // mod d,s,t [000000] s[25..21] t[20..16] d[15..11] [00011011010].
+ kMipsR6Mul, // mul d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011000].
+
+ // The following are mips64r6 instructions.
+ kMips64Daddiu, // daddiu t,s,imm16 [011001] s[25..21] t[20..16] imm16[15..0].
+ kMips64Daddu, // daddu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101101].
+ kMips64Dahi, // dahi s,imm16 [000001] s[25..21] [00110] imm16[15..0].
+ kMips64Dati, // dati s,imm16 [000001] s[25..21] [11110] imm16[15..0].
+ kMips64Daui, // daui t,s,imm16 [011101] s[25..21] t[20..16] imm16[15..0].
+ kMips64Ddiv, // ddiv d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011110].
+ kMips64Dmod, // dmod d,s,t [000000] s[25..21] t[20..16] d[15..11] [00011011110].
+ kMips64Dmul, // dmul d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011100].
+ kMips64Dmfc1, // dmfc1 t,s [01000100001] t[20..16] s[15..11] [00000000000].
+ kMips64Dmtc1, // dmtc1 t,s [01000100101] t[20..16] s[15..11] [00000000000].
+ kMips64Drotr32, // drotr32 d,t,a [00000000001] t[20..16] d[15..11] a[10..6] [111110].
+ kMips64Dsll, // dsll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111000].
+ kMips64Dsll32, // dsll32 d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111100].
+ kMips64Dsrl, // dsrl d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111010].
+ kMips64Dsrl32, // dsrl32 d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111110].
+ kMips64Dsra, // dsra d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111011].
+ kMips64Dsra32, // dsra32 d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111111].
+ kMips64Dsllv, // dsllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010100].
+ kMips64Dsrlv, // dsrlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010110].
+ kMips64Dsrav, // dsrav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010111].
+ kMips64Dsubu, // dsubu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101111].
+ kMips64Ld, // ld t,o(b) [110111] b[25..21] t[20..16] o[15..0].
+ kMips64Lwu, // lwu t,o(b) [100111] b[25..21] t[20..16] o[15..0].
+ kMips64Sd, // sd t,o(b) [111111] b[25..21] t[20..16] o[15..0].
+
+ // The following are pseudoinstructions.
+ kMipsDelta, // Pseudo for ori t, s, <label>-<label>.
+ kMipsDeltaHi, // Pseudo for lui t, high16(<label>-<label>).
+ kMipsDeltaLo, // Pseudo for ori t, s, low16(<label>-<label>).
+ kMipsCurrPC, // jal to .+8 to materialize pc.
kMipsUndefined, // undefined [011001xxxxxxxxxxxxxxxx].
kMipsLast
};
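// Illustrative expansion of the pseudo-ops above (a sketch; the real
// expansion lives in assemble_mips.cc): materializing the absolute address
// of label L2 relative to the current pc, with L1 placed at the jal:
//
//   jal  .+8                  // kMipsCurrPC: RA <- address of jal + 8
//   lui  t, hi16(L2 - L1)     // kMipsDeltaHi
//   ori  t, t, lo16(L2 - L1)  // kMipsDeltaLo
//   addu t, t, ra             // t <- absolute address of L2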
@@ -493,7 +689,7 @@
kFmtUnused,
kFmtBitBlt, // Bit string using end/start.
kFmtDfp, // Double FP reg.
- kFmtSfp, // Single FP reg
+ kFmtSfp, // Single FP reg.
kFmtBlt5_2, // Same 5-bit field to 2 locations.
};
std::ostream& operator<<(std::ostream& os, const MipsEncodingKind& rhs);
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 8574ffd..a94fad7 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -30,55 +30,131 @@
namespace art {
-static constexpr RegStorage core_regs_arr[] =
- {rs_rZERO, rs_rAT, rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rT0, rs_rT1, rs_rT2,
- rs_rT3, rs_rT4, rs_rT5, rs_rT6, rs_rT7, rs_rS0, rs_rS1, rs_rS2, rs_rS3, rs_rS4, rs_rS5,
- rs_rS6, rs_rS7, rs_rT8, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rFP, rs_rRA};
-static constexpr RegStorage sp_regs_arr[] =
+static constexpr RegStorage core_regs_arr_32[] =
+ {rs_rZERO, rs_rAT, rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rT0_32, rs_rT1_32,
+ rs_rT2_32, rs_rT3_32, rs_rT4_32, rs_rT5_32, rs_rT6_32, rs_rT7_32, rs_rS0, rs_rS1, rs_rS2,
+ rs_rS3, rs_rS4, rs_rS5, rs_rS6, rs_rS7, rs_rT8, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rFP,
+ rs_rRA};
+static constexpr RegStorage sp_regs_arr_32[] =
{rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15};
-static constexpr RegStorage dp_fr0_regs_arr[] =
+static constexpr RegStorage dp_fr0_regs_arr_32[] =
{rs_rD0_fr0, rs_rD1_fr0, rs_rD2_fr0, rs_rD3_fr0, rs_rD4_fr0, rs_rD5_fr0, rs_rD6_fr0,
rs_rD7_fr0};
-static constexpr RegStorage dp_fr1_regs_arr[] =
+static constexpr RegStorage dp_fr1_regs_arr_32[] =
{rs_rD0_fr1, rs_rD1_fr1, rs_rD2_fr1, rs_rD3_fr1, rs_rD4_fr1, rs_rD5_fr1, rs_rD6_fr1,
rs_rD7_fr1};
-static constexpr RegStorage reserved_regs_arr[] =
+static constexpr RegStorage reserved_regs_arr_32[] =
{rs_rZERO, rs_rAT, rs_rS0, rs_rS1, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rRA};
-static constexpr RegStorage core_temps_arr[] =
- {rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rT0, rs_rT1, rs_rT2, rs_rT3, rs_rT4,
- rs_rT5, rs_rT6, rs_rT7, rs_rT8};
-static constexpr RegStorage sp_temps_arr[] =
+static constexpr RegStorage core_temps_arr_32[] =
+ {rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rT0_32, rs_rT1_32, rs_rT2_32, rs_rT3_32,
+ rs_rT4_32, rs_rT5_32, rs_rT6_32, rs_rT7_32, rs_rT8};
+static constexpr RegStorage sp_temps_arr_32[] =
{rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15};
-static constexpr RegStorage dp_fr0_temps_arr[] =
+static constexpr RegStorage dp_fr0_temps_arr_32[] =
{rs_rD0_fr0, rs_rD1_fr0, rs_rD2_fr0, rs_rD3_fr0, rs_rD4_fr0, rs_rD5_fr0, rs_rD6_fr0,
rs_rD7_fr0};
-static constexpr RegStorage dp_fr1_temps_arr[] =
+static constexpr RegStorage dp_fr1_temps_arr_32[] =
{rs_rD0_fr1, rs_rD1_fr1, rs_rD2_fr1, rs_rD3_fr1, rs_rD4_fr1, rs_rD5_fr1, rs_rD6_fr1,
rs_rD7_fr1};
+static constexpr RegStorage core_regs_arr_64[] =
+ {rs_rZERO, rs_rAT, rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rA4, rs_rA5, rs_rA6,
+ rs_rA7, rs_rT0, rs_rT1, rs_rT2, rs_rT3, rs_rS0, rs_rS1, rs_rS2, rs_rS3, rs_rS4, rs_rS5, rs_rS6,
+ rs_rS7, rs_rT8, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rFP, rs_rRA};
+static constexpr RegStorage core_regs_arr_64d[] =
+ {rs_rZEROd, rs_rATd, rs_rV0d, rs_rV1d, rs_rA0d, rs_rA1d, rs_rA2d, rs_rA3d, rs_rA4d, rs_rA5d,
+ rs_rA6d, rs_rA7d, rs_rT0d, rs_rT1d, rs_rT2d, rs_rT3d, rs_rS0d, rs_rS1d, rs_rS2d, rs_rS3d,
+ rs_rS4d, rs_rS5d, rs_rS6d, rs_rS7d, rs_rT8d, rs_rT9d, rs_rK0d, rs_rK1d, rs_rGPd, rs_rSPd,
+ rs_rFPd, rs_rRAd};
+#if 0
+// TODO: f24-f31 must be saved before calls and restored after.
+static constexpr RegStorage sp_regs_arr_64[] =
+ {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+ rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+ rs_rF21, rs_rF22, rs_rF23, rs_rF24, rs_rF25, rs_rF26, rs_rF27, rs_rF28, rs_rF29, rs_rF30,
+ rs_rF31};
+static constexpr RegStorage dp_regs_arr_64[] =
+ {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+ rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+ rs_rD21, rs_rD22, rs_rD23, rs_rD24, rs_rD25, rs_rD26, rs_rD27, rs_rD28, rs_rD29, rs_rD30,
+ rs_rD31};
+#else
+static constexpr RegStorage sp_regs_arr_64[] =
+ {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+ rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+ rs_rF21, rs_rF22, rs_rF23};
+static constexpr RegStorage dp_regs_arr_64[] =
+ {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+ rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+ rs_rD21, rs_rD22, rs_rD23};
+#endif
+static constexpr RegStorage reserved_regs_arr_64[] =
+ {rs_rZERO, rs_rAT, rs_rS0, rs_rS1, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rRA};
+static constexpr RegStorage reserved_regs_arr_64d[] =
+ {rs_rZEROd, rs_rATd, rs_rS0d, rs_rS1d, rs_rT9d, rs_rK0d, rs_rK1d, rs_rGPd, rs_rSPd, rs_rRAd};
+static constexpr RegStorage core_temps_arr_64[] =
+ {rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rA4, rs_rA5, rs_rA6, rs_rA7, rs_rT0, rs_rT1,
+ rs_rT2, rs_rT3, rs_rT8};
+static constexpr RegStorage core_temps_arr_64d[] =
+ {rs_rV0d, rs_rV1d, rs_rA0d, rs_rA1d, rs_rA2d, rs_rA3d, rs_rA4d, rs_rA5d, rs_rA6d, rs_rA7d,
+ rs_rT0d, rs_rT1d, rs_rT2d, rs_rT3d, rs_rT8d};
+#if 0
+// TODO: f24-f31 must be saved before calls and restored after.
+static constexpr RegStorage sp_temps_arr_64[] =
+ {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+ rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+ rs_rF21, rs_rF22, rs_rF23, rs_rF24, rs_rF25, rs_rF26, rs_rF27, rs_rF28, rs_rF29, rs_rF30,
+ rs_rF31};
+static constexpr RegStorage dp_temps_arr_64[] =
+ {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+ rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+ rs_rD21, rs_rD22, rs_rD23, rs_rD24, rs_rD25, rs_rD26, rs_rD27, rs_rD28, rs_rD29, rs_rD30,
+ rs_rD31};
+#else
+static constexpr RegStorage sp_temps_arr_64[] =
+ {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+ rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+ rs_rF21, rs_rF22, rs_rF23};
+static constexpr RegStorage dp_temps_arr_64[] =
+ {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+ rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+ rs_rD21, rs_rD22, rs_rD23};
+#endif
+
static constexpr ArrayRef<const RegStorage> empty_pool;
-static constexpr ArrayRef<const RegStorage> core_regs(core_regs_arr);
-static constexpr ArrayRef<const RegStorage> sp_regs(sp_regs_arr);
-static constexpr ArrayRef<const RegStorage> dp_fr0_regs(dp_fr0_regs_arr);
-static constexpr ArrayRef<const RegStorage> dp_fr1_regs(dp_fr1_regs_arr);
-static constexpr ArrayRef<const RegStorage> reserved_regs(reserved_regs_arr);
-static constexpr ArrayRef<const RegStorage> core_temps(core_temps_arr);
-static constexpr ArrayRef<const RegStorage> sp_temps(sp_temps_arr);
-static constexpr ArrayRef<const RegStorage> dp_fr0_temps(dp_fr0_temps_arr);
-static constexpr ArrayRef<const RegStorage> dp_fr1_temps(dp_fr1_temps_arr);
+static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32);
+static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32);
+static constexpr ArrayRef<const RegStorage> dp_fr0_regs_32(dp_fr0_regs_arr_32);
+static constexpr ArrayRef<const RegStorage> dp_fr1_regs_32(dp_fr1_regs_arr_32);
+static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
+static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32);
+static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32);
+static constexpr ArrayRef<const RegStorage> dp_fr0_temps_32(dp_fr0_temps_arr_32);
+static constexpr ArrayRef<const RegStorage> dp_fr1_temps_32(dp_fr1_temps_arr_32);
+
+static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64);
+static constexpr ArrayRef<const RegStorage> core_regs_64d(core_regs_arr_64d);
+static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64);
+static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64);
+static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64);
+static constexpr ArrayRef<const RegStorage> reserved_regs_64d(reserved_regs_arr_64d);
+static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64);
+static constexpr ArrayRef<const RegStorage> core_temps_64d(core_temps_arr_64d);
+static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64);
+static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64);
RegLocation MipsMir2Lir::LocCReturn() {
return mips_loc_c_return;
}
RegLocation MipsMir2Lir::LocCReturnRef() {
- return mips_loc_c_return;
+ return cu_->target64 ? mips64_loc_c_return_ref : mips_loc_c_return;
}
RegLocation MipsMir2Lir::LocCReturnWide() {
- return mips_loc_c_return_wide;
+ return cu_->target64 ? mips64_loc_c_return_wide : mips_loc_c_return_wide;
}
RegLocation MipsMir2Lir::LocCReturnFloat() {
@@ -86,39 +162,83 @@
}
RegLocation MipsMir2Lir::LocCReturnDouble() {
- return mips_loc_c_return_double;
+ if (cu_->target64) {
+ return mips64_loc_c_return_double;
+ } else if (fpuIs32Bit_) {
+ return mips_loc_c_return_double_fr0;
+ } else {
+ return mips_loc_c_return_double_fr1;
+ }
}
-// Convert k64BitSolo into k64BitPair
+// Convert k64BitSolo into k64BitPair.
RegStorage MipsMir2Lir::Solo64ToPair64(RegStorage reg) {
DCHECK(reg.IsDouble());
+ DCHECK_EQ(reg.GetRegNum() & 1, 0);
int reg_num = (reg.GetRegNum() & ~1) | RegStorage::kFloatingPoint;
return RegStorage(RegStorage::k64BitPair, reg_num, reg_num + 1);
}
+// Convert 64bit FP (k64BitSolo or k64BitPair) into k32BitSolo.
+// This routine is only used to allow a 64bit FPU to access FP registers 32bits at a time.
+RegStorage MipsMir2Lir::Fp64ToSolo32(RegStorage reg) {
+ DCHECK(!fpuIs32Bit_);
+ DCHECK(reg.IsDouble());
+ DCHECK(!reg.IsPair());
+ int reg_num = reg.GetRegNum() | RegStorage::kFloatingPoint;
+ return RegStorage(RegStorage::k32BitSolo, reg_num);
+}
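+// Illustrative uses of the two conversions above (a sketch, assuming the
+// register definitions in mips_lir.h):
+//
+//   Solo64ToPair64(rs_rD0_fr0);  // 32-bit FPU: D0 -> k64BitPair(F0, F1).
+//   Fp64ToSolo32(rs_rD0_fr1);    // 64-bit FPU: D0 -> k32BitSolo view of F0.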
+
+// Return a target-dependent special register.
+RegStorage MipsMir2Lir::TargetReg(SpecialTargetRegister reg, WideKind wide_kind) {
+ if (!cu_->target64 && wide_kind == kWide) {
+ DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 <= reg && reg < kFArg15) || (kRet0 == reg));
+ RegStorage ret_reg = RegStorage::MakeRegPair(TargetReg(reg),
+ TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
+ if (!fpuIs32Bit_ && ret_reg.IsFloat()) {
+ // Convert 64BitPair to 64BitSolo for 64bit FPUs.
+ RegStorage low = ret_reg.GetLow();
+ ret_reg = RegStorage::FloatSolo64(low.GetRegNum());
+ }
+ return ret_reg;
+ } else if (cu_->target64 && (wide_kind == kWide || wide_kind == kRef)) {
+ return As64BitReg(TargetReg(reg));
+ } else {
+ return TargetReg(reg);
+ }
+}
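+// Illustrative results of the WideKind overload above (a sketch):
+//
+//   TargetReg(kArg2, kWide);   // 32-bit target: k64BitPair(A2, A3).
+//   TargetReg(kFArg0, kWide);  // 32-bit target: pair (F12, F13), folded
+//                              // to FloatSolo64(12) when the FPU is 64-bit.
+//   TargetReg(kArg2, kWide);   // 64-bit target: the 64-bit view of A2.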
+
// Return a target-dependent special register.
RegStorage MipsMir2Lir::TargetReg(SpecialTargetRegister reg) {
RegStorage res_reg;
switch (reg) {
- case kSelf: res_reg = rs_rMIPS_SELF; break;
- case kSuspend: res_reg = rs_rMIPS_SUSPEND; break;
- case kLr: res_reg = rs_rMIPS_LR; break;
- case kPc: res_reg = rs_rMIPS_PC; break;
- case kSp: res_reg = rs_rMIPS_SP; break;
- case kArg0: res_reg = rs_rMIPS_ARG0; break;
- case kArg1: res_reg = rs_rMIPS_ARG1; break;
- case kArg2: res_reg = rs_rMIPS_ARG2; break;
- case kArg3: res_reg = rs_rMIPS_ARG3; break;
- case kFArg0: res_reg = rs_rMIPS_FARG0; break;
- case kFArg1: res_reg = rs_rMIPS_FARG1; break;
- case kFArg2: res_reg = rs_rMIPS_FARG2; break;
- case kFArg3: res_reg = rs_rMIPS_FARG3; break;
- case kRet0: res_reg = rs_rMIPS_RET0; break;
- case kRet1: res_reg = rs_rMIPS_RET1; break;
- case kInvokeTgt: res_reg = rs_rMIPS_INVOKE_TGT; break;
- case kHiddenArg: res_reg = rs_rT0; break;
+ case kSelf: res_reg = rs_rS1; break;
+ case kSuspend: res_reg = rs_rS0; break;
+ case kLr: res_reg = rs_rRA; break;
+ case kPc: res_reg = RegStorage::InvalidReg(); break;
+ case kSp: res_reg = rs_rSP; break;
+ case kArg0: res_reg = rs_rA0; break;
+ case kArg1: res_reg = rs_rA1; break;
+ case kArg2: res_reg = rs_rA2; break;
+ case kArg3: res_reg = rs_rA3; break;
+ case kArg4: res_reg = cu_->target64 ? rs_rA4 : RegStorage::InvalidReg(); break;
+ case kArg5: res_reg = cu_->target64 ? rs_rA5 : RegStorage::InvalidReg(); break;
+ case kArg6: res_reg = cu_->target64 ? rs_rA6 : RegStorage::InvalidReg(); break;
+ case kArg7: res_reg = cu_->target64 ? rs_rA7 : RegStorage::InvalidReg(); break;
+ case kFArg0: res_reg = rs_rF12; break;
+ case kFArg1: res_reg = rs_rF13; break;
+ case kFArg2: res_reg = rs_rF14; break;
+ case kFArg3: res_reg = rs_rF15; break;
+ case kFArg4: res_reg = cu_->target64 ? rs_rF16 : RegStorage::InvalidReg(); break;
+ case kFArg5: res_reg = cu_->target64 ? rs_rF17 : RegStorage::InvalidReg(); break;
+ case kFArg6: res_reg = cu_->target64 ? rs_rF18 : RegStorage::InvalidReg(); break;
+ case kFArg7: res_reg = cu_->target64 ? rs_rF19 : RegStorage::InvalidReg(); break;
+ case kRet0: res_reg = rs_rV0; break;
+ case kRet1: res_reg = rs_rV1; break;
+ case kInvokeTgt: res_reg = rs_rT9; break;
+ case kHiddenArg: res_reg = cu_->target64 ? rs_rT0 : rs_rT0_32; break;
case kHiddenFpArg: res_reg = RegStorage::InvalidReg(); break;
- case kCount: res_reg = rs_rMIPS_COUNT; break;
+ case kCount: res_reg = RegStorage::InvalidReg(); break;
default: res_reg = RegStorage::InvalidReg();
}
return res_reg;
@@ -140,32 +260,54 @@
return result;
}
+RegStorage MipsMir2Lir::InToRegStorageMips64Mapper::GetNextReg(ShortyArg arg) {
+ const SpecialTargetRegister coreArgMappingToPhysicalReg[] =
+ {kArg1, kArg2, kArg3, kArg4, kArg5, kArg6, kArg7};
+ const size_t coreArgMappingToPhysicalRegSize = arraysize(coreArgMappingToPhysicalReg);
+ const SpecialTargetRegister fpArgMappingToPhysicalReg[] =
+ {kFArg1, kFArg2, kFArg3, kFArg4, kFArg5, kFArg6, kFArg7};
+ const size_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
+
+ RegStorage result = RegStorage::InvalidReg();
+ if (arg.IsFP()) {
+ if (cur_arg_reg_ < fpArgMappingToPhysicalRegSize) {
+ DCHECK(!arg.IsRef());
+ result = m2l_->TargetReg(fpArgMappingToPhysicalReg[cur_arg_reg_++],
+ arg.IsWide() ? kWide : kNotWide);
+ }
+ } else {
+ if (cur_arg_reg_ < coreArgMappingToPhysicalRegSize) {
+ DCHECK(!(arg.IsWide() && arg.IsRef()));
+ result = m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_arg_reg_++],
+ arg.IsRef() ? kRef : (arg.IsWide() ? kWide : kNotWide));
+ }
+ }
+ return result;
+}
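+// Illustrative walk-through of the mapper above (a sketch) for a static
+// method taking (double, int, long); core and FP args share one slot
+// counter, matching the 64-bit calling convention:
+//
+//   double -> kFArg1 (wide), cur_arg_reg_ = 1
+//   int    -> kArg2,         cur_arg_reg_ = 2
+//   long   -> kArg3 (wide),  cur_arg_reg_ = 3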
+
/*
* Decode the register id.
*/
ResourceMask MipsMir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
- if (reg.IsDouble()) {
- if (cu_->compiler_driver->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
- ->Is32BitFloatingPoint()) {
- return ResourceMask::TwoBits((reg.GetRegNum() & ~1) + kMipsFPReg0);
- } else {
- return ResourceMask::TwoBits(reg.GetRegNum() * 2 + kMipsFPReg0);
- }
- } else if (reg.IsSingle()) {
- return ResourceMask::Bit(reg.GetRegNum() + kMipsFPReg0);
+ if (cu_->target64) {
+ return ResourceMask::Bit((reg.IsFloat() ? kMipsFPReg0 : 0) + reg.GetRegNum());
} else {
- return ResourceMask::Bit(reg.GetRegNum());
+ if (reg.IsDouble()) {
+ return ResourceMask::TwoBits((reg.GetRegNum() & ~1) + kMipsFPReg0);
+ } else if (reg.IsSingle()) {
+ return ResourceMask::Bit(reg.GetRegNum() + kMipsFPReg0);
+ } else {
+ return ResourceMask::Bit(reg.GetRegNum());
+ }
}
}
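// Illustrative mask values from the encoding above (a sketch):
//   32-bit target, double with reg num 2 (F2/F3): TwoBits(kMipsFPReg0 + 2).
//   32-bit target, single F5:                     Bit(kMipsFPReg0 + 5).
//   64-bit target, any FP reg n:                  Bit(kMipsFPReg0 + n).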
ResourceMask MipsMir2Lir::GetPCUseDefEncoding() const {
- return ResourceMask::Bit(kMipsRegPC);
+ return cu_->target64 ? ResourceMask::Bit(kMips64RegPC) : ResourceMask::Bit(kMipsRegPC);
}
-
-void MipsMir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
- ResourceMask* use_mask, ResourceMask* def_mask) {
- DCHECK_EQ(cu_->instruction_set, kMips);
+void MipsMir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags, ResourceMask* use_mask,
+ ResourceMask* def_mask) {
DCHECK(!lir->flags.use_def_invalid);
// Mips-specific resource map setup here.
@@ -181,20 +323,22 @@
def_mask->SetBit(kMipsRegLR);
}
- if (flags & REG_DEF_HI) {
- def_mask->SetBit(kMipsRegHI);
- }
+ if (!cu_->target64) {
+ if (flags & REG_DEF_HI) {
+ def_mask->SetBit(kMipsRegHI);
+ }
- if (flags & REG_DEF_LO) {
- def_mask->SetBit(kMipsRegLO);
- }
+ if (flags & REG_DEF_LO) {
+ def_mask->SetBit(kMipsRegLO);
+ }
- if (flags & REG_USE_HI) {
- use_mask->SetBit(kMipsRegHI);
- }
+ if (flags & REG_USE_HI) {
+ use_mask->SetBit(kMipsRegHI);
+ }
- if (flags & REG_USE_LO) {
- use_mask->SetBit(kMipsRegLO);
+ if (flags & REG_USE_LO) {
+ use_mask->SetBit(kMipsRegLO);
+ }
}
}
@@ -207,9 +351,16 @@
"t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"
};
+static const char *mips64_reg_name[MIPS_REG_COUNT] = {
+ "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+ "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"
+};
+
/*
* Interpret a format string and build a string no longer than size
- * See format key in Assemble.c.
+ * See format key in assemble_mips.cc.
*/
std::string MipsMir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
std::string buf;
@@ -284,7 +435,11 @@
break;
case 'r':
DCHECK(operand >= 0 && operand < MIPS_REG_COUNT);
- strcpy(tbuf, mips_reg_name[operand]);
+ if (cu_->target64) {
+ strcpy(tbuf, mips64_reg_name[operand]);
+ } else {
+ strcpy(tbuf, mips_reg_name[operand]);
+ }
break;
case 'N':
// Placeholder for delay slot handling
@@ -303,7 +458,7 @@
return buf;
}
-// FIXME: need to redo resource maps for MIPS - fix this at that time
+// FIXME: need to redo resource maps for MIPS - fix this at that time.
void MipsMir2Lir::DumpResourceMask(LIR *mips_lir, const ResourceMask& mask, const char *prefix) {
char buf[256];
buf[0] = 0;
@@ -314,7 +469,7 @@
char num[8];
int i;
- for (i = 0; i < kMipsRegEnd; i++) {
+ for (i = 0; i < (cu_->target64 ? kMips64RegEnd : kMipsRegEnd); i++) {
if (mask.HasBit(i)) {
snprintf(num, arraysize(num), "%d ", i);
strcat(buf, num);
@@ -327,7 +482,7 @@
if (mask.HasBit(ResourceMask::kFPStatus)) {
strcat(buf, "fpcc ");
}
- /* Memory bits */
+ // Memory bits.
if (mips_lir && (mask.HasBit(ResourceMask::kDalvikReg))) {
snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
DECODE_ALIAS_INFO_REG(mips_lir->flags.alias_info),
@@ -362,64 +517,114 @@
/* Clobber all regs that might be used by an external C call */
void MipsMir2Lir::ClobberCallerSave() {
- Clobber(rs_rZERO);
- Clobber(rs_rAT);
- Clobber(rs_rV0);
- Clobber(rs_rV1);
- Clobber(rs_rA0);
- Clobber(rs_rA1);
- Clobber(rs_rA2);
- Clobber(rs_rA3);
- Clobber(rs_rT0);
- Clobber(rs_rT1);
- Clobber(rs_rT2);
- Clobber(rs_rT3);
- Clobber(rs_rT4);
- Clobber(rs_rT5);
- Clobber(rs_rT6);
- Clobber(rs_rT7);
- Clobber(rs_rT8);
- Clobber(rs_rT9);
- Clobber(rs_rK0);
- Clobber(rs_rK1);
- Clobber(rs_rGP);
- Clobber(rs_rFP);
- Clobber(rs_rRA);
- Clobber(rs_rF0);
- Clobber(rs_rF1);
- Clobber(rs_rF2);
- Clobber(rs_rF3);
- Clobber(rs_rF4);
- Clobber(rs_rF5);
- Clobber(rs_rF6);
- Clobber(rs_rF7);
- Clobber(rs_rF8);
- Clobber(rs_rF9);
- Clobber(rs_rF10);
- Clobber(rs_rF11);
- Clobber(rs_rF12);
- Clobber(rs_rF13);
- Clobber(rs_rF14);
- Clobber(rs_rF15);
- if (cu_->compiler_driver->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
- ->Is32BitFloatingPoint()) {
- Clobber(rs_rD0_fr0);
- Clobber(rs_rD1_fr0);
- Clobber(rs_rD2_fr0);
- Clobber(rs_rD3_fr0);
- Clobber(rs_rD4_fr0);
- Clobber(rs_rD5_fr0);
- Clobber(rs_rD6_fr0);
- Clobber(rs_rD7_fr0);
+ if (cu_->target64) {
+ Clobber(rs_rZEROd);
+ Clobber(rs_rATd);
+ Clobber(rs_rV0d);
+ Clobber(rs_rV1d);
+ Clobber(rs_rA0d);
+ Clobber(rs_rA1d);
+ Clobber(rs_rA2d);
+ Clobber(rs_rA3d);
+ Clobber(rs_rA4d);
+ Clobber(rs_rA5d);
+ Clobber(rs_rA6d);
+ Clobber(rs_rA7d);
+ Clobber(rs_rT0d);
+ Clobber(rs_rT1d);
+ Clobber(rs_rT2d);
+ Clobber(rs_rT3d);
+ Clobber(rs_rT8d);
+ Clobber(rs_rT9d);
+ Clobber(rs_rK0d);
+ Clobber(rs_rK1d);
+ Clobber(rs_rGPd);
+ Clobber(rs_rFPd);
+ Clobber(rs_rRAd);
+
+ Clobber(rs_rF0);
+ Clobber(rs_rF1);
+ Clobber(rs_rF2);
+ Clobber(rs_rF3);
+ Clobber(rs_rF4);
+ Clobber(rs_rF5);
+ Clobber(rs_rF6);
+ Clobber(rs_rF7);
+ Clobber(rs_rF8);
+ Clobber(rs_rF9);
+ Clobber(rs_rF10);
+ Clobber(rs_rF11);
+ Clobber(rs_rF12);
+ Clobber(rs_rF13);
+ Clobber(rs_rF14);
+ Clobber(rs_rF15);
+ Clobber(rs_rD0);
+ Clobber(rs_rD1);
+ Clobber(rs_rD2);
+ Clobber(rs_rD3);
+ Clobber(rs_rD4);
+ Clobber(rs_rD5);
+ Clobber(rs_rD6);
+ Clobber(rs_rD7);
} else {
- Clobber(rs_rD0_fr1);
- Clobber(rs_rD1_fr1);
- Clobber(rs_rD2_fr1);
- Clobber(rs_rD3_fr1);
- Clobber(rs_rD4_fr1);
- Clobber(rs_rD5_fr1);
- Clobber(rs_rD6_fr1);
- Clobber(rs_rD7_fr1);
+ Clobber(rs_rZERO);
+ Clobber(rs_rAT);
+ Clobber(rs_rV0);
+ Clobber(rs_rV1);
+ Clobber(rs_rA0);
+ Clobber(rs_rA1);
+ Clobber(rs_rA2);
+ Clobber(rs_rA3);
+ Clobber(rs_rT0_32);
+ Clobber(rs_rT1_32);
+ Clobber(rs_rT2_32);
+ Clobber(rs_rT3_32);
+ Clobber(rs_rT4_32);
+ Clobber(rs_rT5_32);
+ Clobber(rs_rT6_32);
+ Clobber(rs_rT7_32);
+ Clobber(rs_rT8);
+ Clobber(rs_rT9);
+ Clobber(rs_rK0);
+ Clobber(rs_rK1);
+ Clobber(rs_rGP);
+ Clobber(rs_rFP);
+ Clobber(rs_rRA);
+ Clobber(rs_rF0);
+ Clobber(rs_rF1);
+ Clobber(rs_rF2);
+ Clobber(rs_rF3);
+ Clobber(rs_rF4);
+ Clobber(rs_rF5);
+ Clobber(rs_rF6);
+ Clobber(rs_rF7);
+ Clobber(rs_rF8);
+ Clobber(rs_rF9);
+ Clobber(rs_rF10);
+ Clobber(rs_rF11);
+ Clobber(rs_rF12);
+ Clobber(rs_rF13);
+ Clobber(rs_rF14);
+ Clobber(rs_rF15);
+ if (fpuIs32Bit_) {
+ Clobber(rs_rD0_fr0);
+ Clobber(rs_rD1_fr0);
+ Clobber(rs_rD2_fr0);
+ Clobber(rs_rD3_fr0);
+ Clobber(rs_rD4_fr0);
+ Clobber(rs_rD5_fr0);
+ Clobber(rs_rD6_fr0);
+ Clobber(rs_rD7_fr0);
+ } else {
+ Clobber(rs_rD0_fr1);
+ Clobber(rs_rD1_fr1);
+ Clobber(rs_rD2_fr1);
+ Clobber(rs_rD3_fr1);
+ Clobber(rs_rD4_fr1);
+ Clobber(rs_rD5_fr1);
+ Clobber(rs_rD6_fr1);
+ Clobber(rs_rD7_fr1);
+ }
}
}
@@ -437,18 +642,30 @@
/* To be used when explicitly managing register use */
void MipsMir2Lir::LockCallTemps() {
- LockTemp(rs_rMIPS_ARG0);
- LockTemp(rs_rMIPS_ARG1);
- LockTemp(rs_rMIPS_ARG2);
- LockTemp(rs_rMIPS_ARG3);
+ LockTemp(TargetReg(kArg0));
+ LockTemp(TargetReg(kArg1));
+ LockTemp(TargetReg(kArg2));
+ LockTemp(TargetReg(kArg3));
+ if (cu_->target64) {
+ LockTemp(TargetReg(kArg4));
+ LockTemp(TargetReg(kArg5));
+ LockTemp(TargetReg(kArg6));
+ LockTemp(TargetReg(kArg7));
+ }
}
/* To be used when explicitly managing register use */
void MipsMir2Lir::FreeCallTemps() {
- FreeTemp(rs_rMIPS_ARG0);
- FreeTemp(rs_rMIPS_ARG1);
- FreeTemp(rs_rMIPS_ARG2);
- FreeTemp(rs_rMIPS_ARG3);
+ FreeTemp(TargetReg(kArg0));
+ FreeTemp(TargetReg(kArg1));
+ FreeTemp(TargetReg(kArg2));
+ FreeTemp(TargetReg(kArg3));
+ if (cu_->target64) {
+ FreeTemp(TargetReg(kArg4));
+ FreeTemp(TargetReg(kArg5));
+ FreeTemp(TargetReg(kArg6));
+ FreeTemp(TargetReg(kArg7));
+ }
FreeTemp(TargetReg(kHiddenArg));
}
@@ -462,39 +679,63 @@
}
void MipsMir2Lir::CompilerInitializeRegAlloc() {
- const bool fpu_is_32bit =
- cu_->compiler_driver->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
- ->Is32BitFloatingPoint();
- reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs, empty_pool /* core64 */,
- sp_regs,
- fpu_is_32bit ? dp_fr0_regs : dp_fr1_regs,
- reserved_regs, empty_pool /* reserved64 */,
- core_temps, empty_pool /* core64_temps */,
- sp_temps,
- fpu_is_32bit ? dp_fr0_temps : dp_fr1_temps));
+ if (cu_->target64) {
+ reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64d, sp_regs_64,
+ dp_regs_64, reserved_regs_64, reserved_regs_64d,
+ core_temps_64, core_temps_64d, sp_temps_64,
+ dp_temps_64));
- // Target-specific adjustments.
-
- // Alias single precision floats to appropriate half of overlapping double.
- for (RegisterInfo* info : reg_pool_->sp_regs_) {
- int sp_reg_num = info->GetReg().GetRegNum();
- int dp_reg_num;
- if (fpu_is_32bit) {
- dp_reg_num = sp_reg_num & ~1;
- } else {
- dp_reg_num = sp_reg_num >> 1;
+ // Alias single precision floats to appropriate half of overlapping double.
+ for (RegisterInfo* info : reg_pool_->sp_regs_) {
+ int sp_reg_num = info->GetReg().GetRegNum();
+ int dp_reg_num = sp_reg_num;
+ RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
+ RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
+ // Double precision register's master storage should refer to itself.
+ DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
+ // Redirect single precision's master storage to master.
+ info->SetMaster(dp_reg_info);
+ // Singles should show a single 32-bit mask bit, at first referring to the low half.
+ DCHECK_EQ(info->StorageMask(), 0x1U);
}
- RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
- RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
- // Double precision register's master storage should refer to itself.
- DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
- // Redirect single precision's master storage to master.
- info->SetMaster(dp_reg_info);
- // Singles should show a single 32-bit mask bit, at first referring to the low half.
- DCHECK_EQ(info->StorageMask(), 0x1U);
- if (sp_reg_num & 1) {
- // For odd singles, change to user the high word of the backing double.
- info->SetStorageMask(0x2);
+
+ // Alias 32bit W registers to corresponding 64bit X registers.
+ for (RegisterInfo* info : reg_pool_->core_regs_) {
+ int d_reg_num = info->GetReg().GetRegNum();
+ RegStorage d_reg = RegStorage::Solo64(d_reg_num);
+ RegisterInfo* d_reg_info = GetRegInfo(d_reg);
+ // 64bit D register's master storage should refer to itself.
+ DCHECK_EQ(d_reg_info, d_reg_info->Master());
+ // Redirect 32bit master storage to 64bit D.
+ info->SetMaster(d_reg_info);
+ // 32bit should show a single 32-bit mask bit, at first referring to the low half.
+ DCHECK_EQ(info->StorageMask(), 0x1U);
+ }
+ } else {
+ reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, // core64
+ sp_regs_32,
+ fpuIs32Bit_ ? dp_fr0_regs_32 : dp_fr1_regs_32,
+ reserved_regs_32, empty_pool, // reserved64
+ core_temps_32, empty_pool, // core64_temps
+ sp_temps_32,
+ fpuIs32Bit_ ? dp_fr0_temps_32 : dp_fr1_temps_32));
+
+ // Alias single precision floats to appropriate half of overlapping double.
+ for (RegisterInfo* info : reg_pool_->sp_regs_) {
+ int sp_reg_num = info->GetReg().GetRegNum();
+ int dp_reg_num = sp_reg_num & ~1;
+ RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
+ RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
+ // Double precision register's master storage should refer to itself.
+ DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
+ // Redirect single precision's master storage to master.
+ info->SetMaster(dp_reg_info);
+ // Singles should show a single 32-bit mask bit, at first referring to the low half.
+ DCHECK_EQ(info->StorageMask(), 0x1U);
+ if (sp_reg_num & 1) {
+ // For odd singles, change to use the high word of the backing double.
+ info->SetStorageMask(0x2);
+ }
}
}
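// Illustrative aliasing from the loops above (a sketch): on a 32-bit
// target, F4 and F5 both master to the overlapping double (reg num 4);
// F4 keeps storage mask 0x1 (low word) and F5 is switched to 0x2 (high
// word). On a 64-bit target each single Fn masters to the same-numbered
// double and keeps mask 0x1.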
@@ -502,10 +743,10 @@
// TODO: adjust when we roll to hard float calling convention.
reg_pool_->next_core_reg_ = 2;
reg_pool_->next_sp_reg_ = 2;
- if (fpu_is_32bit) {
- reg_pool_->next_dp_reg_ = 2;
- } else {
+ if (cu_->target64) {
reg_pool_->next_dp_reg_ = 1;
+ } else {
+ reg_pool_->next_dp_reg_ = 2;
}
}
@@ -517,14 +758,24 @@
*/
RegStorage MipsMir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
// NOTE: native pointer.
- LoadWordDisp(rs_rMIPS_SELF, GetThreadOffset<4>(trampoline).Int32Value(), rs_rT9);
- return rs_rT9;
+ if (cu_->target64) {
+ LoadWordDisp(TargetPtrReg(kSelf), GetThreadOffset<8>(trampoline).Int32Value(),
+ TargetPtrReg(kInvokeTgt));
+ } else {
+ LoadWordDisp(TargetPtrReg(kSelf), GetThreadOffset<4>(trampoline).Int32Value(),
+ TargetPtrReg(kInvokeTgt));
+ }
+ return TargetPtrReg(kInvokeTgt);
}
LIR* MipsMir2Lir::CheckSuspendUsingLoad() {
RegStorage tmp = AllocTemp();
// NOTE: native pointer.
- LoadWordDisp(rs_rMIPS_SELF, Thread::ThreadSuspendTriggerOffset<4>().Int32Value(), tmp);
+ if (cu_->target64) {
+ LoadWordDisp(TargetPtrReg(kSelf), Thread::ThreadSuspendTriggerOffset<8>().Int32Value(), tmp);
+ } else {
+ LoadWordDisp(TargetPtrReg(kSelf), Thread::ThreadSuspendTriggerOffset<4>().Int32Value(), tmp);
+ }
LIR *inst = LoadWordDisp(tmp, 0, tmp);
FreeTemp(tmp);
return inst;
@@ -532,31 +783,47 @@
LIR* MipsMir2Lir::GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest) {
DCHECK(!r_dest.IsFloat()); // See RegClassForFieldLoadStore().
- DCHECK(r_dest.IsPair());
+ if (!cu_->target64) {
+ DCHECK(r_dest.IsPair());
+ }
ClobberCallerSave();
- LockCallTemps(); // Using fixed registers
+ LockCallTemps(); // Using fixed registers.
RegStorage reg_ptr = TargetReg(kArg0);
OpRegRegImm(kOpAdd, reg_ptr, r_base, displacement);
RegStorage r_tgt = LoadHelper(kQuickA64Load);
LIR *ret = OpReg(kOpBlx, r_tgt);
- RegStorage reg_ret = RegStorage::MakeRegPair(TargetReg(kRet0), TargetReg(kRet1));
- OpRegCopyWide(r_dest, reg_ret);
+ RegStorage reg_ret;
+ if (cu_->target64) {
+ OpRegCopy(r_dest, TargetReg(kRet0));
+ } else {
+ reg_ret = RegStorage::MakeRegPair(TargetReg(kRet0), TargetReg(kRet1));
+ OpRegCopyWide(r_dest, reg_ret);
+ }
return ret;
}
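// Illustrative sequence generated above on a 32-bit target (a sketch of
// the quick entrypoint convention: address in kArg0, 64-bit result back
// in the (V0, V1) pair; V0 alone on 64-bit targets):
//
//   addiu a0, r_base, displacement
//   lw    t9, <kQuickA64Load thread offset>(s1)
//   jalr  t9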
LIR* MipsMir2Lir::GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src) {
DCHECK(!r_src.IsFloat()); // See RegClassForFieldLoadStore().
- DCHECK(r_src.IsPair());
+ if (cu_->target64) {
+ DCHECK(!r_src.IsPair());
+ } else {
+ DCHECK(r_src.IsPair());
+ }
ClobberCallerSave();
- LockCallTemps(); // Using fixed registers
+ LockCallTemps(); // Using fixed registers.
RegStorage temp_ptr = AllocTemp();
OpRegRegImm(kOpAdd, temp_ptr, r_base, displacement);
RegStorage temp_value = AllocTempWide();
OpRegCopyWide(temp_value, r_src);
- RegStorage reg_ptr = TargetReg(kArg0);
- OpRegCopy(reg_ptr, temp_ptr);
- RegStorage reg_value = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
- OpRegCopyWide(reg_value, temp_value);
+ if (cu_->target64) {
+ OpRegCopyWide(TargetReg(kArg0, kWide), temp_ptr);
+ OpRegCopyWide(TargetReg(kArg1, kWide), temp_value);
+ } else {
+ RegStorage reg_ptr = TargetReg(kArg0);
+ OpRegCopy(reg_ptr, temp_ptr);
+ RegStorage reg_value = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
+ OpRegCopyWide(reg_value, temp_value);
+ }
FreeTemp(temp_ptr);
FreeTemp(temp_value);
RegStorage r_tgt = LoadHelper(kQuickA64Store);
@@ -568,12 +835,15 @@
return;
}
uint32_t mask = core_spill_mask_;
- int offset = num_core_spills_ * 4;
- OpRegImm(kOpSub, rs_rSP, offset);
+ int ptr_size = cu_->target64 ? 8 : 4;
+ int offset = num_core_spills_ * ptr_size;
+ const RegStorage rs_sp = TargetPtrReg(kSp);
+ OpRegImm(kOpSub, rs_sp, offset);
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
- offset -= 4;
- Store32Disp(rs_rMIPS_SP, offset, RegStorage::Solo32(reg));
+ offset -= ptr_size;
+ StoreWordDisp(rs_sp, offset,
+ cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg));
}
}
}
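// Worked example for the spill loop above (a sketch): with
// core_spill_mask_ = 0x80010000 (S0 and RA) and num_core_spills_ = 2 on a
// 32-bit target, the emitted fragment is
//
//   addiu sp, sp, -8
//   sw    s0, 4(sp)   // reg 16
//   sw    ra, 0(sp)   // reg 31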
@@ -583,14 +853,17 @@
return;
}
uint32_t mask = core_spill_mask_;
- int offset = frame_size_;
+ int offset = frame_size_;
+ int ptr_size = cu_->target64 ? 8 : 4;
+ const RegStorage rs_sp = TargetPtrReg(kSp);
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
- offset -= 4;
- Load32Disp(rs_rMIPS_SP, offset, RegStorage::Solo32(reg));
+ offset -= ptr_size;
+ LoadWordDisp(rs_sp, offset,
+ cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg));
}
}
- OpRegImm(kOpAdd, rs_rSP, frame_size_);
+ OpRegImm(kOpAdd, rs_sp, frame_size_);
}
bool MipsMir2Lir::IsUnconditionalBranch(LIR* lir) {
@@ -610,7 +883,12 @@
}
MipsMir2Lir::MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
- : Mir2Lir(cu, mir_graph, arena), in_to_reg_storage_mips_mapper_(this) {
+ : Mir2Lir(cu, mir_graph, arena), in_to_reg_storage_mips64_mapper_(this),
+ in_to_reg_storage_mips_mapper_(this),
+ isaIsR6_(cu_->target64 ? true : cu->compiler_driver->GetInstructionSetFeatures()
+ ->AsMipsInstructionSetFeatures()->IsR6()),
+ fpuIs32Bit_(cu_->target64 ? false : cu->compiler_driver->GetInstructionSetFeatures()
+ ->AsMipsInstructionSetFeatures()->Is32BitFloatingPoint()) {
for (int i = 0; i < kMipsLast; i++) {
DCHECK_EQ(MipsMir2Lir::EncodingMap[i].opcode, i)
<< "Encoding order for " << MipsMir2Lir::EncodingMap[i].name
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 2d26922..bf0e0fc 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -26,30 +26,70 @@
namespace art {
-/* This file contains codegen for the MIPS32 ISA. */
+/* This file contains codegen for the Mips ISA. */
LIR* MipsMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
int opcode;
- /* must be both DOUBLE or both not DOUBLE */
- DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
- if (r_dest.IsDouble()) {
- opcode = kMipsFmovd;
- } else {
- if (r_dest.IsSingle()) {
- if (r_src.IsSingle()) {
- opcode = kMipsFmovs;
+ if (cu_->target64) {
+ DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());
+ if (r_dest.Is64Bit()) {
+ if (r_dest.IsDouble()) {
+ if (r_src.IsDouble()) {
+ opcode = kMipsFmovd;
+ } else {
+ // Note the operands are swapped for the dmtc1 instr.
+ RegStorage t_opnd = r_src;
+ r_src = r_dest;
+ r_dest = t_opnd;
+ opcode = kMips64Dmtc1;
+ }
} else {
- /* note the operands are swapped for the mtc1 instr */
- RegStorage t_opnd = r_src;
- r_src = r_dest;
- r_dest = t_opnd;
- opcode = kMipsMtc1;
+ DCHECK(r_src.IsDouble());
+ opcode = kMips64Dmfc1;
}
} else {
- DCHECK(r_src.IsSingle());
- opcode = kMipsMfc1;
+ if (r_dest.IsSingle()) {
+ if (r_src.IsSingle()) {
+ opcode = kMipsFmovs;
+ } else {
+ // Note the operands are swapped for the mtc1 instr.
+ RegStorage t_opnd = r_src;
+ r_src = r_dest;
+ r_dest = t_opnd;
+ opcode = kMipsMtc1;
+ }
+ } else {
+ DCHECK(r_src.IsSingle());
+ opcode = kMipsMfc1;
+ }
+ }
+ } else {
+ // Must be both DOUBLE or both not DOUBLE.
+ DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
+ if (r_dest.IsDouble()) {
+ opcode = kMipsFmovd;
+ } else {
+ if (r_dest.IsSingle()) {
+ if (r_src.IsSingle()) {
+ opcode = kMipsFmovs;
+ } else {
+ // Note the operands are swapped for the mtc1 instr.
+ RegStorage t_opnd = r_src;
+ r_src = r_dest;
+ r_dest = t_opnd;
+ opcode = kMipsMtc1;
+ }
+ } else {
+ DCHECK(r_src.IsSingle());
+ opcode = kMipsMfc1;
+ }
}
}
- LIR* res = RawLIR(current_dalvik_offset_, opcode, r_src.GetReg(), r_dest.GetReg());
+ LIR* res;
+ if (cu_->target64) {
+ res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
+ } else {
+ res = RawLIR(current_dalvik_offset_, opcode, r_src.GetReg(), r_dest.GetReg());
+ }
if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
res->flags.is_nop = true;
}
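// ISA note for the operand swaps above (a sketch): mtc1/dmtc1 write the
// FP register from the GPR (mtc1 rt, fs: fs <- rt) while mfc1/dmfc1 read
// it (rt <- fs); swapping dest/src before emission lines the operands up
// with the slot order the FP move encodings expect.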
@@ -95,7 +135,7 @@
r_dest = AllocTemp();
}
- /* See if the value can be constructed cheaply */
+ // See if the value can be constructed cheaply.
if (value == 0) {
res = NewLIR2(kMipsMove, r_dest.GetReg(), rZERO);
} else if (IsUint<16>(value)) {
@@ -118,6 +158,117 @@
return res;
}
+LIR* MipsMir2Lir::LoadConstantWideNoClobber(RegStorage r_dest, int64_t value) {
+ LIR* res = nullptr;
+ DCHECK(r_dest.Is64Bit());
+ RegStorage r_dest_save = r_dest;
+ bool is_fp_reg = r_dest.IsFloat();
+ if (is_fp_reg) {
+ DCHECK(r_dest.IsDouble());
+ r_dest = AllocTemp();
+ }
+
+ int bit31 = (value & UINT64_C(0x80000000)) != 0;
+
+ // Loads with 1 instruction.
+ if (IsUint<16>(value)) {
+ res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, value);
+ } else if (IsInt<16>(value)) {
+ res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, value);
+ } else if ((value & 0xFFFF) == 0 && IsInt<16>(value >> 16)) {
+ res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
+ } else if (IsInt<32>(value)) {
+ // Loads with 2 instructions.
+ res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
+ NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), value);
+ } else if ((value & 0xFFFF0000) == 0 && IsInt<16>(value >> 32)) {
+ res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, value);
+ NewLIR2(kMips64Dahi, r_dest.GetReg(), value >> 32);
+ } else if ((value & UINT64_C(0xFFFFFFFF0000)) == 0) {
+ res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, value);
+ NewLIR2(kMips64Dati, r_dest.GetReg(), value >> 48);
+ } else if ((value & 0xFFFF) == 0 && (value >> 32) >= (-32768 - bit31) &&
+ (value >> 32) <= (32767 - bit31)) {
+ res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
+ NewLIR2(kMips64Dahi, r_dest.GetReg(), (value >> 32) + bit31);
+ } else if ((value & 0xFFFF) == 0 && ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF)) {
+ res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
+ NewLIR2(kMips64Dati, r_dest.GetReg(), (value >> 48) + bit31);
+ } else {
+ int64_t tmp = value;
+ int shift_cnt = 0;
+ while ((tmp & 1) == 0) {
+ tmp >>= 1;
+ shift_cnt++;
+ }
+
+ if (IsUint<16>(tmp)) {
+ res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, tmp);
+ NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+ shift_cnt & 0x1F);
+ } else if (IsInt<16>(tmp)) {
+ res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
+ NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+ shift_cnt & 0x1F);
+ } else if (IsInt<32>(tmp)) {
+ // Loads with 3 instructions.
+ res = NewLIR2(kMipsLui, r_dest.GetReg(), tmp >> 16);
+ NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), tmp);
+ NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+ shift_cnt & 0x1F);
+ } else {
+ tmp = value >> 16;
+ shift_cnt = 16;
+ while ((tmp & 1) == 0) {
+ tmp >>= 1;
+ shift_cnt++;
+ }
+
+ if (IsUint<16>(tmp)) {
+ res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, tmp);
+ NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+ shift_cnt & 0x1F);
+ NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), value);
+ } else if (IsInt<16>(tmp)) {
+ res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
+ NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+ shift_cnt & 0x1F);
+ NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), value);
+ } else {
+ // Loads with 3-4 instructions.
+ uint64_t tmp2 = value;
+ if (((tmp2 >> 16) & 0xFFFF) != 0 || (tmp2 & 0xFFFFFFFF) == 0) {
+ res = NewLIR2(kMipsLui, r_dest.GetReg(), tmp2 >> 16);
+ }
+ if ((tmp2 & 0xFFFF) != 0) {
+ if (res)
+ NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), tmp2);
+ else
+ res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, tmp2);
+ }
+ if (bit31) {
+ tmp2 += UINT64_C(0x100000000);
+ }
+ if (((tmp2 >> 32) & 0xFFFF) != 0) {
+ NewLIR2(kMips64Dahi, r_dest.GetReg(), tmp2 >> 32);
+ }
+ if (tmp2 & UINT64_C(0x800000000000)) {
+ tmp2 += UINT64_C(0x1000000000000);
+ }
+ if ((tmp2 >> 48) != 0) {
+ NewLIR2(kMips64Dati, r_dest.GetReg(), tmp2 >> 48);
+ }
+ }
+ }
+ }
+
+ if (is_fp_reg) {
+ NewLIR2(kMips64Dmtc1, r_dest.GetReg(), r_dest_save.GetReg());
+ FreeTemp(r_dest);
+ }
+ return res;
+}
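+// Worked examples for the builder above (a sketch):
+//
+//   value = 0x100000000:
+//     ori  rd, zero, 0
+//     dahi rd, 1            // adds 1 << 32
+//
+//   value = 0x80000000 (bit 31 set, upper word zero):
+//     lui  rd, 0x8000       // sign-extends to 0xFFFFFFFF80000000
+//     dahi rd, 1            // +(1 << 32) cancels the sign extension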
+
LIR* MipsMir2Lir::OpUnconditionalBranch(LIR* target) {
LIR* res = NewLIR1(kMipsB, 0 /* offset to be patched during assembly */);
res->target = target;
@@ -136,53 +287,33 @@
default:
LOG(FATAL) << "Bad case in OpReg";
}
- return NewLIR2(opcode, rRA, r_dest_src.GetReg());
+ return NewLIR2(opcode, cu_->target64 ? rRAd : rRA, r_dest_src.GetReg());
}
LIR* MipsMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
- LIR *res;
- bool neg = (value < 0);
- int abs_value = (neg) ? -value : value;
- bool short_form = (abs_value & 0xff) == abs_value;
- MipsOpCode opcode = kMipsNop;
- switch (op) {
- case kOpAdd:
- return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
- break;
- case kOpSub:
- return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
- break;
- default:
- LOG(FATAL) << "Bad case in OpRegImm";
- break;
- }
- if (short_form) {
- res = NewLIR2(opcode, r_dest_src1.GetReg(), abs_value);
+ if ((op == kOpAdd) || (op == kOpSub)) {
+ return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
} else {
- RegStorage r_scratch = AllocTemp();
- res = LoadConstant(r_scratch, value);
- if (op == kOpCmp)
- NewLIR2(opcode, r_dest_src1.GetReg(), r_scratch.GetReg());
- else
- NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_scratch.GetReg());
+ LOG(FATAL) << "Bad case in OpRegImm";
}
- return res;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
MipsOpCode opcode = kMipsNop;
+ bool is64bit = cu_->target64 && (r_dest.Is64Bit() || r_src1.Is64Bit() || r_src2.Is64Bit());
switch (op) {
case kOpAdd:
- opcode = kMipsAddu;
+ opcode = is64bit ? kMips64Daddu : kMipsAddu;
break;
case kOpSub:
- opcode = kMipsSubu;
+ opcode = is64bit ? kMips64Dsubu : kMipsSubu;
break;
case kOpAnd:
opcode = kMipsAnd;
break;
case kOpMul:
- opcode = kMipsMul;
+ opcode = isaIsR6_ ? kMipsR6Mul : kMipsR2Mul;
break;
case kOpOr:
opcode = kMipsOr;
@@ -191,20 +322,20 @@
opcode = kMipsXor;
break;
case kOpLsl:
- opcode = kMipsSllv;
+ opcode = is64bit ? kMips64Dsllv : kMipsSllv;
break;
case kOpLsr:
- opcode = kMipsSrlv;
+ opcode = is64bit ? kMips64Dsrlv : kMipsSrlv;
break;
case kOpAsr:
- opcode = kMipsSrav;
+ opcode = is64bit ? kMips64Dsrav : kMipsSrav;
break;
case kOpAdc:
case kOpSbc:
LOG(FATAL) << "No carry bit on MIPS";
break;
default:
- LOG(FATAL) << "bad case in OpRegRegReg";
+ LOG(FATAL) << "Bad case in OpRegRegReg";
break;
}
return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
@@ -214,36 +345,67 @@
LIR *res;
MipsOpCode opcode = kMipsNop;
bool short_form = true;
+ bool is64bit = cu_->target64 && (r_dest.Is64Bit() || r_src1.Is64Bit());
switch (op) {
case kOpAdd:
if (IS_SIMM16(value)) {
- opcode = kMipsAddiu;
+ opcode = is64bit ? kMips64Daddiu : kMipsAddiu;
} else {
short_form = false;
- opcode = kMipsAddu;
+ opcode = is64bit ? kMips64Daddu : kMipsAddu;
}
break;
case kOpSub:
if (IS_SIMM16((-value))) {
value = -value;
- opcode = kMipsAddiu;
+ opcode = is64bit ? kMips64Daddiu : kMipsAddiu;
} else {
short_form = false;
- opcode = kMipsSubu;
+ opcode = is64bit ? kMips64Dsubu : kMipsSubu;
}
break;
case kOpLsl:
- DCHECK(value >= 0 && value <= 31);
- opcode = kMipsSll;
+ if (is64bit) {
+ DCHECK(value >= 0 && value <= 63);
+ if (value >= 0 && value <= 31) {
+ opcode = kMips64Dsll;
+ } else {
+ opcode = kMips64Dsll32;
+ value = value - 32;
+ }
+ } else {
+ DCHECK(value >= 0 && value <= 31);
+ opcode = kMipsSll;
+ }
break;
case kOpLsr:
- DCHECK(value >= 0 && value <= 31);
- opcode = kMipsSrl;
+ if (is64bit) {
+ DCHECK(value >= 0 && value <= 63);
+ if (value >= 0 && value <= 31) {
+ opcode = kMips64Dsrl;
+ } else {
+ opcode = kMips64Dsrl32;
+ value = value - 32;
+ }
+ } else {
+ DCHECK(value >= 0 && value <= 31);
+ opcode = kMipsSrl;
+ }
break;
case kOpAsr:
- DCHECK(value >= 0 && value <= 31);
- opcode = kMipsSra;
+ if (is64bit) {
+ DCHECK(value >= 0 && value <= 63);
+ if (value >= 0 && value <= 31) {
+ opcode = kMips64Dsra;
+ } else {
+ opcode = kMips64Dsra32;
+ value = value - 32;
+ }
+ } else {
+ DCHECK(value >= 0 && value <= 31);
+ opcode = kMipsSra;
+ }
break;
case kOpAnd:
if (IS_UIMM16((value))) {
@@ -271,7 +433,7 @@
break;
case kOpMul:
short_form = false;
- opcode = kMipsMul;
+ opcode = isaIsR6_ ? kMipsR6Mul : kMipsR2Mul;
break;
default:
LOG(FATAL) << "Bad case in OpRegRegImm";
@@ -285,8 +447,14 @@
res = LoadConstant(r_dest, value);
NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_dest.GetReg());
} else {
- RegStorage r_scratch = AllocTemp();
- res = LoadConstant(r_scratch, value);
+ RegStorage r_scratch;
+ if (is64bit) {
+ r_scratch = AllocTempWide();
+ res = LoadConstantWide(r_scratch, value);
+ } else {
+ r_scratch = AllocTemp();
+ res = LoadConstant(r_scratch, value);
+ }
NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
}
}
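// Illustrative shift selection from above (a sketch): 64-bit shifts by 32
// or more use the "+32" opcode with the amount reduced by 32, e.g. kOpLsl
// with value 36 on a 64-bit register emits
//
//   dsll32 rd, rs, 4   // rd = rs << 36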
@@ -303,7 +471,11 @@
case kOpMvn:
return NewLIR3(kMipsNor, r_dest_src1.GetReg(), r_src2.GetReg(), rZERO);
case kOpNeg:
- return NewLIR3(kMipsSubu, r_dest_src1.GetReg(), rZERO, r_src2.GetReg());
+ if (cu_->target64 && r_dest_src1.Is64Bit()) {
+ return NewLIR3(kMips64Dsubu, r_dest_src1.GetReg(), rZEROd, r_src2.GetReg());
+ } else {
+ return NewLIR3(kMipsSubu, r_dest_src1.GetReg(), rZERO, r_src2.GetReg());
+ }
case kOpAdd:
case kOpAnd:
case kOpMul:
@@ -312,21 +484,29 @@
case kOpXor:
return OpRegRegReg(op, r_dest_src1, r_dest_src1, r_src2);
case kOp2Byte:
- if (cu_->compiler_driver->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
- ->IsMipsIsaRevGreaterThanEqual2()) {
+ if (cu_->target64) {
res = NewLIR2(kMipsSeb, r_dest_src1.GetReg(), r_src2.GetReg());
} else {
- res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 24);
- OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 24);
+ if (cu_->compiler_driver->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
+ ->IsMipsIsaRevGreaterThanEqual2()) {
+ res = NewLIR2(kMipsSeb, r_dest_src1.GetReg(), r_src2.GetReg());
+ } else {
+ res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 24);
+ OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 24);
+ }
}
return res;
case kOp2Short:
- if (cu_->compiler_driver->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
- ->IsMipsIsaRevGreaterThanEqual2()) {
+ if (cu_->target64) {
res = NewLIR2(kMipsSeh, r_dest_src1.GetReg(), r_src2.GetReg());
} else {
- res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 16);
- OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 16);
+ if (cu_->compiler_driver->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
+ ->IsMipsIsaRevGreaterThanEqual2()) {
+ res = NewLIR2(kMipsSeh, r_dest_src1.GetReg(), r_src2.GetReg());
+ } else {
+ res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 16);
+ OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 16);
+ }
}
return res;
case kOp2Char:
@@ -359,12 +539,27 @@
LIR* MipsMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
LIR *res;
- if (!r_dest.IsPair()) {
- // Form 64-bit pair
- r_dest = Solo64ToPair64(r_dest);
+ if (cu_->target64) {
+ res = LoadConstantWideNoClobber(r_dest, value);
+ return res;
}
- res = LoadConstantNoClobber(r_dest.GetLow(), Low32Bits(value));
- LoadConstantNoClobber(r_dest.GetHigh(), High32Bits(value));
+ if (fpuIs32Bit_ || !r_dest.IsFloat()) {
+ // 32bit FPU (pairs) or loading into GPR.
+ if (!r_dest.IsPair()) {
+ // Form 64-bit pair.
+ r_dest = Solo64ToPair64(r_dest);
+ }
+ res = LoadConstantNoClobber(r_dest.GetLow(), Low32Bits(value));
+ LoadConstantNoClobber(r_dest.GetHigh(), High32Bits(value));
+ } else {
+ // Here if we have a 64bit FPU and loading into FPR.
+ RegStorage r_temp = AllocTemp();
+ r_dest = Fp64ToSolo32(r_dest);
+ res = LoadConstantNoClobber(r_dest, Low32Bits(value));
+ LoadConstantNoClobber(r_temp, High32Bits(value));
+ NewLIR2(kMipsMthc1, r_temp.GetReg(), r_dest.GetReg());
+ FreeTemp(r_temp);
+ }
return res;
}
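// Illustrative expansion of the 64-bit-FPU branch above (a sketch): the
// low word is materialized through the 32-bit alias of the FP reg
// (LoadConstantNoClobber routes it via a GPR and mtc1), the high word
// goes into a GPR temp, and then
//
//   mthc1 t, fN   // writes the upper 32 bits of the 64-bit FP register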
@@ -374,7 +569,8 @@
LIR *first = NULL;
LIR *res;
MipsOpCode opcode = kMipsNop;
- RegStorage t_reg = AllocTemp();
+ bool is64bit = cu_->target64 && r_dest.Is64Bit();
+ RegStorage t_reg = is64bit ? AllocTempWide() : AllocTemp();
if (r_dest.IsFloat()) {
DCHECK(r_dest.IsSingle());
@@ -385,14 +581,34 @@
size = k32;
}
- if (!scale) {
- first = NewLIR3(kMipsAddu, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
+ if (cu_->target64) {
+ if (!scale) {
+ if (is64bit) {
+ first = NewLIR3(kMips64Daddu, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
+ } else {
+ first = NewLIR3(kMipsAddu, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
+ }
+ } else {
+ first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
+ NewLIR3(kMips64Daddu, t_reg.GetReg() , r_base.GetReg(), t_reg.GetReg());
+ }
} else {
- first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
- NewLIR3(kMipsAddu, t_reg.GetReg() , r_base.GetReg(), t_reg.GetReg());
+ if (!scale) {
+ first = NewLIR3(kMipsAddu, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
+ } else {
+ first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
+ NewLIR3(kMipsAddu, t_reg.GetReg() , r_base.GetReg(), t_reg.GetReg());
+ }
}
switch (size) {
+ case k64:
+ if (cu_->target64) {
+ opcode = kMips64Ld;
+ } else {
+ LOG(FATAL) << "Bad case in LoadBaseIndexed";
+ }
+ break;
case kSingle:
opcode = kMipsFlwc1;
break;
@@ -421,7 +637,7 @@
return (first) ? first : res;
}
-/* store value base base + scaled index. */
+// Store value at base + scaled index.
LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) {
LIR *first = NULL;
@@ -437,11 +653,12 @@
size = k32;
}
+ MipsOpCode add_opcode = cu_->target64 ? kMips64Daddu : kMipsAddu;
if (!scale) {
- first = NewLIR3(kMipsAddu, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
+ first = NewLIR3(add_opcode, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
} else {
first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
- NewLIR3(kMipsAddu, t_reg.GetReg() , r_base.GetReg(), t_reg.GetReg());
+ NewLIR3(add_opcode, t_reg.GetReg() , r_base.GetReg(), t_reg.GetReg());
}
switch (size) {
@@ -483,32 +700,39 @@
LIR *load2 = NULL;
MipsOpCode opcode = kMipsNop;
bool short_form = IS_SIMM16(displacement);
- bool pair = r_dest.IsPair();
+ bool is64bit = false;
switch (size) {
case k64:
case kDouble:
- if (!pair) {
- // Form 64-bit pair
- r_dest = Solo64ToPair64(r_dest);
- pair = 1;
+ if (cu_->target64) {
+ r_dest = Check64BitReg(r_dest);
+ if (!r_dest.IsFloat()) {
+ opcode = kMips64Ld;
+ } else {
+ opcode = kMipsFldc1;
+ }
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
}
- if (r_dest.IsFloat()) {
- DCHECK_EQ(r_dest.GetLowReg(), r_dest.GetHighReg() - 1);
- opcode = kMipsFlwc1;
- } else {
- opcode = kMipsLw;
+ is64bit = true;
+ if (fpuIs32Bit_ && !r_dest.IsPair()) {
+ // Form 64-bit pair.
+ r_dest = Solo64ToPair64(r_dest);
}
short_form = IS_SIMM16_2WORD(displacement);
- DCHECK_EQ((displacement & 0x3), 0);
- break;
+ FALLTHROUGH_INTENDED;
case k32:
case kSingle:
case kReference:
opcode = kMipsLw;
if (r_dest.IsFloat()) {
opcode = kMipsFlwc1;
- DCHECK(r_dest.IsSingle());
+ if (!is64bit) {
+ DCHECK(r_dest.IsSingle());
+ } else {
+ DCHECK(r_dest.IsDouble());
+ }
}
DCHECK_EQ((displacement & 0x3), 0);
break;
@@ -530,51 +754,94 @@
LOG(FATAL) << "Bad case in LoadBaseIndexedBody";
}
- if (short_form) {
- if (!pair) {
+ if (cu_->target64) {
+ if (short_form) {
load = res = NewLIR3(opcode, r_dest.GetReg(), displacement, r_base.GetReg());
} else {
- load = res = NewLIR3(opcode, r_dest.GetLowReg(), displacement + LOWORD_OFFSET, r_base.GetReg());
- load2 = NewLIR3(opcode, r_dest.GetHighReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
- }
- } else {
- if (pair) {
- RegStorage r_tmp = AllocTemp();
- res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
- load = NewLIR3(opcode, r_dest.GetLowReg(), LOWORD_OFFSET, r_tmp.GetReg());
- load2 = NewLIR3(opcode, r_dest.GetHighReg(), HIWORD_OFFSET, r_tmp.GetReg());
- FreeTemp(r_tmp);
- } else {
RegStorage r_tmp = (r_base == r_dest) ? AllocTemp() : r_dest;
res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
load = NewLIR3(opcode, r_dest.GetReg(), 0, r_tmp.GetReg());
if (r_tmp != r_dest)
FreeTemp(r_tmp);
}
+
+ if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+ DCHECK_EQ(r_base, TargetPtrReg(kSp));
+ AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
+ }
+ return res;
+ }
+
+ if (short_form) {
+ if (!is64bit) {
+ load = res = NewLIR3(opcode, r_dest.GetReg(), displacement, r_base.GetReg());
+ } else {
+ if (fpuIs32Bit_ || !r_dest.IsFloat()) {
+ DCHECK(r_dest.IsPair());
+ load = res = NewLIR3(opcode, r_dest.GetLowReg(), displacement + LOWORD_OFFSET,
+ r_base.GetReg());
+ load2 = NewLIR3(opcode, r_dest.GetHighReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
+ } else {
+ // Here if 64bit fpu and r_dest is a 64bit fp register.
+ RegStorage r_tmp = AllocTemp();
+ // FIXME: why is r_dest a 64BitPair here???
+ r_dest = Fp64ToSolo32(r_dest);
+ load = res = NewLIR3(kMipsFlwc1, r_dest.GetReg(), displacement + LOWORD_OFFSET,
+ r_base.GetReg());
+ load2 = NewLIR3(kMipsLw, r_tmp.GetReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
+ NewLIR2(kMipsMthc1, r_tmp.GetReg(), r_dest.GetReg());
+ FreeTemp(r_tmp);
+ }
+ }
+ } else {
+ if (!is64bit) {
+ RegStorage r_tmp = (r_base == r_dest || r_dest.IsFloat()) ? AllocTemp() : r_dest;
+ res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
+ load = NewLIR3(opcode, r_dest.GetReg(), 0, r_tmp.GetReg());
+ if (r_tmp != r_dest)
+ FreeTemp(r_tmp);
+ } else {
+ RegStorage r_tmp = AllocTemp();
+ res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
+ if (fpuIs32Bit_ || !r_dest.IsFloat()) {
+ DCHECK(r_dest.IsPair());
+ load = NewLIR3(opcode, r_dest.GetLowReg(), LOWORD_OFFSET, r_tmp.GetReg());
+ load2 = NewLIR3(opcode, r_dest.GetHighReg(), HIWORD_OFFSET, r_tmp.GetReg());
+ } else {
+ // Here if 64bit fpu and r_dest is a 64bit fp register
+ r_dest = Fp64ToSolo32(r_dest);
+ load = res = NewLIR3(kMipsFlwc1, r_dest.GetReg(), LOWORD_OFFSET, r_tmp.GetReg());
+ load2 = NewLIR3(kMipsLw, r_tmp.GetReg(), HIWORD_OFFSET, r_tmp.GetReg());
+ NewLIR2(kMipsMthc1, r_tmp.GetReg(), r_dest.GetReg());
+ }
+ FreeTemp(r_tmp);
+ }
}
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK_EQ(r_base, rs_rMIPS_SP);
- AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
- true /* is_load */, pair /* is64bit */);
- if (pair) {
+ DCHECK_EQ(r_base, TargetPtrReg(kSp));
+ AnnotateDalvikRegAccess(load, (displacement + (is64bit ? LOWORD_OFFSET : 0)) >> 2,
+ true /* is_load */, is64bit /* is64bit */);
+ if (is64bit) {
AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
- true /* is_load */, pair /* is64bit */);
+ true /* is_load */, is64bit /* is64bit */);
}
}
- return load;
+ return res;
}
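For the pair case above, short_form switches to IS_SIMM16_2WORD because both 32-bit halves must be reachable from the base register. A minimal sketch of the two predicates, assuming they match the IS_SIMM16/IS_SIMM16_2WORD macros in mips_lir.h:

    // Sketch only; the real macros live in mips_lir.h.
    static bool IsSimm16(int v) { return v >= -32768 && v < 32768; }
    static bool IsSimm16TwoWord(int disp) {
      // Both the LOWORD (d) and HIWORD (d + 4) accesses need an
      // in-range 16-bit immediate.
      return IsSimm16(disp) && IsSimm16(disp + 4);
    }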
-LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size, VolatileKind is_volatile) {
- if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))) {
+LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
+ VolatileKind is_volatile) {
+ if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))
+ && (!cu_->target64 || displacement & 0x7)) {
+ // TODO: use lld/scd instructions for Mips64.
// Do atomic 64-bit load.
return GenAtomic64Load(r_base, displacement, r_dest);
}
// TODO: base this on target.
if (size == kWord) {
- size = k32;
+ size = cu_->target64 ? k64 : k32;
}
LIR* load;
load = LoadBaseDispBody(r_base, displacement, r_dest, size);
@@ -587,39 +854,46 @@
}
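The reworked volatile guard in LoadBaseDisp (and its twin in StoreBaseDisp below) reads as a single predicate: the atomic helper is needed only when one aligned 8-byte access is unavailable. A hedged restatement, not part of the patch:

    static bool NeedsAtomic64Helper(bool target64, int displacement) {
      // mips32 has no single 64-bit access; on mips64, ld/sd is usable
      // only when the displacement is 8-byte aligned (lld/scd is a TODO).
      return !target64 || (displacement & 0x7) != 0;
    }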
// FIXME: don't split r_src into 2 containers.
-LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
- RegStorage r_src, OpSize size) {
+LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size) {
LIR *res;
LIR *store = NULL;
LIR *store2 = NULL;
MipsOpCode opcode = kMipsNop;
bool short_form = IS_SIMM16(displacement);
- bool pair = r_src.IsPair();
+ bool is64bit = false;
switch (size) {
case k64:
case kDouble:
- if (!pair) {
- // Form 64-bit pair
- r_src = Solo64ToPair64(r_src);
- pair = 1;
+ if (cu_->target64) {
+ r_src = Check64BitReg(r_src);
+ if (!r_src.IsFloat()) {
+ opcode = kMips64Sd;
+ } else {
+ opcode = kMipsFsdc1;
+ }
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
}
- if (r_src.IsFloat()) {
- DCHECK_EQ(r_src.GetLowReg(), r_src.GetHighReg() - 1);
- opcode = kMipsFswc1;
- } else {
- opcode = kMipsSw;
+ is64bit = true;
+ if (fpuIs32Bit_ && !r_src.IsPair()) {
+ // Form 64-bit pair.
+ r_src = Solo64ToPair64(r_src);
}
short_form = IS_SIMM16_2WORD(displacement);
- DCHECK_EQ((displacement & 0x3), 0);
- break;
+ FALLTHROUGH_INTENDED;
case k32:
case kSingle:
case kReference:
opcode = kMipsSw;
if (r_src.IsFloat()) {
opcode = kMipsFswc1;
- DCHECK(r_src.IsSingle());
+ if (!is64bit) {
+ DCHECK(r_src.IsSingle());
+ } else {
+ DCHECK(r_src.IsDouble());
+ }
}
DCHECK_EQ((displacement & 0x3), 0);
break;
@@ -636,53 +910,96 @@
LOG(FATAL) << "Bad case in StoreBaseDispBody";
}
- if (short_form) {
- if (!pair) {
+ if (cu_->target64) {
+ if (short_form) {
store = res = NewLIR3(opcode, r_src.GetReg(), displacement, r_base.GetReg());
} else {
- store = res = NewLIR3(opcode, r_src.GetLowReg(), displacement + LOWORD_OFFSET, r_base.GetReg());
- store2 = NewLIR3(opcode, r_src.GetHighReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
+ RegStorage r_scratch = AllocTemp();
+ res = OpRegRegImm(kOpAdd, r_scratch, r_base, displacement);
+ store = NewLIR3(opcode, r_src.GetReg(), 0, r_scratch.GetReg());
+ FreeTemp(r_scratch);
+ }
+
+ if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+ DCHECK_EQ(r_base, TargetPtrReg(kSp));
+ AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
+ }
+ return res;
+ }
+
+ if (short_form) {
+ if (!is64bit) {
+ store = res = NewLIR3(opcode, r_src.GetReg(), displacement, r_base.GetReg());
+ } else {
+ if (fpuIs32Bit_ || !r_src.IsFloat()) {
+ DCHECK(r_src.IsPair());
+ store = res = NewLIR3(opcode, r_src.GetLowReg(), displacement + LOWORD_OFFSET,
+ r_base.GetReg());
+ store2 = NewLIR3(opcode, r_src.GetHighReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
+ } else {
+ // Here if 64bit fpu and r_src is a 64bit fp register
+ RegStorage r_tmp = AllocTemp();
+ r_src = Fp64ToSolo32(r_src);
+ store = res = NewLIR3(kMipsFswc1, r_src.GetReg(), displacement + LOWORD_OFFSET,
+ r_base.GetReg());
+ NewLIR2(kMipsMfhc1, r_tmp.GetReg(), r_src.GetReg());
+ store2 = NewLIR3(kMipsSw, r_tmp.GetReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
+ FreeTemp(r_tmp);
+ }
}
} else {
RegStorage r_scratch = AllocTemp();
res = OpRegRegImm(kOpAdd, r_scratch, r_base, displacement);
- if (!pair) {
+ if (!is64bit) {
store = NewLIR3(opcode, r_src.GetReg(), 0, r_scratch.GetReg());
} else {
- store = NewLIR3(opcode, r_src.GetLowReg(), LOWORD_OFFSET, r_scratch.GetReg());
- store2 = NewLIR3(opcode, r_src.GetHighReg(), HIWORD_OFFSET, r_scratch.GetReg());
+ if (fpuIs32Bit_ || !r_src.IsFloat()) {
+ DCHECK(r_src.IsPair());
+ store = NewLIR3(opcode, r_src.GetLowReg(), LOWORD_OFFSET, r_scratch.GetReg());
+ store2 = NewLIR3(opcode, r_src.GetHighReg(), HIWORD_OFFSET, r_scratch.GetReg());
+ } else {
+ // Here if 64bit fpu and r_src is a 64bit fp register
+ RegStorage r_tmp = AllocTemp();
+ r_src = Fp64ToSolo32(r_src);
+ store = NewLIR3(kMipsFswc1, r_src.GetReg(), LOWORD_OFFSET, r_scratch.GetReg());
+ NewLIR2(kMipsMfhc1, r_tmp.GetReg(), r_src.GetReg());
+ store2 = NewLIR3(kMipsSw, r_tmp.GetReg(), HIWORD_OFFSET, r_scratch.GetReg());
+ FreeTemp(r_tmp);
+ }
}
FreeTemp(r_scratch);
}
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK_EQ(r_base, rs_rMIPS_SP);
- AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
- false /* is_load */, pair /* is64bit */);
- if (pair) {
+ DCHECK_EQ(r_base, TargetPtrReg(kSp));
+ AnnotateDalvikRegAccess(store, (displacement + (is64bit ? LOWORD_OFFSET : 0)) >> 2,
+ false /* is_load */, is64bit /* is64bit */);
+ if (is64bit) {
AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
- false /* is_load */, pair /* is64bit */);
+ false /* is_load */, is64bit /* is64bit */);
}
}
return res;
}
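For a double held in one 64-bit FPU register, the store path mirrors the load path: the low word leaves through swc1 and the high word is first extracted with mfhc1. An illustrative sequence, assuming the little-endian layout used throughout this file:

    /*
     *   swc1  $fs, d+LOWORD($base)   ; low 32 bits straight from the FPR
     *   mfhc1 $at, $fs               ; high 32 bits into a temp GPR
     *   sw    $at, d+HIWORD($base)
     */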
-LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size, VolatileKind is_volatile) {
+LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
+ VolatileKind is_volatile) {
if (is_volatile == kVolatile) {
// Ensure that prior accesses become visible to other threads first.
GenMemBarrier(kAnyStore);
}
LIR* store;
- if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))) {
+ if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble) &&
+ (!cu_->target64 || displacement & 0x7))) {
+ // TODO: use lld/scd instructions for Mips64.
// Do atomic 64-bit store.
store = GenAtomic64Store(r_base, displacement, r_src);
} else {
// TODO: base this on target.
if (size == kWord) {
- size = k32;
+ size = cu_->target64 ? k64 : k32;
}
store = StoreBaseDispBody(r_base, displacement, r_src, size);
}
@@ -709,7 +1026,7 @@
}
LIR* MipsMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
- if (IsDirectEntrypoint(trampoline)) {
+ if (!cu_->target64 && IsDirectEntrypoint(trampoline)) {
// Reserve argument space on stack (for $a0-$a3) for
// entrypoints that directly reference native implementations.
// This is not safe in general, as it violates the frame size
@@ -724,4 +1041,8 @@
return OpReg(op, r_tgt);
}
+RegStorage MipsMir2Lir::AllocPtrSizeTemp(bool required) {
+ return cu_->target64 ? AllocTempWide(required) : AllocTemp(required);
+}
+
} // namespace art
diff --git a/compiler/dex/quick/mips64/assemble_mips64.cc b/compiler/dex/quick/mips64/assemble_mips64.cc
deleted file mode 100644
index 17a0ef1..0000000
--- a/compiler/dex/quick/mips64/assemble_mips64.cc
+++ /dev/null
@@ -1,898 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_mips64.h"
-
-#include "base/logging.h"
-#include "dex/compiler_ir.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "mips64_lir.h"
-
-namespace art {
-
-#define MAX_ASSEMBLER_RETRIES 50
-
-/*
- * opcode: Mips64OpCode enum
- * skeleton: pre-designated bit-pattern for this opcode
- * k0: key to applying ds/de
- * ds: dest start bit position
- * de: dest end bit position
- * k1: key to applying s1s/s1e
- * s1s: src1 start bit position
- * s1e: src1 end bit position
- * k2: key to applying s2s/s2e
- * s2s: src2 start bit position
- * s2e: src2 end bit position
- * operands: number of operands (for sanity check purposes)
- * name: mnemonic name
- * fmt: for pretty-printing
- */
-#define ENCODING_MAP(opcode, skeleton, k0, ds, de, k1, s1s, s1e, k2, s2s, s2e, \
- k3, k3s, k3e, flags, name, fmt, size) \
- {skeleton, {{k0, ds, de}, {k1, s1s, s1e}, {k2, s2s, s2e}, \
- {k3, k3s, k3e}}, opcode, flags, name, fmt, size}
-
-/* Instruction dump string format keys: !pf, where "!" is the start
- * of the key, "p" is which numeric operand to use and "f" is the
- * print format.
- *
- * [p]ositions:
- * 0 -> operands[0] (dest)
- * 1 -> operands[1] (src1)
- * 2 -> operands[2] (src2)
- * 3 -> operands[3] (extra)
- *
- * [f]ormats:
- * h -> 4-digit hex
- * d -> decimal
- * E -> decimal*4
- * F -> decimal*2
- * c -> branch condition (beq, bne, etc.)
- * t -> pc-relative target
- * T -> pc-region target
- * u -> 1st half of bl[x] target
- * v -> 2nd half of bl[x] target
- * R -> register list
- * s -> single precision floating point register
- * S -> double precision floating point register
- * m -> Thumb2 modified immediate
- * n -> complemented Thumb2 modified immediate
- * M -> Thumb2 16-bit zero-extended immediate
- * b -> 4-digit binary
- * N -> append a NOP
- *
- * [!] escape. To insert "!", use "!!"
- */
-/* NOTE: must be kept in sync with enum Mips64Opcode from mips64_lir.h */
-/*
- * TUNING: We're currently punting on the branch delay slots. All branch
- * instructions in this map are given a size of 8, which during assembly
- * is expanded to include a nop. This scheme should be replaced with
- * an assembler pass to fill those slots when possible.
- */
-const Mips64EncodingMap Mips64Mir2Lir::EncodingMap[kMips64Last] = {
- ENCODING_MAP(kMips6432BitData, 0x00000000,
- kFmtBitBlt, 31, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP,
- "data", "0x!0h(!0d)", 4),
- ENCODING_MAP(kMips64Addiu, 0x24000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "addiu", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Addu, 0x00000021,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "addu", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64And, 0x00000024,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "and", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Andi, 0x30000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "andi", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64B, 0x10000000,
- kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | NEEDS_FIXUP,
- "b", "!0t!0N", 8),
- ENCODING_MAP(kMips64Bal, 0x04110000,
- kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR |
- NEEDS_FIXUP, "bal", "!0t!0N", 8),
- ENCODING_MAP(kMips64Beq, 0x10000000,
- kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
- kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
- NEEDS_FIXUP, "beq", "!0r,!1r,!2t!0N", 8),
- ENCODING_MAP(kMips64Beqz, 0x10000000, // Same as beq above with t = $zero.
- kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
- NEEDS_FIXUP, "beqz", "!0r,!1t!0N", 8),
- ENCODING_MAP(kMips64Bgez, 0x04010000,
- kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
- NEEDS_FIXUP, "bgez", "!0r,!1t!0N", 8),
- ENCODING_MAP(kMips64Bgtz, 0x1c000000,
- kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
- NEEDS_FIXUP, "bgtz", "!0r,!1t!0N", 8),
- ENCODING_MAP(kMips64Blez, 0x18000000,
- kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
- NEEDS_FIXUP, "blez", "!0r,!1t!0N", 8),
- ENCODING_MAP(kMips64Bltz, 0x04000000,
- kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
- NEEDS_FIXUP, "bltz", "!0r,!1t!0N", 8),
- ENCODING_MAP(kMips64Bnez, 0x14000000, // Same as bne below with t = $zero.
- kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
- NEEDS_FIXUP, "bnez", "!0r,!1t!0N", 8),
- ENCODING_MAP(kMips64Bne, 0x14000000,
- kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
- kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
- NEEDS_FIXUP, "bne", "!0r,!1r,!2t!0N", 8),
- ENCODING_MAP(kMips64Break, 0x0000000d,
- kFmtBitBlt, 25, 6, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP, "break", "!0d", 4),
- ENCODING_MAP(kMips64Daddiu, 0x64000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "daddiu", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Daddu, 0x0000002d,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "daddu", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Dahi, 0x04060000,
- kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE0,
- "dahi", "!0r,0x!1h(!1d)", 4),
- ENCODING_MAP(kMips64Dati, 0x041E0000,
- kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE0,
- "dati", "!0r,0x!1h(!1d)", 4),
- ENCODING_MAP(kMips64Daui, 0x74000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "daui", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Ddiv, 0x0000009e,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "ddiv", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Div, 0x0000009a,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "div", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Dmod, 0x000000de,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "dmod", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Dmul, 0x0000009c,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "dmul", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Dmfc1, 0x44200000,
- kFmtBitBlt, 20, 16, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "dmfc1", "!0r,!1s", 4),
- ENCODING_MAP(kMips64Dmtc1, 0x44a00000,
- kFmtBitBlt, 20, 16, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
- "dmtc1", "!0r,!1s", 4),
- ENCODING_MAP(kMips64Drotr32, 0x0000003e | (1 << 21),
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "drotr32", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Dsll, 0x00000038,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "dsll", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Dsll32, 0x0000003c,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "dsll32", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Dsrl, 0x0000003a,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "dsrl", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Dsrl32, 0x0000003e,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "dsrl32", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Dsra, 0x0000003b,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "dsra", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Dsra32, 0x0000003f,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "dsra32", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Dsllv, 0x00000014,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "dsllv", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Dsrlv, 0x00000016,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "dsrlv", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Dsrav, 0x00000017,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "dsrav", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Dsubu, 0x0000002f,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "dsubu", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Ext, 0x7c000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 10, 6,
- kFmtBitBlt, 15, 11, IS_QUAD_OP | REG_DEF0 | REG_USE1,
- "ext", "!0r,!1r,!2d,!3D", 4),
- ENCODING_MAP(kMips64Faddd, 0x46200000,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "add.d", "!0S,!1S,!2S", 4),
- ENCODING_MAP(kMips64Fadds, 0x46000000,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "add.s", "!0s,!1s,!2s", 4),
- ENCODING_MAP(kMips64Fdivd, 0x46200003,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "div.d", "!0S,!1S,!2S", 4),
- ENCODING_MAP(kMips64Fdivs, 0x46000003,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "div.s", "!0s,!1s,!2s", 4),
- ENCODING_MAP(kMips64Fmuld, 0x46200002,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "mul.d", "!0S,!1S,!2S", 4),
- ENCODING_MAP(kMips64Fmuls, 0x46000002,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "mul.s", "!0s,!1s,!2s", 4),
- ENCODING_MAP(kMips64Fsubd, 0x46200001,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "sub.d", "!0S,!1S,!2S", 4),
- ENCODING_MAP(kMips64Fsubs, 0x46000001,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "sub.s", "!0s,!1s,!2s", 4),
- ENCODING_MAP(kMips64Fcvtsd, 0x46200020,
- kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.s.d", "!0s,!1S", 4),
- ENCODING_MAP(kMips64Fcvtsw, 0x46800020,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.s.w", "!0s,!1s", 4),
- ENCODING_MAP(kMips64Fcvtds, 0x46000021,
- kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.d.s", "!0S,!1s", 4),
- ENCODING_MAP(kMips64Fcvtdw, 0x46800021,
- kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.d.w", "!0S,!1s", 4),
- ENCODING_MAP(kMips64Fcvtws, 0x46000024,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.w.s", "!0s,!1s", 4),
- ENCODING_MAP(kMips64Fcvtwd, 0x46200024,
- kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.w.d", "!0s,!1S", 4),
- ENCODING_MAP(kMips64Fmovd, 0x46200006,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "mov.d", "!0S,!1S", 4),
- ENCODING_MAP(kMips64Fmovs, 0x46000006,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "mov.s", "!0s,!1s", 4),
- ENCODING_MAP(kMips64Fnegd, 0x46200007,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "neg.d", "!0S,!1S", 4),
- ENCODING_MAP(kMips64Fnegs, 0x46000007,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "neg.s", "!0s,!1s", 4),
- ENCODING_MAP(kMips64Fldc1, 0xd4000000,
- kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "ldc1", "!0S,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Flwc1, 0xc4000000,
- kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "lwc1", "!0s,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Fsdc1, 0xf4000000,
- kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
- "sdc1", "!0S,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Fswc1, 0xe4000000,
- kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
- "swc1", "!0s,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Jal, 0x0c000000,
- kFmtBitBlt, 25, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
- "jal", "!0T(!0E)!0N", 8),
- ENCODING_MAP(kMips64Jalr, 0x00000009,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_DEF0_USE1,
- "jalr", "!0r,!1r!0N", 8),
- ENCODING_MAP(kMips64Lahi, 0x3c000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
- "lahi/lui", "!0r,0x!1h(!1d)", 4),
- ENCODING_MAP(kMips64Lalo, 0x34000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "lalo/ori", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Lb, 0x80000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "lb", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Lbu, 0x90000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "lbu", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Ld, 0xdc000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "ld", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Lh, 0x84000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "lh", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Lhu, 0x94000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "lhu", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Lui, 0x3c000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
- "lui", "!0r,0x!1h(!1d)", 4),
- ENCODING_MAP(kMips64Lw, 0x8c000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "lw", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Lwu, 0x9c000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "lwu", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Mfc1, 0x44000000,
- kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "mfc1", "!0r,!1s", 4),
- ENCODING_MAP(kMips64Mtc1, 0x44800000,
- kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
- "mtc1", "!0r,!1s", 4),
- ENCODING_MAP(kMips64Move, 0x0000002d, // Or using zero reg.
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "move", "!0r,!1r", 4),
- ENCODING_MAP(kMips64Mod, 0x000000da,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "mod", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Mul, 0x00000098,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "mul", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Nop, 0x00000000,
- kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, NO_OPERAND,
- "nop", ";", 4),
- ENCODING_MAP(kMips64Nor, 0x00000027, // Used for "not" too.
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "nor", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Or, 0x00000025,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "or", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Ori, 0x34000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "ori", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Sb, 0xa0000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
- "sb", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Sd, 0xfc000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
- "sd", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Seb, 0x7c000420,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "seb", "!0r,!1r", 4),
- ENCODING_MAP(kMips64Seh, 0x7c000620,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "seh", "!0r,!1r", 4),
- ENCODING_MAP(kMips64Sh, 0xa4000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
- "sh", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Sll, 0x00000000,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "sll", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Sllv, 0x00000004,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "sllv", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Slt, 0x0000002a,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "slt", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Slti, 0x28000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "slti", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Sltu, 0x0000002b,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "sltu", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Sra, 0x00000003,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "sra", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Srav, 0x00000007,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "srav", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Srl, 0x00000002,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "srl", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Srlv, 0x00000006,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "srlv", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Subu, 0x00000023, // Used for "neg" too.
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "subu", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Sw, 0xac000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
- "sw", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Sync, 0x0000000f,
- kFmtBitBlt, 10, 6, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP,
- "sync", ";", 4),
- ENCODING_MAP(kMips64Xor, 0x00000026,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "xor", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Xori, 0x38000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "xori", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64CurrPC, 0x04110001,
- kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, NO_OPERAND | IS_BRANCH | REG_DEF_LR,
- "addiu", "ra,pc,8", 4),
- ENCODING_MAP(kMips64Delta, 0x67e00000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, 15, 0,
- kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0 | REG_USE_LR |
- NEEDS_FIXUP, "daddiu", "!0r,ra,0x!1h(!1d)", 4),
- ENCODING_MAP(kMips64DeltaHi, 0x3c000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0 | NEEDS_FIXUP,
- "lui", "!0r,0x!1h(!1d)", 4),
- ENCODING_MAP(kMips64DeltaLo, 0x34000000,
- kFmtBlt5_2, 16, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0_USE0 | NEEDS_FIXUP,
- "ori", "!0r,!0r,0x!1h(!1d)", 4),
- ENCODING_MAP(kMips64Undefined, 0x64000000,
- kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, NO_OPERAND,
- "undefined", "", 4),
-};
-
-
-/*
- * Convert a short-form branch to long form. Hopefully, this won't happen
- * very often because the PIC sequence is especially unfortunate.
- *
- * Orig conditional branch
- * -----------------------
- * beq rs,rt,target
- *
- * Long conditional branch
- * -----------------------
- * bne rs,rt,hop
- * bal .+8 ; rRA <- anchor
- * lui rAT, ((target-anchor) >> 16)
- * anchor:
- * ori rAT, rAT, ((target-anchor) & 0xffff)
- * addu rAT, rAT, rRA
- * jalr rZERO, rAT
- * hop:
- *
- * Orig unconditional branch
- * -------------------------
- * b target
- *
- * Long unconditional branch
- * -----------------------
- * bal .+8 ; rRA <- anchor
- * lui rAT, ((target-anchor) >> 16)
- * anchor:
- * ori rAT, rAT, ((target-anchor) & 0xffff)
- * addu rAT, rAT, rRA
- * jalr rZERO, rAT
- *
- *
- * NOTE: An out-of-range bal isn't supported because it should
- * never happen with the current PIC model.
- */
-void Mips64Mir2Lir::ConvertShortToLongBranch(LIR* lir) {
- // For conditional branches we'll need to reverse the sense
- bool unconditional = false;
- int opcode = lir->opcode;
- int dalvik_offset = lir->dalvik_offset;
- switch (opcode) {
- case kMips64Bal:
- LOG(FATAL) << "long branch and link unsupported";
- UNREACHABLE();
- case kMips64B:
- unconditional = true;
- break;
- case kMips64Beq: opcode = kMips64Bne; break;
- case kMips64Bne: opcode = kMips64Beq; break;
- case kMips64Beqz: opcode = kMips64Bnez; break;
- case kMips64Bgez: opcode = kMips64Bltz; break;
- case kMips64Bgtz: opcode = kMips64Blez; break;
- case kMips64Blez: opcode = kMips64Bgtz; break;
- case kMips64Bltz: opcode = kMips64Bgez; break;
- case kMips64Bnez: opcode = kMips64Beqz; break;
- default:
- LOG(FATAL) << "Unexpected branch kind " << opcode;
- UNREACHABLE();
- }
- LIR* hop_target = NULL;
- if (!unconditional) {
- hop_target = RawLIR(dalvik_offset, kPseudoTargetLabel);
- LIR* hop_branch = RawLIR(dalvik_offset, opcode, lir->operands[0],
- lir->operands[1], 0, 0, 0, hop_target);
- InsertLIRBefore(lir, hop_branch);
- }
- LIR* curr_pc = RawLIR(dalvik_offset, kMips64CurrPC);
- InsertLIRBefore(lir, curr_pc);
- LIR* anchor = RawLIR(dalvik_offset, kPseudoTargetLabel);
- LIR* delta_hi = RawLIR(dalvik_offset, kMips64DeltaHi, rAT, 0, WrapPointer(anchor), 0, 0,
- lir->target);
- InsertLIRBefore(lir, delta_hi);
- InsertLIRBefore(lir, anchor);
- LIR* delta_lo = RawLIR(dalvik_offset, kMips64DeltaLo, rAT, 0, WrapPointer(anchor), 0, 0,
- lir->target);
- InsertLIRBefore(lir, delta_lo);
- LIR* addu = RawLIR(dalvik_offset, kMips64Addu, rAT, rAT, rRA);
- InsertLIRBefore(lir, addu);
- LIR* jalr = RawLIR(dalvik_offset, kMips64Jalr, rZERO, rAT);
- InsertLIRBefore(lir, jalr);
- if (!unconditional) {
- InsertLIRBefore(lir, hop_target);
- }
- NopLIR(lir);
-}
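The expansion is triggered only when a branch's 16-bit word offset cannot reach its target. A hedged restatement of the range test that AssembleInstructions applies below:

    static bool BranchOffsetFits(int delta_bytes) {
      // A signed 16-bit word offset reaches 32767 * 4 = 131068 bytes
      // forward; the backward bound used here is -131069, slightly
      // tighter than the encodable -32768 * 4 = -131072.
      return delta_bytes <= 131068 && delta_bytes >= -131069;
    }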
-
-/*
- * Assemble the LIR into binary instruction format. Note that we may
- * discover that pc-relative displacements may not fit the selected
- * instruction. In those cases we will try to substitute a new code
- * sequence or request that the trace be shortened and retried.
- */
-AssemblerStatus Mips64Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
- LIR *lir;
- AssemblerStatus res = kSuccess; // Assume success.
-
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
- if (lir->opcode < 0) {
- continue;
- }
-
- if (lir->flags.is_nop) {
- continue;
- }
-
- if (lir->flags.fixup != kFixupNone) {
- if (lir->opcode == kMips64Delta) {
- /*
- * The "Delta" pseudo-ops load the difference between
- * two pc-relative locations into the target register
- * found in operands[0]. The delta is determined by
- * (label2 - label1), where label1 is a standard
- * kPseudoTargetLabel and is stored in operands[2].
- * If operands[3] is null, then label2 is a kPseudoTargetLabel
- * and is found in lir->target. If operands[3] is non-NULL,
- * then it is a Switch/Data table.
- */
- int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
- EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
- int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
- int delta = offset2 - offset1;
- if ((delta & 0xffff) == delta && ((delta & 0x8000) == 0)) {
- // Fits.
- lir->operands[1] = delta;
- } else {
- // Doesn't fit - must expand to kMips64Delta[Hi|Lo] pair.
- LIR *new_delta_hi = RawLIR(lir->dalvik_offset, kMips64DeltaHi, lir->operands[0], 0,
- lir->operands[2], lir->operands[3], 0, lir->target);
- InsertLIRBefore(lir, new_delta_hi);
- LIR *new_delta_lo = RawLIR(lir->dalvik_offset, kMips64DeltaLo, lir->operands[0], 0,
- lir->operands[2], lir->operands[3], 0, lir->target);
- InsertLIRBefore(lir, new_delta_lo);
- LIR *new_addu = RawLIR(lir->dalvik_offset, kMips64Daddu, lir->operands[0],
- lir->operands[0], rRAd);
- InsertLIRBefore(lir, new_addu);
- NopLIR(lir);
- res = kRetryAll;
- }
- } else if (lir->opcode == kMips64DeltaLo) {
- int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
- EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
- int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
- int delta = offset2 - offset1;
- lir->operands[1] = delta & 0xffff;
- } else if (lir->opcode == kMips64DeltaHi) {
- int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
- EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
- int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
- int delta = offset2 - offset1;
- lir->operands[1] = (delta >> 16) & 0xffff;
- } else if (lir->opcode == kMips64B || lir->opcode == kMips64Bal) {
- LIR *target_lir = lir->target;
- CodeOffset pc = lir->offset + 4;
- CodeOffset target = target_lir->offset;
- int delta = target - pc;
- if (delta & 0x3) {
- LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
- }
- if (delta > 131068 || delta < -131069) {
- res = kRetryAll;
- ConvertShortToLongBranch(lir);
- } else {
- lir->operands[0] = delta >> 2;
- }
- } else if (lir->opcode >= kMips64Beqz && lir->opcode <= kMips64Bnez) {
- LIR *target_lir = lir->target;
- CodeOffset pc = lir->offset + 4;
- CodeOffset target = target_lir->offset;
- int delta = target - pc;
- if (delta & 0x3) {
- LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
- }
- if (delta > 131068 || delta < -131069) {
- res = kRetryAll;
- ConvertShortToLongBranch(lir);
- } else {
- lir->operands[1] = delta >> 2;
- }
- } else if (lir->opcode == kMips64Beq || lir->opcode == kMips64Bne) {
- LIR *target_lir = lir->target;
- CodeOffset pc = lir->offset + 4;
- CodeOffset target = target_lir->offset;
- int delta = target - pc;
- if (delta & 0x3) {
- LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
- }
- if (delta > 131068 || delta < -131069) {
- res = kRetryAll;
- ConvertShortToLongBranch(lir);
- } else {
- lir->operands[2] = delta >> 2;
- }
- } else if (lir->opcode == kMips64Jal) {
- CodeOffset cur_pc = (start_addr + lir->offset + 4) & ~3;
- CodeOffset target = lir->operands[0];
- /* ensure PC-region branch can be used */
- DCHECK_EQ((cur_pc & 0xF0000000), (target & 0xF0000000));
- if (target & 0x3) {
- LOG(FATAL) << "Jump target not multiple of 4: " << target;
- }
- lir->operands[0] = target >> 2;
- } else if (lir->opcode == kMips64Lahi) { /* ld address hi (via lui) */
- LIR *target_lir = lir->target;
- CodeOffset target = start_addr + target_lir->offset;
- lir->operands[1] = target >> 16;
- } else if (lir->opcode == kMips64Lalo) { /* ld address lo (via ori) */
- LIR *target_lir = lir->target;
- CodeOffset target = start_addr + target_lir->offset;
- lir->operands[2] = lir->operands[2] + target;
- }
- }
-
- /*
- * If one of the pc-relative instructions expanded we'll have
- * to make another pass. Don't bother to fully assemble the
- * instruction.
- */
- if (res != kSuccess) {
- continue;
- }
- DCHECK(!IsPseudoLirOp(lir->opcode));
- const Mips64EncodingMap *encoder = &EncodingMap[lir->opcode];
- uint32_t bits = encoder->skeleton;
- int i;
- for (i = 0; i < 4; i++) {
- uint32_t operand;
- uint32_t value;
- operand = lir->operands[i];
- switch (encoder->field_loc[i].kind) {
- case kFmtUnused:
- break;
- case kFmtBitBlt:
- if (encoder->field_loc[i].start == 0 && encoder->field_loc[i].end == 31) {
- value = operand;
- } else {
- value = (operand << encoder->field_loc[i].start) &
- ((1 << (encoder->field_loc[i].end + 1)) - 1);
- }
- bits |= value;
- break;
- case kFmtBlt5_2:
- value = (operand & 0x1f);
- bits |= (value << encoder->field_loc[i].start);
- bits |= (value << encoder->field_loc[i].end);
- break;
- case kFmtDfp: {
- // TODO: do we need to adjust now that we're using 64BitSolo?
- DCHECK(RegStorage::IsDouble(operand)) << ", Operand = 0x" << std::hex << operand;
- value = (RegStorage::RegNum(operand) << encoder->field_loc[i].start) &
- ((1 << (encoder->field_loc[i].end + 1)) - 1);
- bits |= value;
- break;
- }
- case kFmtSfp:
- DCHECK(RegStorage::IsSingle(operand)) << ", Operand = 0x" << std::hex << operand;
- value = (RegStorage::RegNum(operand) << encoder->field_loc[i].start) &
- ((1 << (encoder->field_loc[i].end + 1)) - 1);
- bits |= value;
- break;
- default:
- LOG(FATAL) << "Bad encoder format: " << encoder->field_loc[i].kind;
- }
- }
- // We only support little-endian MIPS64.
- code_buffer_.push_back(bits & 0xff);
- code_buffer_.push_back((bits >> 8) & 0xff);
- code_buffer_.push_back((bits >> 16) & 0xff);
- code_buffer_.push_back((bits >> 24) & 0xff);
- // TUNING: replace with proper delay slot handling.
- if (encoder->size == 8) {
- DCHECK(!IsPseudoLirOp(lir->opcode));
- const Mips64EncodingMap *encoder2 = &EncodingMap[kMips64Nop];
- uint32_t bits2 = encoder2->skeleton;
- code_buffer_.push_back(bits2 & 0xff);
- code_buffer_.push_back((bits2 >> 8) & 0xff);
- code_buffer_.push_back((bits2 >> 16) & 0xff);
- code_buffer_.push_back((bits2 >> 24) & 0xff);
- }
- }
- return res;
-}
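The kFmtBitBlt case above can be read as one helper. Judging from the full-word test (start == 0 && end == 31) against the entry declared as 31, 0, the macro's (ds, de) pair lands in the struct as {end, start}, i.e. end is the high bit and start the low bit; this reading is an assumption:

    // Sketch of the bit-blit insertion; the full-word case
    // (start == 0, end == 31) is special-cased in the real code.
    static unsigned InsertBitBlt(unsigned skeleton, unsigned operand,
                                 int start /* low bit */, int end /* high bit */) {
      return skeleton | ((operand << start) & ((1u << (end + 1)) - 1u));
    }
    // e.g. InsertBitBlt(0x00000021u /* addu */, 3u, 11, 15) places
    // register 3 in the dest field, bits 15..11.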
-
-size_t Mips64Mir2Lir::GetInsnSize(LIR* lir) {
- DCHECK(!IsPseudoLirOp(lir->opcode));
- return EncodingMap[lir->opcode].size;
-}
-
-// LIR offset assignment.
-// TODO: consolidate w/ Arm assembly mechanism.
-int Mips64Mir2Lir::AssignInsnOffsets() {
- LIR* lir;
- int offset = 0;
-
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
- lir->offset = offset;
- if (LIKELY(lir->opcode >= 0)) {
- if (!lir->flags.is_nop) {
- offset += lir->flags.size;
- }
- } else if (UNLIKELY(lir->opcode == kPseudoPseudoAlign4)) {
- if (offset & 0x2) {
- offset += 2;
- lir->operands[0] = 1;
- } else {
- lir->operands[0] = 0;
- }
- }
- // Pseudo opcodes don't consume space.
- }
- return offset;
-}
-
-/*
- * Walk the compilation unit and assign offsets to instructions
- * and literals and compute the total size of the compiled unit.
- * TODO: consolidate w/ Arm assembly mechanism.
- */
-void Mips64Mir2Lir::AssignOffsets() {
- int offset = AssignInsnOffsets();
-
- // Const values have to be word aligned.
- offset = RoundUp(offset, 4);
-
- // Set up offsets for literals.
- data_offset_ = offset;
-
- offset = AssignLiteralOffset(offset);
-
- offset = AssignSwitchTablesOffset(offset);
-
- offset = AssignFillArrayDataOffset(offset);
-
- total_size_ = offset;
-}
-
-/*
- * Go over each instruction in the list and calculate the offset from the top
- * before sending them off to the assembler. If an out-of-range branch
- * distance is seen, rearrange the instructions a bit to correct it.
- * TODO: consolidate w/ Arm assembly mechanism.
- */
-void Mips64Mir2Lir::AssembleLIR() {
- cu_->NewTimingSplit("Assemble");
- AssignOffsets();
- int assembler_retries = 0;
- /*
- * Assemble here. Note that we generate code with optimistic assumptions
- * and if found not to work, we'll have to redo the sequence and retry.
- */
-
- while (true) {
- AssemblerStatus res = AssembleInstructions(0);
- if (res == kSuccess) {
- break;
- } else {
- assembler_retries++;
- if (assembler_retries > MAX_ASSEMBLER_RETRIES) {
- CodegenDump();
- LOG(FATAL) << "Assembler error - too many retries";
- }
- // Redo offsets and try again.
- AssignOffsets();
- code_buffer_.clear();
- }
- }
-
- // Install literals.
- InstallLiteralPools();
-
- // Install switch tables.
- InstallSwitchTables();
-
- // Install fill array data.
- InstallFillArrayData();
-
- // Create the mapping table and native offset to reference map.
- cu_->NewTimingSplit("PcMappingTable");
- CreateMappingTables();
-
- cu_->NewTimingSplit("GcMap");
- CreateNativeGcMap();
-}
-
-} // namespace art
diff --git a/compiler/dex/quick/mips64/backend_mips64.h b/compiler/dex/quick/mips64/backend_mips64.h
deleted file mode 100644
index cc30ae0..0000000
--- a/compiler/dex/quick/mips64/backend_mips64.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_MIPS64_BACKEND_MIPS64_H_
-#define ART_COMPILER_DEX_QUICK_MIPS64_BACKEND_MIPS64_H_
-
-namespace art {
-
-struct CompilationUnit;
-class Mir2Lir;
-class MIRGraph;
-class ArenaAllocator;
-
-Mir2Lir* Mips64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
- ArenaAllocator* const arena);
-
-} // namespace art
-
-#endif // ART_COMPILER_DEX_QUICK_MIPS64_BACKEND_MIPS64_H_
diff --git a/compiler/dex/quick/mips64/call_mips64.cc b/compiler/dex/quick/mips64/call_mips64.cc
deleted file mode 100644
index 31be1c2..0000000
--- a/compiler/dex/quick/mips64/call_mips64.cc
+++ /dev/null
@@ -1,421 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This file contains codegen for the Mips64 ISA */
-
-#include "codegen_mips64.h"
-
-#include "base/logging.h"
-#include "dex/mir_graph.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "gc/accounting/card_table.h"
-#include "mips64_lir.h"
-#include "mirror/art_method.h"
-#include "mirror/object_array-inl.h"
-
-namespace art {
-
-bool Mips64Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) {
- // TODO
- UNUSED(bb, mir, special);
- return false;
-}
-
-/*
- * The lack of pc-relative loads on Mips64 presents somewhat of a challenge
- * for our PIC switch table strategy. To materialize the current location
- * we'll do a dummy JAL and reference our tables using rRA as the
- * base register. Note that rRA will be used both as the base to
- * locate the switch table data and as the reference base for the switch
- * target offsets stored in the table. We'll use a special pseudo-instruction
- * to represent the jal and trigger the construction of the
- * switch table offsets (which will happen after final assembly and all
- * labels are fixed).
- *
- * The test loop will look something like:
- *
- * ori r_end, rZERO, #table_size ; size in bytes
- * jal BaseLabel ; stores "return address" (BaseLabel) in rRA
- * nop ; opportunistically fill
- * BaseLabel:
- * addiu r_base, rRA, <table> - <BaseLabel> ; table relative to BaseLabel
- * addu r_end, r_end, r_base ; end of table
- * lw r_val, [rSP, v_reg_off] ; Test Value
- * loop:
- * beq r_base, r_end, done
- * lw r_key, 0(r_base)
- * addu r_base, 8
- * bne r_val, r_key, loop
- * lw r_disp, -4(r_base)
- * addu rRA, r_disp
- * jalr rZERO, rRA
- * done:
- *
- */
-void Mips64Mir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
- const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
- // Add the table to the list - we'll process it later.
- SwitchTable* tab_rec = static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable),
- kArenaAllocData));
- tab_rec->switch_mir = mir;
- tab_rec->table = table;
- tab_rec->vaddr = current_dalvik_offset_;
- int elements = table[1];
- switch_tables_.push_back(tab_rec);
-
- // The table is composed of 8-byte key/disp pairs.
- int byte_size = elements * 8;
-
- int size_hi = byte_size >> 16;
- int size_lo = byte_size & 0xffff;
-
- RegStorage r_end = AllocTempWide();
- if (size_hi) {
- NewLIR2(kMips64Lui, r_end.GetReg(), size_hi);
- }
- // Must prevent code motion for the curr pc pair.
- GenBarrier(); // Scheduling barrier.
- NewLIR0(kMips64CurrPC); // Really a jal to .+8.
- // Now, fill the branch delay slot.
- if (size_hi) {
- NewLIR3(kMips64Ori, r_end.GetReg(), r_end.GetReg(), size_lo);
- } else {
- NewLIR3(kMips64Ori, r_end.GetReg(), rZERO, size_lo);
- }
- GenBarrier(); // Scheduling barrier.
-
- // Construct BaseLabel and set up table base register.
- LIR* base_label = NewLIR0(kPseudoTargetLabel);
- // Remember base label so offsets can be computed later.
- tab_rec->anchor = base_label;
- RegStorage r_base = AllocTempWide();
- NewLIR4(kMips64Delta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));
- OpRegRegReg(kOpAdd, r_end, r_end, r_base);
-
- // Grab switch test value.
- rl_src = LoadValue(rl_src, kCoreReg);
-
- // Test loop.
- RegStorage r_key = AllocTemp();
- LIR* loop_label = NewLIR0(kPseudoTargetLabel);
- LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL);
- Load32Disp(r_base, 0, r_key);
- OpRegImm(kOpAdd, r_base, 8);
- OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label);
- RegStorage r_disp = AllocTemp();
- Load32Disp(r_base, -4, r_disp);
- OpRegRegReg(kOpAdd, TargetReg(kLr, kWide), TargetReg(kLr, kWide), r_disp);
- OpReg(kOpBx, TargetReg(kLr, kWide));
-
- // Loop exit.
- LIR* exit_label = NewLIR0(kPseudoTargetLabel);
- exit_branch->target = exit_label;
-}
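Worked numbers for the size materialization above (illustrative): 10000 key/disp pairs give byte_size = 80000 = 0x13880, so size_hi = 0x1 (lui) and size_lo = 0x3880 (ori into the same register); 100 pairs give byte_size = 800, which fits the single ori against rZERO.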
-
-/*
- * Code pattern will look something like:
- *
- * lw r_val
- * jal BaseLabel ; stores "return address" (BaseLabel) in rRA
- * nop ; opportunistically fill
- * [subiu r_val, bias] ; Remove bias if low_val != 0
- * bound check -> done
- * lw r_disp, [rRA, r_val]
- * addu rRA, r_disp
- * jalr rZERO, rRA
- * done:
- */
-void Mips64Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
- const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
- // Add the table to the list - we'll process it later.
- SwitchTable* tab_rec =
- static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
- tab_rec->switch_mir = mir;
- tab_rec->table = table;
- tab_rec->vaddr = current_dalvik_offset_;
- int size = table[1];
- switch_tables_.push_back(tab_rec);
-
- // Get the switch value.
- rl_src = LoadValue(rl_src, kCoreReg);
-
- // Prepare the bias. If too big, handle 1st stage here.
- int low_key = s4FromSwitchData(&table[2]);
- bool large_bias = false;
- RegStorage r_key;
- if (low_key == 0) {
- r_key = rl_src.reg;
- } else if ((low_key & 0xffff) != low_key) {
- r_key = AllocTemp();
- LoadConstant(r_key, low_key);
- large_bias = true;
- } else {
- r_key = AllocTemp();
- }
-
- // Must prevent code motion for the curr pc pair.
- GenBarrier();
- NewLIR0(kMips64CurrPC); // Really a jal to .+8.
- // Now, fill the branch delay slot with bias strip.
- if (low_key == 0) {
- NewLIR0(kMips64Nop);
- } else {
- if (large_bias) {
- OpRegRegReg(kOpSub, r_key, rl_src.reg, r_key);
- } else {
- OpRegRegImm(kOpSub, r_key, rl_src.reg, low_key);
- }
- }
- GenBarrier(); // Scheduling barrier.
-
- // Construct BaseLabel and set up table base register.
- LIR* base_label = NewLIR0(kPseudoTargetLabel);
- // Remember base label so offsets can be computed later.
- tab_rec->anchor = base_label;
-
- // Bounds check - if < 0 or >= size, continue following the switch.
- LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, NULL);
-
- // Materialize the table base pointer.
- RegStorage r_base = AllocTempWide();
- NewLIR4(kMips64Delta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));
-
- // Load the displacement from the switch table.
- RegStorage r_disp = AllocTemp();
- LoadBaseIndexed(r_base, r_key, r_disp, 2, k32);
-
- // Add to rRA and go.
- OpRegRegReg(kOpAdd, TargetReg(kLr, kWide), TargetReg(kLr, kWide), r_disp);
- OpReg(kOpBx, TargetReg(kLr, kWide));
-
- // Branch_over target here.
- LIR* target = NewLIR0(kPseudoTargetLabel);
- branch_over->target = target;
-}
-
-void Mips64Mir2Lir::GenMoveException(RegLocation rl_dest) {
- int ex_offset = Thread::ExceptionOffset<8>().Int32Value();
- RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
- RegStorage reset_reg = AllocTempRef();
- LoadRefDisp(rs_rMIPS64_SELF, ex_offset, rl_result.reg, kNotVolatile);
- LoadConstant(reset_reg, 0);
- StoreRefDisp(rs_rMIPS64_SELF, ex_offset, reset_reg, kNotVolatile);
- FreeTemp(reset_reg);
- StoreValue(rl_dest, rl_result);
-}
-
-void Mips64Mir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
- RegStorage reg_card_base = AllocTempWide();
- RegStorage reg_card_no = AllocTempWide();
- // NOTE: native pointer.
- LoadWordDisp(rs_rMIPS64_SELF, Thread::CardTableOffset<8>().Int32Value(), reg_card_base);
- OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
- StoreBaseIndexed(reg_card_base, reg_card_no, As32BitReg(reg_card_base), 0, kUnsignedByte);
- FreeTemp(reg_card_base);
- FreeTemp(reg_card_no);
-}
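The indexed store above computes card_base + (tgt_addr >> kCardShift) and writes the low byte of card_base itself as the dirty marker (hence As32BitReg(reg_card_base) as the source). A minimal sketch of the address arithmetic, assuming 128-byte cards (kCardShift == 7) as in gc/accounting/card_table.h:

    // Sketch only; the shift value is an assumption.
    static unsigned char* CardAddress(unsigned char* card_base, unsigned long addr) {
      return card_base + (addr >> 7);  // one card byte per 128 heap bytes
    }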
-
-void Mips64Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
- int spill_count = num_core_spills_ + num_fp_spills_;
- /*
- * On entry, rMIPS64_ARG0, rMIPS64_ARG1, rMIPS64_ARG2, rMIPS64_ARG3,
- * rMIPS64_ARG4, rMIPS64_ARG5, rMIPS64_ARG6 & rMIPS64_ARG7 are live.
- * Let the register allocation mechanism know so it doesn't try to
- * use any of them when expanding the frame or flushing.
- */
- LockTemp(rs_rMIPS64_ARG0);
- LockTemp(rs_rMIPS64_ARG1);
- LockTemp(rs_rMIPS64_ARG2);
- LockTemp(rs_rMIPS64_ARG3);
- LockTemp(rs_rMIPS64_ARG4);
- LockTemp(rs_rMIPS64_ARG5);
- LockTemp(rs_rMIPS64_ARG6);
- LockTemp(rs_rMIPS64_ARG7);
-
- /*
- * We can safely skip the stack overflow check if we're
- * a leaf *and* our frame size < fudge factor.
- */
- bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_,
- kMips64);
- NewLIR0(kPseudoMethodEntry);
- RegStorage check_reg = AllocTempWide();
- RegStorage new_sp = AllocTempWide();
- if (!skip_overflow_check) {
- // Load stack limit.
- LoadWordDisp(rs_rMIPS64_SELF, Thread::StackEndOffset<8>().Int32Value(), check_reg);
- }
- // Spill core callee saves.
- SpillCoreRegs();
- // NOTE: promotion of FP regs currently unsupported, thus no FP spill.
- DCHECK_EQ(num_fp_spills_, 0);
- const int frame_sub = frame_size_ - spill_count * 8;
- if (!skip_overflow_check) {
- class StackOverflowSlowPath : public LIRSlowPath {
- public:
- StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, nullptr), sp_displace_(sp_displace) {
- }
- void Compile() OVERRIDE {
- m2l_->ResetRegPool();
- m2l_->ResetDefTracking();
- GenerateTargetLabel(kPseudoThrowTarget);
- // Load RA from the top of the frame.
- m2l_->LoadWordDisp(rs_rMIPS64_SP, sp_displace_ - 8, rs_rRAd);
- m2l_->OpRegImm(kOpAdd, rs_rMIPS64_SP, sp_displace_);
- m2l_->ClobberCallerSave();
- RegStorage r_tgt = m2l_->CallHelperSetup(kQuickThrowStackOverflow); // Doesn't clobber LR.
- m2l_->CallHelper(r_tgt, kQuickThrowStackOverflow, false /* MarkSafepointPC */,
- false /* UseLink */);
- }
-
- private:
- const size_t sp_displace_;
- };
- OpRegRegImm(kOpSub, new_sp, rs_rMIPS64_SP, frame_sub);
- LIR* branch = OpCmpBranch(kCondUlt, new_sp, check_reg, nullptr);
- AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_count * 8));
- // TODO: avoid copy for small frame sizes.
- OpRegCopy(rs_rMIPS64_SP, new_sp); // Establish stack.
- } else {
- OpRegImm(kOpSub, rs_rMIPS64_SP, frame_sub);
- }
-
- FlushIns(ArgLocs, rl_method);
-
- FreeTemp(rs_rMIPS64_ARG0);
- FreeTemp(rs_rMIPS64_ARG1);
- FreeTemp(rs_rMIPS64_ARG2);
- FreeTemp(rs_rMIPS64_ARG3);
- FreeTemp(rs_rMIPS64_ARG4);
- FreeTemp(rs_rMIPS64_ARG5);
- FreeTemp(rs_rMIPS64_ARG6);
- FreeTemp(rs_rMIPS64_ARG7);
-}
-
-void Mips64Mir2Lir::GenExitSequence() {
- /*
- * In the exit path, rMIPS64_RET0/rMIPS64_RET1 are live - make sure they aren't
- * allocated by the register utilities as temps.
- */
- LockTemp(rs_rMIPS64_RET0);
- LockTemp(rs_rMIPS64_RET1);
-
- NewLIR0(kPseudoMethodExit);
- UnSpillCoreRegs();
- OpReg(kOpBx, rs_rRAd);
-}
-
-void Mips64Mir2Lir::GenSpecialExitSequence() {
- OpReg(kOpBx, rs_rRAd);
-}
-
-void Mips64Mir2Lir::GenSpecialEntryForSuspend() {
- // Keep 16-byte stack alignment - push A0, i.e. ArtMethod* and RA.
- core_spill_mask_ = (1u << rs_rRAd.GetRegNum());
- num_core_spills_ = 1u;
- fp_spill_mask_ = 0u;
- num_fp_spills_ = 0u;
- frame_size_ = 16u;
- core_vmap_table_.clear();
- fp_vmap_table_.clear();
- OpRegImm(kOpSub, rs_rMIPS64_SP, frame_size_);
- StoreWordDisp(rs_rMIPS64_SP, frame_size_ - 8, rs_rRAd);
- StoreWordDisp(rs_rMIPS64_SP, 0, rs_rA0d);
-}
-
-void Mips64Mir2Lir::GenSpecialExitForSuspend() {
- // Pop the frame. Don't pop ArtMethod*, it's no longer needed.
- LoadWordDisp(rs_rMIPS64_SP, frame_size_ - 8, rs_rRAd);
- OpRegImm(kOpAdd, rs_rMIPS64_SP, frame_size_);
-}
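The two suspend-path helpers above agree on a fixed 16-byte frame; its layout (descending stack assumed):

    /*
     *   sp + 8 : RA         (stored and reloaded at frame_size_ - 8)
     *   sp + 0 : ArtMethod* (A0; deliberately not reloaded on exit)
     * 16 bytes total preserves the required 16-byte stack alignment.
     */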
-
-/*
- * Bit of a hack here - in the absence of a real scheduling pass,
- * emit the next instruction in static & direct invoke sequences.
- */
-static int Mips64NextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED, int state,
- const MethodReference& target_method, uint32_t,
- uintptr_t direct_code, uintptr_t direct_method, InvokeType type) {
- Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
- if (direct_code != 0 && direct_method != 0) {
- switch (state) {
- case 0: // Get the current Method* [sets kArg0]
- if (direct_code != static_cast<uintptr_t>(-1)) {
- cg->LoadConstantWide(cg->TargetPtrReg(kInvokeTgt), direct_code);
- } else {
- cg->LoadCodeAddress(target_method, type, kInvokeTgt);
- }
- if (direct_method != static_cast<uintptr_t>(-1)) {
- cg->LoadConstantWide(cg->TargetReg(kArg0, kRef), direct_method);
- } else {
- cg->LoadMethodAddress(target_method, type, kArg0);
- }
- break;
- default:
- return -1;
- }
- } else {
- RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
- switch (state) {
- case 0: // Get the current Method* [sets kArg0]
- // TUNING: we can save a reg copy if Method* has been promoted.
- cg->LoadCurrMethodDirect(arg0_ref);
- break;
- case 1: // Get method->dex_cache_resolved_methods_
- cg->LoadRefDisp(arg0_ref, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
- arg0_ref, kNotVolatile);
- // Set up direct code if known.
- if (direct_code != 0) {
- if (direct_code != static_cast<uintptr_t>(-1)) {
- cg->LoadConstantWide(cg->TargetPtrReg(kInvokeTgt), direct_code);
- } else {
- CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
- cg->LoadCodeAddress(target_method, type, kInvokeTgt);
- }
- }
- break;
- case 2: // Grab target method*
- CHECK_EQ(cu->dex_file, target_method.dex_file);
- cg->LoadRefDisp(arg0_ref, mirror::ObjectArray<mirror::Object>::
- OffsetOfElement(target_method.dex_method_index).Int32Value(), arg0_ref,
- kNotVolatile);
- break;
- case 3: // Grab the code from the method*
- if (direct_code == 0) {
- int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- InstructionSetPointerSize(cu->instruction_set)).Int32Value();
-        // Get the compiled code address [use kArg0, set kInvokeTgt].
- cg->LoadWordDisp(arg0_ref, offset, cg->TargetPtrReg(kInvokeTgt));
- }
- break;
- default:
- return -1;
- }
- }
- return state + 1;
-}
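
For illustration, the state machine above is pumped one step per call until it returns -1;
a minimal driver sketch (hypothetical name and loop; in ART the invoke lowering interleaves
these calls with argument setup and only loops like this at the end of the sequence):

  static void EmitInvokeSequence(CompilationUnit* cu, CallInfo* info,
                                 const MethodReference& target_method,
                                 InvokeType type, NextCallInsn next_call_insn) {
    int state = 0;
    while (state >= 0) {  // Each call emits one step; -1 signals completion.
      state = next_call_insn(cu, info, state, target_method, 0u,
                             0u /* direct_code */, 0u /* direct_method */, type);
    }
  }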
-
-NextCallInsn Mips64Mir2Lir::GetNextSDCallInsn() {
- return Mips64NextSDCallInsn;
-}
-
-LIR* Mips64Mir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info ATTRIBUTE_UNUSED) {
- return OpReg(kOpBlx, TargetPtrReg(kInvokeTgt));
-}
-
-} // namespace art
diff --git a/compiler/dex/quick/mips64/codegen_mips64.h b/compiler/dex/quick/mips64/codegen_mips64.h
deleted file mode 100644
index 57c30d8..0000000
--- a/compiler/dex/quick/mips64/codegen_mips64.h
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_MIPS64_CODEGEN_MIPS64_H_
-#define ART_COMPILER_DEX_QUICK_MIPS64_CODEGEN_MIPS64_H_
-
-#include "dex/quick/mir_to_lir.h"
-#include "mips64_lir.h"
-
-namespace art {
-
-struct CompilationUnit;
-
-class Mips64Mir2Lir FINAL : public Mir2Lir {
- protected:
- class InToRegStorageMips64Mapper : public InToRegStorageMapper {
- public:
- explicit InToRegStorageMips64Mapper(Mir2Lir* m2l) : m2l_(m2l), cur_arg_reg_(0) {}
- virtual RegStorage GetNextReg(ShortyArg arg);
- virtual void Reset() OVERRIDE {
- cur_arg_reg_ = 0;
- }
- protected:
- Mir2Lir* m2l_;
- private:
- size_t cur_arg_reg_;
- };
-
- InToRegStorageMips64Mapper in_to_reg_storage_mips64_mapper_;
- InToRegStorageMapper* GetResetedInToRegStorageMapper() OVERRIDE {
- in_to_reg_storage_mips64_mapper_.Reset();
- return &in_to_reg_storage_mips64_mapper_;
- }
-
- public:
- Mips64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
-
- // Required for target - codegen utilities.
- bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
- RegLocation rl_dest, int lit);
- bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
- void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1, int32_t constant)
- OVERRIDE;
- void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1, int64_t constant)
- OVERRIDE;
- LIR* CheckSuspendUsingLoad() OVERRIDE;
- RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
- LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
- VolatileKind is_volatile) OVERRIDE;
- LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
- OpSize size) OVERRIDE;
- LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
- LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
- LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
- VolatileKind is_volatile) OVERRIDE;
- LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
- OpSize size) OVERRIDE;
- LIR* GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest);
- LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src);
-
- /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
- void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
-
- // Required for target - register utilities.
- RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
- RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) OVERRIDE {
- if (wide_kind == kWide || wide_kind == kRef) {
- return As64BitReg(TargetReg(reg));
- } else {
- return Check32BitReg(TargetReg(reg));
- }
- }
- RegStorage TargetPtrReg(SpecialTargetRegister reg) OVERRIDE {
- return As64BitReg(TargetReg(reg));
- }
- RegLocation GetReturnAlt();
- RegLocation GetReturnWideAlt();
- RegLocation LocCReturn();
- RegLocation LocCReturnRef();
- RegLocation LocCReturnDouble();
- RegLocation LocCReturnFloat();
- RegLocation LocCReturnWide();
- ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
- void AdjustSpillMask();
- void ClobberCallerSave();
- void FreeCallTemps();
- void LockCallTemps();
- void CompilerInitializeRegAlloc();
-
- // Required for target - miscellaneous.
- void AssembleLIR();
- int AssignInsnOffsets();
- void AssignOffsets();
- AssemblerStatus AssembleInstructions(CodeOffset start_addr);
- void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
- void SetupTargetResourceMasks(LIR* lir, uint64_t flags, ResourceMask* use_mask,
- ResourceMask* def_mask) OVERRIDE;
- const char* GetTargetInstFmt(int opcode);
- const char* GetTargetInstName(int opcode);
- std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
- ResourceMask GetPCUseDefEncoding() const OVERRIDE;
- uint64_t GetTargetInstFlags(int opcode);
- size_t GetInsnSize(LIR* lir) OVERRIDE;
- bool IsUnconditionalBranch(LIR* lir);
-
- // Get the register class for load/store of a field.
- RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
-
- // Required for target - Dalvik-level generators.
- void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation lr_shift);
- void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, int flags);
- void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
- RegLocation rl_dest, int scale);
- void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
- RegLocation rl_src, int scale, bool card_mark);
- void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_shift, int flags);
- void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
- bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
- bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
- bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
- bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
- bool GenInlinedSqrt(CallInfo* info);
- bool GenInlinedPeek(CallInfo* info, OpSize size);
- bool GenInlinedPoke(CallInfo* info, OpSize size);
- void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
- void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, int flags) OVERRIDE;
- RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
- RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
- void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenDivZeroCheckWide(RegStorage reg);
- void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
- void GenExitSequence();
- void GenSpecialExitSequence() OVERRIDE;
- void GenSpecialEntryForSuspend() OVERRIDE;
- void GenSpecialExitForSuspend() OVERRIDE;
- void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
- void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
- void GenSelect(BasicBlock* bb, MIR* mir);
- void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
- int32_t true_val, int32_t false_val, RegStorage rs_dest,
- RegisterClass dest_reg_class) OVERRIDE;
- bool GenMemBarrier(MemBarrierKind barrier_kind);
- void GenMoveException(RegLocation rl_dest);
- void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
- int first_bit, int second_bit);
- void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
- void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
- void GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
- void GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
- bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);
-
- // Required for target - single operation generators.
- LIR* OpUnconditionalBranch(LIR* target);
- LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
- LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
- LIR* OpCondBranch(ConditionCode cc, LIR* target);
- LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
- LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
- LIR* OpIT(ConditionCode cond, const char* guide);
- void OpEndIT(LIR* it);
- LIR* OpMem(OpKind op, RegStorage r_base, int disp);
- LIR* OpPcRelLoad(RegStorage reg, LIR* target);
- LIR* OpReg(OpKind op, RegStorage r_dest_src);
- void OpRegCopy(RegStorage r_dest, RegStorage r_src);
- LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
- LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
- LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
- LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
- LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
- LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
- LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
- LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
- LIR* OpTestSuspend(LIR* target);
- LIR* OpVldm(RegStorage r_base, int count);
- LIR* OpVstm(RegStorage r_base, int count);
- void OpRegCopyWide(RegStorage dest, RegStorage src);
-
- // TODO: collapse r_dest.
- LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
- // TODO: collapse r_src.
- LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
- void SpillCoreRegs();
- void UnSpillCoreRegs();
- static const Mips64EncodingMap EncodingMap[kMips64Last];
- bool InexpensiveConstantInt(int32_t value);
- bool InexpensiveConstantFloat(int32_t value);
- bool InexpensiveConstantLong(int64_t value);
- bool InexpensiveConstantDouble(int64_t value);
-
- bool WideGPRsAreAliases() const OVERRIDE {
- return true; // 64b architecture.
- }
- bool WideFPRsAreAliases() const OVERRIDE {
- return true; // 64b architecture.
- }
-
- LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
- RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
- bool is_div, int flags) OVERRIDE;
- RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div)
- OVERRIDE;
- NextCallInsn GetNextSDCallInsn() OVERRIDE;
- LIR* GenCallInsn(const MirMethodLoweringInfo& method_info) OVERRIDE;
- // Unimplemented intrinsics.
- bool GenInlinedCharAt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
- return false;
- }
- bool GenInlinedAbsInt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
- return false;
- }
- bool GenInlinedAbsLong(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
- return false;
- }
- bool GenInlinedIndexOf(CallInfo* info ATTRIBUTE_UNUSED, bool zero_based ATTRIBUTE_UNUSED)
- OVERRIDE {
- return false;
- }
-
- private:
- void GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
- void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
- void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags);
- void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src,
- RegisterClass reg_class);
-
- void ConvertShortToLongBranch(LIR* lir);
-
- /**
- * @param reg #RegStorage containing a Solo64 input register (e.g. @c a1 or @c d0).
- * @return A Solo32 with the same register number as the @p reg (e.g. @c a1 or @c f0).
- * @see As64BitReg
- */
- RegStorage As32BitReg(RegStorage reg) {
- DCHECK(!reg.IsPair());
- if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
- if (kFailOnSizeError) {
- LOG(FATAL) << "Expected 64b register";
- } else {
- LOG(WARNING) << "Expected 64b register";
- return reg;
- }
- }
- RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
- reg.GetRawBits() & RegStorage::kRegTypeMask);
- DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
- ->GetReg().GetReg(),
- ret_val.GetReg());
- return ret_val;
- }
-
- RegStorage Check32BitReg(RegStorage reg) {
- if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
- if (kFailOnSizeError) {
- LOG(FATAL) << "Checked for 32b register";
- } else {
- LOG(WARNING) << "Checked for 32b register";
- return As32BitReg(reg);
- }
- }
- return reg;
- }
-
- /**
- * @param reg #RegStorage containing a Solo32 input register (e.g. @c a1 or @c f0).
- * @return A Solo64 with the same register number as the @p reg (e.g. @c a1 or @c d0).
- */
- RegStorage As64BitReg(RegStorage reg) {
- DCHECK(!reg.IsPair());
- if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
- if (kFailOnSizeError) {
- LOG(FATAL) << "Expected 32b register";
- } else {
- LOG(WARNING) << "Expected 32b register";
- return reg;
- }
- }
- RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
- reg.GetRawBits() & RegStorage::kRegTypeMask);
- DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
- ->GetReg().GetReg(),
- ret_val.GetReg());
- return ret_val;
- }
-
- RegStorage Check64BitReg(RegStorage reg) {
- if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
- if (kFailOnSizeError) {
- LOG(FATAL) << "Checked for 64b register";
- } else {
- LOG(WARNING) << "Checked for 64b register";
- return As64BitReg(reg);
- }
- }
- return reg;
- }
-
- void GenBreakpoint(int code);
-};
-
-} // namespace art
-
-#endif // ART_COMPILER_DEX_QUICK_MIPS64_CODEGEN_MIPS64_H_
diff --git a/compiler/dex/quick/mips64/fp_mips64.cc b/compiler/dex/quick/mips64/fp_mips64.cc
deleted file mode 100644
index 5c8ee9c..0000000
--- a/compiler/dex/quick/mips64/fp_mips64.cc
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_mips64.h"
-
-#include "base/logging.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "mips64_lir.h"
-
-namespace art {
-
-void Mips64Mir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
- int op = kMips64Nop;
- RegLocation rl_result;
-
- /*
- * Don't attempt to optimize register usage since these opcodes call out to
- * the handlers.
- */
- switch (opcode) {
- case Instruction::ADD_FLOAT_2ADDR:
- case Instruction::ADD_FLOAT:
- op = kMips64Fadds;
- break;
- case Instruction::SUB_FLOAT_2ADDR:
- case Instruction::SUB_FLOAT:
- op = kMips64Fsubs;
- break;
- case Instruction::DIV_FLOAT_2ADDR:
- case Instruction::DIV_FLOAT:
- op = kMips64Fdivs;
- break;
- case Instruction::MUL_FLOAT_2ADDR:
- case Instruction::MUL_FLOAT:
- op = kMips64Fmuls;
- break;
- case Instruction::REM_FLOAT_2ADDR:
- case Instruction::REM_FLOAT:
- FlushAllRegs(); // Send everything to home location.
- CallRuntimeHelperRegLocationRegLocation(kQuickFmodf, rl_src1, rl_src2, false);
- rl_result = GetReturn(kFPReg);
- StoreValue(rl_dest, rl_result);
- return;
- case Instruction::NEG_FLOAT:
- GenNegFloat(rl_dest, rl_src1);
- return;
- default:
- LOG(FATAL) << "Unexpected opcode: " << opcode;
- }
- rl_src1 = LoadValue(rl_src1, kFPReg);
- rl_src2 = LoadValue(rl_src2, kFPReg);
- rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
- StoreValue(rl_dest, rl_result);
-}
-
-void Mips64Mir2Lir::GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
- int op = kMips64Nop;
- RegLocation rl_result;
-
- switch (opcode) {
- case Instruction::ADD_DOUBLE_2ADDR:
- case Instruction::ADD_DOUBLE:
- op = kMips64Faddd;
- break;
- case Instruction::SUB_DOUBLE_2ADDR:
- case Instruction::SUB_DOUBLE:
- op = kMips64Fsubd;
- break;
- case Instruction::DIV_DOUBLE_2ADDR:
- case Instruction::DIV_DOUBLE:
- op = kMips64Fdivd;
- break;
- case Instruction::MUL_DOUBLE_2ADDR:
- case Instruction::MUL_DOUBLE:
- op = kMips64Fmuld;
- break;
- case Instruction::REM_DOUBLE_2ADDR:
- case Instruction::REM_DOUBLE:
- FlushAllRegs(); // Send everything to home location.
- CallRuntimeHelperRegLocationRegLocation(kQuickFmod, rl_src1, rl_src2, false);
- rl_result = GetReturnWide(kFPReg);
- StoreValueWide(rl_dest, rl_result);
- return;
- case Instruction::NEG_DOUBLE:
- GenNegDouble(rl_dest, rl_src1);
- return;
- default:
-      LOG(FATAL) << "Unexpected opcode: " << opcode;
- }
- rl_src1 = LoadValueWide(rl_src1, kFPReg);
- DCHECK(rl_src1.wide);
- rl_src2 = LoadValueWide(rl_src2, kFPReg);
- DCHECK(rl_src2.wide);
- rl_result = EvalLoc(rl_dest, kFPReg, true);
- DCHECK(rl_dest.wide);
- DCHECK(rl_result.wide);
- NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
- StoreValueWide(rl_dest, rl_result);
-}
-
-void Mips64Mir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
- int32_t constant) {
- // TODO: need mips64 implementation.
- UNUSED(rl_dest, rl_src1, constant);
- LOG(FATAL) << "Unimplemented GenMultiplyByConstantFloat in mips64";
-}
-
-void Mips64Mir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
- int64_t constant) {
- // TODO: need mips64 implementation.
- UNUSED(rl_dest, rl_src1, constant);
- LOG(FATAL) << "Unimplemented GenMultiplyByConstantDouble in mips64";
-}
-
-void Mips64Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src) {
- int op = kMips64Nop;
- RegLocation rl_result;
- switch (opcode) {
- case Instruction::INT_TO_FLOAT:
- op = kMips64Fcvtsw;
- break;
- case Instruction::DOUBLE_TO_FLOAT:
- op = kMips64Fcvtsd;
- break;
- case Instruction::FLOAT_TO_DOUBLE:
- op = kMips64Fcvtds;
- break;
- case Instruction::INT_TO_DOUBLE:
- op = kMips64Fcvtdw;
- break;
- case Instruction::FLOAT_TO_INT:
- GenConversionCall(kQuickF2iz, rl_dest, rl_src, kCoreReg);
- return;
- case Instruction::DOUBLE_TO_INT:
- GenConversionCall(kQuickD2iz, rl_dest, rl_src, kCoreReg);
- return;
- case Instruction::LONG_TO_DOUBLE:
- GenConversionCall(kQuickL2d, rl_dest, rl_src, kFPReg);
- return;
- case Instruction::FLOAT_TO_LONG:
- GenConversionCall(kQuickF2l, rl_dest, rl_src, kCoreReg);
- return;
- case Instruction::LONG_TO_FLOAT:
- GenConversionCall(kQuickL2f, rl_dest, rl_src, kFPReg);
- return;
- case Instruction::DOUBLE_TO_LONG:
- GenConversionCall(kQuickD2l, rl_dest, rl_src, kCoreReg);
- return;
- default:
- LOG(FATAL) << "Unexpected opcode: " << opcode;
- }
- if (rl_src.wide) {
- rl_src = LoadValueWide(rl_src, kFPReg);
- } else {
- rl_src = LoadValue(rl_src, kFPReg);
- }
- rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, rl_result.reg.GetReg(), rl_src.reg.GetReg());
- if (rl_dest.wide) {
- StoreValueWide(rl_dest, rl_result);
- } else {
- StoreValue(rl_dest, rl_result);
- }
-}
-
-void Mips64Mir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
- bool wide = true;
- QuickEntrypointEnum target;
-
- switch (opcode) {
- case Instruction::CMPL_FLOAT:
- target = kQuickCmplFloat;
- wide = false;
- break;
- case Instruction::CMPG_FLOAT:
- target = kQuickCmpgFloat;
- wide = false;
- break;
- case Instruction::CMPL_DOUBLE:
- target = kQuickCmplDouble;
- break;
- case Instruction::CMPG_DOUBLE:
- target = kQuickCmpgDouble;
- break;
- default:
- LOG(FATAL) << "Unexpected opcode: " << opcode;
- target = kQuickCmplFloat;
- }
- FlushAllRegs();
- LockCallTemps();
- if (wide) {
- RegStorage r_tmp1(RegStorage::k64BitSolo, rMIPS64_FARG0);
- RegStorage r_tmp2(RegStorage::k64BitSolo, rMIPS64_FARG1);
- LoadValueDirectWideFixed(rl_src1, r_tmp1);
- LoadValueDirectWideFixed(rl_src2, r_tmp2);
- } else {
- LoadValueDirectFixed(rl_src1, rs_rMIPS64_FARG0);
- LoadValueDirectFixed(rl_src2, rs_rMIPS64_FARG1);
- }
- RegStorage r_tgt = LoadHelper(target);
- // NOTE: not a safepoint.
- OpReg(kOpBlx, r_tgt);
- RegLocation rl_result = GetReturn(kCoreReg);
- StoreValue(rl_dest, rl_result);
-}
-
-void Mips64Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) {
- UNUSED(bb, mir, gt_bias, is_double);
- UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch";
-}
-
-void Mips64Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
- RegLocation rl_result;
- rl_src = LoadValue(rl_src, kFPReg);
- rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(kMips64Fnegs, rl_result.reg.GetReg(), rl_src.reg.GetReg());
- StoreValue(rl_dest, rl_result);
-}
-
-void Mips64Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
- RegLocation rl_result;
- rl_src = LoadValueWide(rl_src, kFPReg);
- rl_result = EvalLocWide(rl_dest, kFPReg, true);
- NewLIR2(kMips64Fnegd, rl_result.reg.GetReg(), rl_src.reg.GetReg());
- StoreValueWide(rl_dest, rl_result);
-}
-
-bool Mips64Mir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
- // TODO: need Mips64 implementation.
- UNUSED(info, is_min, is_long);
- return false;
-}
-
-} // namespace art
diff --git a/compiler/dex/quick/mips64/int_mips64.cc b/compiler/dex/quick/mips64/int_mips64.cc
deleted file mode 100644
index 8a57c82..0000000
--- a/compiler/dex/quick/mips64/int_mips64.cc
+++ /dev/null
@@ -1,692 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This file contains codegen for the Mips64 ISA */
-
-#include "codegen_mips64.h"
-
-#include "base/logging.h"
-#include "dex/mir_graph.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "dex/reg_storage_eq.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "mips64_lir.h"
-#include "mirror/array-inl.h"
-
-namespace art {
-
-/*
- * Compare two 64-bit values
- * x = y return 0
- * x < y return -1
- * x > y return 1
- *
- * slt temp, x, y; # (x < y) ? 1:0
- * slt res, y, x; # (x > y) ? 1:0
- * subu res, res, temp; # res = -1:1:0 for [ < > = ]
- *
- */
-void Mips64Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
- rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- rl_src2 = LoadValueWide(rl_src2, kCoreReg);
- RegStorage temp = AllocTempWide();
- RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- NewLIR3(kMips64Slt, temp.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
- NewLIR3(kMips64Slt, rl_result.reg.GetReg(), rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
- NewLIR3(kMips64Subu, rl_result.reg.GetReg(), rl_result.reg.GetReg(), temp.GetReg());
- FreeTemp(temp);
- StoreValue(rl_dest, rl_result);
-}
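
The slt/slt/subu sequence above is a branch-free signum; a C++ sketch of the value it
computes (illustrative only, not part of the file):

  // temp = (x < y), res = (x > y), result = res - temp.
  static inline int32_t CmpLongValue(int64_t x, int64_t y) {
    return static_cast<int32_t>(x > y) - static_cast<int32_t>(x < y);
  }
  // CmpLongValue(3, 7) == -1; CmpLongValue(7, 7) == 0; CmpLongValue(9, 7) == 1.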
-
-LIR* Mips64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
- LIR* branch;
- Mips64OpCode slt_op;
- Mips64OpCode br_op;
- bool cmp_zero = false;
- bool swapped = false;
- switch (cond) {
- case kCondEq:
- br_op = kMips64Beq;
- cmp_zero = true;
- break;
- case kCondNe:
- br_op = kMips64Bne;
- cmp_zero = true;
- break;
- case kCondUlt:
- slt_op = kMips64Sltu;
- br_op = kMips64Bnez;
- break;
- case kCondUge:
- slt_op = kMips64Sltu;
- br_op = kMips64Beqz;
- break;
- case kCondGe:
- slt_op = kMips64Slt;
- br_op = kMips64Beqz;
- break;
- case kCondGt:
- slt_op = kMips64Slt;
- br_op = kMips64Bnez;
- swapped = true;
- break;
- case kCondLe:
- slt_op = kMips64Slt;
- br_op = kMips64Beqz;
- swapped = true;
- break;
- case kCondLt:
- slt_op = kMips64Slt;
- br_op = kMips64Bnez;
- break;
- case kCondHi: // Gtu
- slt_op = kMips64Sltu;
- br_op = kMips64Bnez;
- swapped = true;
- break;
- default:
- LOG(FATAL) << "No support for ConditionCode: " << cond;
- return NULL;
- }
- if (cmp_zero) {
- branch = NewLIR2(br_op, src1.GetReg(), src2.GetReg());
- } else {
- RegStorage t_reg = AllocTemp();
- if (swapped) {
- NewLIR3(slt_op, t_reg.GetReg(), src2.GetReg(), src1.GetReg());
- } else {
- NewLIR3(slt_op, t_reg.GetReg(), src1.GetReg(), src2.GetReg());
- }
- branch = NewLIR1(br_op, t_reg.GetReg());
- FreeTemp(t_reg);
- }
- branch->target = target;
- return branch;
-}
-
-LIR* Mips64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg,
- int check_value, LIR* target) {
- LIR* branch;
- if (check_value != 0) {
- // TUNING: handle s16 & kCondLt/Mi case using slti.
- RegStorage t_reg = AllocTemp();
- LoadConstant(t_reg, check_value);
- branch = OpCmpBranch(cond, reg, t_reg, target);
- FreeTemp(t_reg);
- return branch;
- }
- Mips64OpCode opc;
- switch (cond) {
- case kCondEq: opc = kMips64Beqz; break;
- case kCondGe: opc = kMips64Bgez; break;
- case kCondGt: opc = kMips64Bgtz; break;
- case kCondLe: opc = kMips64Blez; break;
-    // case kCondMi:
- case kCondLt: opc = kMips64Bltz; break;
- case kCondNe: opc = kMips64Bnez; break;
- default:
- // Tuning: use slti when applicable.
- RegStorage t_reg = AllocTemp();
- LoadConstant(t_reg, check_value);
- branch = OpCmpBranch(cond, reg, t_reg, target);
- FreeTemp(t_reg);
- return branch;
- }
- branch = NewLIR1(opc, reg.GetReg());
- branch->target = target;
- return branch;
-}
-
-LIR* Mips64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
- DCHECK(!r_dest.IsPair() && !r_src.IsPair());
- if (r_dest.IsFloat() || r_src.IsFloat())
- return OpFpRegCopy(r_dest, r_src);
-  // TODO: Check that r_src and r_dest are both 32-bit or both 64-bit.
- LIR* res;
- if (r_dest.Is64Bit() || r_src.Is64Bit()) {
- res = RawLIR(current_dalvik_offset_, kMips64Move, r_dest.GetReg(), r_src.GetReg());
- } else {
- res = RawLIR(current_dalvik_offset_, kMips64Sll, r_dest.GetReg(), r_src.GetReg(), 0);
- }
- if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
- res->flags.is_nop = true;
- }
- return res;
-}
-
-void Mips64Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
- if (r_dest != r_src) {
- LIR *res = OpRegCopyNoInsert(r_dest, r_src);
- AppendLIR(res);
- }
-}
-
-void Mips64Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
- OpRegCopy(r_dest, r_src);
-}
-
-void Mips64Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
- int32_t true_val, int32_t false_val, RegStorage rs_dest,
- RegisterClass dest_reg_class) {
- UNUSED(dest_reg_class);
- // Implement as a branch-over.
- // TODO: Conditional move?
- LoadConstant(rs_dest, true_val);
- LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, NULL);
- LoadConstant(rs_dest, false_val);
- LIR* target_label = NewLIR0(kPseudoTargetLabel);
- ne_branchover->target = target_label;
-}
-
-void Mips64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
- UNUSED(bb, mir);
- UNIMPLEMENTED(FATAL) << "Need codegen for select";
-}
-
-void Mips64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
- UNUSED(bb, mir);
- UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
-}
-
-RegLocation Mips64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg1, RegStorage reg2,
- bool is_div) {
- RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- NewLIR3(is_div ? kMips64Div : kMips64Mod, rl_result.reg.GetReg(), reg1.GetReg(), reg2.GetReg());
- return rl_result;
-}
-
-RegLocation Mips64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit,
- bool is_div) {
- RegStorage t_reg = AllocTemp();
- NewLIR3(kMips64Addiu, t_reg.GetReg(), rZERO, lit);
- RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- NewLIR3(is_div ? kMips64Div : kMips64Mod, rl_result.reg.GetReg(), reg1.GetReg(), t_reg.GetReg());
- FreeTemp(t_reg);
- return rl_result;
-}
-
-RegLocation Mips64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
- bool is_div, int flags) {
- UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
- LOG(FATAL) << "Unexpected use of GenDivRem for Mips64";
- UNREACHABLE();
-}
-
-RegLocation Mips64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
- bool is_div) {
- UNUSED(rl_dest, rl_src1, lit, is_div);
- LOG(FATAL) << "Unexpected use of GenDivRemLit for Mips64";
- UNREACHABLE();
-}
-
-bool Mips64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
- UNUSED(info, is_long, is_object);
- return false;
-}
-
-bool Mips64Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
- UNUSED(info);
- // TODO: add Mips64 implementation.
- return false;
-}
-
-bool Mips64Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
- UNUSED(info);
- // TODO: add Mips64 implementation.
- return false;
-}
-
-bool Mips64Mir2Lir::GenInlinedSqrt(CallInfo* info) {
- UNUSED(info);
- return false;
-}
-
-bool Mips64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
- if (size != kSignedByte) {
- // MIPS64 supports only aligned access. Defer unaligned access to JNI implementation.
- return false;
- }
- RegLocation rl_src_address = info->args[0]; // Long address.
- RegLocation rl_dest = InlineTarget(info);
- RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);
- RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- DCHECK(size == kSignedByte);
- LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
- StoreValue(rl_dest, rl_result);
- return true;
-}
-
-bool Mips64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
- if (size != kSignedByte) {
- // MIPS64 supports only aligned access. Defer unaligned access to JNI implementation.
- return false;
- }
- RegLocation rl_src_address = info->args[0]; // Long address.
- RegLocation rl_src_value = info->args[2]; // [size] value.
- RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);
- DCHECK(size == kSignedByte);
- RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
- return true;
-}
-
-LIR* Mips64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
- UNUSED(reg, target);
- LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips64";
- UNREACHABLE();
-}
-
-LIR* Mips64Mir2Lir::OpVldm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
- LOG(FATAL) << "Unexpected use of OpVldm for Mips64";
- UNREACHABLE();
-}
-
-LIR* Mips64Mir2Lir::OpVstm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
- LOG(FATAL) << "Unexpected use of OpVstm for Mips64";
- UNREACHABLE();
-}
-
-void Mips64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result,
- int lit, int first_bit, int second_bit) {
- UNUSED(lit);
- RegStorage t_reg = AllocTemp();
- OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
- OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
- FreeTemp(t_reg);
- if (first_bit != 0) {
- OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
- }
-}
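
GenMultiplyByTwoBitMultiplier above handles literals with exactly two bits set, i.e.
lit == (1 << first_bit) + (1 << second_bit); a worked example (illustrative):

  // lit = 10 = (1 << 1) + (1 << 3), so first_bit = 1, second_bit = 3:
  //   t_reg  = src << (3 - 1)    // src * 4
  //   result = src + t_reg       // src * 5
  //   result = result << 1       // src * 10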
-
-void Mips64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
- GenDivZeroCheck(reg);
-}
-
-// Test suspend flag, return target of taken suspend branch.
-LIR* Mips64Mir2Lir::OpTestSuspend(LIR* target) {
- OpRegImm(kOpSub, rs_rMIPS64_SUSPEND, 1);
- return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, rs_rMIPS64_SUSPEND, 0, target);
-}
-
-// Decrement register and branch on condition.
-LIR* Mips64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
- OpRegImm(kOpSub, reg, 1);
- return OpCmpImmBranch(c_code, reg, 0, target);
-}
-
-bool Mips64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
- RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
- LOG(FATAL) << "Unexpected use of smallLiteralDive in Mips64";
- UNREACHABLE();
-}
-
-bool Mips64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(rl_src, rl_dest, lit);
- LOG(FATAL) << "Unexpected use of easyMultiply in Mips64";
- UNREACHABLE();
-}
-
-LIR* Mips64Mir2Lir::OpIT(ConditionCode cond, const char* guide) {
- UNUSED(cond, guide);
- LOG(FATAL) << "Unexpected use of OpIT in Mips64";
- UNREACHABLE();
-}
-
-void Mips64Mir2Lir::OpEndIT(LIR* it) {
- UNUSED(it);
- LOG(FATAL) << "Unexpected use of OpEndIT in Mips64";
-}
-
-void Mips64Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2, int flags) {
- switch (opcode) {
- case Instruction::NOT_LONG:
- GenNotLong(rl_dest, rl_src2);
- return;
- case Instruction::ADD_LONG:
- case Instruction::ADD_LONG_2ADDR:
- GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
- return;
- case Instruction::SUB_LONG:
- case Instruction::SUB_LONG_2ADDR:
- GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
- return;
- case Instruction::MUL_LONG:
- case Instruction::MUL_LONG_2ADDR:
- GenMulLong(rl_dest, rl_src1, rl_src2);
- return;
- case Instruction::DIV_LONG:
- case Instruction::DIV_LONG_2ADDR:
- GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true, flags);
- return;
- case Instruction::REM_LONG:
- case Instruction::REM_LONG_2ADDR:
- GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false, flags);
- return;
- case Instruction::AND_LONG:
- case Instruction::AND_LONG_2ADDR:
- GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
- return;
- case Instruction::OR_LONG:
- case Instruction::OR_LONG_2ADDR:
- GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
- return;
- case Instruction::XOR_LONG:
- case Instruction::XOR_LONG_2ADDR:
- GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
- return;
- case Instruction::NEG_LONG:
- GenNegLong(rl_dest, rl_src2);
- return;
-
- default:
- LOG(FATAL) << "Invalid long arith op";
- return;
- }
-}
-
-void Mips64Mir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
- rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- rl_src2 = LoadValueWide(rl_src2, kCoreReg);
- RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
- StoreValueWide(rl_dest, rl_result);
-}
-
-void Mips64Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
- rl_src = LoadValueWide(rl_src, kCoreReg);
- RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- OpRegReg(kOpMvn, rl_result.reg, rl_src.reg);
- StoreValueWide(rl_dest, rl_result);
-}
-
-void Mips64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
- rl_src = LoadValueWide(rl_src, kCoreReg);
- RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
- StoreValueWide(rl_dest, rl_result);
-}
-
-void Mips64Mir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
- rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- rl_src2 = LoadValueWide(rl_src2, kCoreReg);
- RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- NewLIR3(kMips64Dmul, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
- StoreValueWide(rl_dest, rl_result);
-}
-
-void Mips64Mir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2, bool is_div,
- int flags) {
- UNUSED(opcode);
- // TODO: Implement easy div/rem?
- rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- rl_src2 = LoadValueWide(rl_src2, kCoreReg);
- if ((flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
- GenDivZeroCheckWide(rl_src2.reg);
- }
- RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- NewLIR3(is_div ? kMips64Ddiv : kMips64Dmod, rl_result.reg.GetReg(), rl_src1.reg.GetReg(),
- rl_src2.reg.GetReg());
- StoreValueWide(rl_dest, rl_result);
-}
-
-/*
- * Generate array load
- */
-void Mips64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
- RegLocation rl_index, RegLocation rl_dest, int scale) {
- RegisterClass reg_class = RegClassBySize(size);
- int len_offset = mirror::Array::LengthOffset().Int32Value();
- int data_offset;
- RegLocation rl_result;
- rl_array = LoadValue(rl_array, kRefReg);
- rl_index = LoadValue(rl_index, kCoreReg);
-
- // FIXME: need to add support for rl_index.is_const.
-
- if (size == k64 || size == kDouble) {
- data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
- } else {
- data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
- }
-
- // Null object?
- GenNullCheck(rl_array.reg, opt_flags);
-
- RegStorage reg_ptr = AllocTempRef();
- bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
- RegStorage reg_len;
- if (needs_range_check) {
- reg_len = AllocTemp();
- // Get len.
- Load32Disp(rl_array.reg, len_offset, reg_len);
- }
- // reg_ptr -> array data.
- OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
- FreeTemp(rl_array.reg);
- if ((size == k64) || (size == kDouble)) {
- if (scale) {
- RegStorage r_new_index = AllocTemp();
- OpRegRegImm(kOpLsl, r_new_index, rl_index.reg, scale);
- OpRegReg(kOpAdd, reg_ptr, r_new_index);
- FreeTemp(r_new_index);
- } else {
- OpRegReg(kOpAdd, reg_ptr, rl_index.reg);
- }
- FreeTemp(rl_index.reg);
- rl_result = EvalLoc(rl_dest, reg_class, true);
-
- if (needs_range_check) {
- GenArrayBoundsCheck(rl_index.reg, reg_len);
- FreeTemp(reg_len);
- }
- LoadBaseDisp(reg_ptr, 0, rl_result.reg, size, kNotVolatile);
-
- FreeTemp(reg_ptr);
- StoreValueWide(rl_dest, rl_result);
- } else {
- rl_result = EvalLoc(rl_dest, reg_class, true);
-
- if (needs_range_check) {
- GenArrayBoundsCheck(rl_index.reg, reg_len);
- FreeTemp(reg_len);
- }
- if (rl_result.ref) {
- LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), As32BitReg(rl_result.reg), scale,
- kReference);
- } else {
- LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
- }
-
- FreeTemp(reg_ptr);
- StoreValue(rl_dest, rl_result);
- }
-}
-
-/*
- * Generate array store
- */
-void Mips64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
- RegLocation rl_index, RegLocation rl_src, int scale,
- bool card_mark) {
- RegisterClass reg_class = RegClassBySize(size);
- int len_offset = mirror::Array::LengthOffset().Int32Value();
- int data_offset;
-
- if (size == k64 || size == kDouble) {
- data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
- } else {
- data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
- }
-
- rl_array = LoadValue(rl_array, kRefReg);
- rl_index = LoadValue(rl_index, kCoreReg);
-
- // FIXME: need to add support for rl_index.is_const.
-
- RegStorage reg_ptr;
- bool allocated_reg_ptr_temp = false;
- if (IsTemp(rl_array.reg) && !card_mark) {
- Clobber(rl_array.reg);
- reg_ptr = rl_array.reg;
- } else {
- reg_ptr = AllocTemp();
- OpRegCopy(reg_ptr, rl_array.reg);
- allocated_reg_ptr_temp = true;
- }
-
- // Null object?
- GenNullCheck(rl_array.reg, opt_flags);
-
- bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
- RegStorage reg_len;
- if (needs_range_check) {
- reg_len = AllocTemp();
-    // NOTE: max live temps (4) here.
- // Get len.
- Load32Disp(rl_array.reg, len_offset, reg_len);
- }
- // reg_ptr -> array data.
- OpRegImm(kOpAdd, reg_ptr, data_offset);
-  // At this point, reg_ptr points to array data; 2 live temps.
- if ((size == k64) || (size == kDouble)) {
- // TUNING: specific wide routine that can handle fp regs.
- if (scale) {
- RegStorage r_new_index = AllocTemp();
- OpRegRegImm(kOpLsl, r_new_index, rl_index.reg, scale);
- OpRegReg(kOpAdd, reg_ptr, r_new_index);
- FreeTemp(r_new_index);
- } else {
- OpRegReg(kOpAdd, reg_ptr, rl_index.reg);
- }
- rl_src = LoadValueWide(rl_src, reg_class);
-
- if (needs_range_check) {
- GenArrayBoundsCheck(rl_index.reg, reg_len);
- FreeTemp(reg_len);
- }
-
- StoreBaseDisp(reg_ptr, 0, rl_src.reg, size, kNotVolatile);
- } else {
- rl_src = LoadValue(rl_src, reg_class);
- if (needs_range_check) {
- GenArrayBoundsCheck(rl_index.reg, reg_len);
- FreeTemp(reg_len);
- }
- StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
- }
- if (allocated_reg_ptr_temp) {
- FreeTemp(reg_ptr);
- }
- if (card_mark) {
- MarkGCCard(opt_flags, rl_src.reg, rl_array.reg);
- }
-}
-
-void Mips64Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_shift) {
- OpKind op = kOpBkpt;
- switch (opcode) {
- case Instruction::SHL_LONG:
- case Instruction::SHL_LONG_2ADDR:
- op = kOpLsl;
- break;
- case Instruction::SHR_LONG:
- case Instruction::SHR_LONG_2ADDR:
- op = kOpAsr;
- break;
- case Instruction::USHR_LONG:
- case Instruction::USHR_LONG_2ADDR:
- op = kOpLsr;
- break;
- default:
- LOG(FATAL) << "Unexpected case: " << opcode;
- }
- rl_shift = LoadValue(rl_shift, kCoreReg);
- rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
- StoreValueWide(rl_dest, rl_result);
-}
-
-void Mips64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_shift, int flags) {
- UNUSED(flags);
- OpKind op = kOpBkpt;
- // Per spec, we only care about low 6 bits of shift amount.
- int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
- rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- if (shift_amount == 0) {
- StoreValueWide(rl_dest, rl_src1);
- return;
- }
-
- RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- switch (opcode) {
- case Instruction::SHL_LONG:
- case Instruction::SHL_LONG_2ADDR:
- op = kOpLsl;
- break;
- case Instruction::SHR_LONG:
- case Instruction::SHR_LONG_2ADDR:
- op = kOpAsr;
- break;
- case Instruction::USHR_LONG:
- case Instruction::USHR_LONG_2ADDR:
- op = kOpLsr;
- break;
- default:
- LOG(FATAL) << "Unexpected case";
- }
- OpRegRegImm(op, rl_result.reg, rl_src1.reg, shift_amount);
- StoreValueWide(rl_dest, rl_result);
-}
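
A worked example of the "low 6 bits" rule applied above (for illustration):

  // A constant shift of 65 reduces to 65 & 0x3f == 1, and a shift of 64
  // reduces to 0, which takes the StoreValueWide early exit above.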
-
-void Mips64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2, int flags) {
- // Default - bail to non-const handler.
- GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
-}
-
-void Mips64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
- rl_src = LoadValue(rl_src, kCoreReg);
- RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- NewLIR3(kMips64Sll, rl_result.reg.GetReg(), As64BitReg(rl_src.reg).GetReg(), 0);
- StoreValueWide(rl_dest, rl_result);
-}
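
The single sll works because MIPS64 defines 32-bit operations, including sll with a zero
shift amount, to sign-extend their 32-bit result to 64 bits (illustrative note):

  // sll $v0, $a0, 0  ==>  $v0 = sign_extend_32_to_64($a0[31:0]),
  // which is exactly the int-to-long widening required here.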
-
-void Mips64Mir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
- RegLocation rl_src, RegisterClass reg_class) {
- FlushAllRegs(); // Send everything to home location.
- CallRuntimeHelperRegLocation(trampoline, rl_src, false);
- if (rl_dest.wide) {
- RegLocation rl_result;
- rl_result = GetReturnWide(reg_class);
- StoreValueWide(rl_dest, rl_result);
- } else {
- RegLocation rl_result;
- rl_result = GetReturn(reg_class);
- StoreValue(rl_dest, rl_result);
- }
-}
-
-} // namespace art
diff --git a/compiler/dex/quick/mips64/mips64_lir.h b/compiler/dex/quick/mips64/mips64_lir.h
deleted file mode 100644
index 4a5c5ce..0000000
--- a/compiler/dex/quick/mips64/mips64_lir.h
+++ /dev/null
@@ -1,648 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_MIPS64_MIPS64_LIR_H_
-#define ART_COMPILER_DEX_QUICK_MIPS64_MIPS64_LIR_H_
-
-#include "dex/reg_location.h"
-#include "dex/reg_storage.h"
-
-namespace art {
-
-/*
- * Runtime register conventions.
- *
- * zero is always the value 0
- * at is scratch (normally used as temp reg by assembler)
- * v0, v1 are scratch (normally hold subroutine return values)
- * a0-a7 are scratch (normally hold subroutine arguments)
- * t0-t3, t8 are scratch
- * t9 is scratch (normally used for function calls)
- * s0 (rMIPS64_SUSPEND) is reserved [holds suspend-check counter]
- * s1 (rMIPS64_SELF) is reserved [holds current &Thread]
- * s2-s7 are callee save (promotion target)
- * k0, k1 are reserved for use by interrupt handlers
- * gp is reserved for global pointer
- * sp is reserved
- * s8 is callee save (promotion target)
- * ra is scratch (normally holds the return addr)
- *
- * Preserved across C calls: s0-s8
- * Trashed across C calls: at, v0-v1, a0-a7, t0-t3, t8-t9, gp, ra
- *
- * Floating point registers
- * NOTE: there are 32 fp registers.
- * f0-f31
- *
- * f0-f31 trashed across C calls
- *
- * For mips64 code use:
- * a0-a7 to hold operands
- * v0-v1 to hold results
- * t0-t3, t8-t9 for temps
- *
- * All jump/branch instructions have a delay slot after them.
- *
- * Stack frame diagram (stack grows down, higher addresses at top):
- *
- * +------------------------+
- * | IN[ins-1] | {Note: resides in caller's frame}
- * | . |
- * | IN[0] |
- * | caller's Method* |
- * +========================+ {Note: start of callee's frame}
- * | spill region           | {variable sized - will include ra if non-leaf.}
- * +------------------------+
- * | ...filler word...      | {Note: used as 2nd word of V[locals-1] if long}
- * +------------------------+
- * | V[locals-1] |
- * | V[locals-2] |
- * | . |
- * | . |
- * | V[1] |
- * | V[0] |
- * +------------------------+
- * | 0 to 3 words padding |
- * +------------------------+
- * | OUT[outs-1] |
- * | OUT[outs-2] |
- * | . |
- * | OUT[0] |
- * | cur_method* | <<== sp w/ 16-byte alignment
- * +========================+
- */
-
-
-#define rARG0 rA0d
-#define rs_rARG0 rs_rA0d
-#define rARG1 rA1d
-#define rs_rARG1 rs_rA1d
-#define rARG2 rA2d
-#define rs_rARG2 rs_rA2d
-#define rARG3 rA3d
-#define rs_rARG3 rs_rA3d
-#define rARG4 rA4d
-#define rs_rARG4 rs_rA4d
-#define rARG5 rA5d
-#define rs_rARG5 rs_rA5d
-#define rARG6 rA6d
-#define rs_rARG6 rs_rA6d
-#define rARG7 rA7d
-#define rs_rARG7 rs_rA7d
-#define rRESULT0 rV0d
-#define rs_rRESULT0 rs_rV0d
-#define rRESULT1 rV1d
-#define rs_rRESULT1 rs_rV1d
-
-#define rFARG0 rF12
-#define rs_rFARG0 rs_rF12
-#define rFARG1 rF13
-#define rs_rFARG1 rs_rF13
-#define rFARG2 rF14
-#define rs_rFARG2 rs_rF14
-#define rFARG3 rF15
-#define rs_rFARG3 rs_rF15
-#define rFARG4 rF16
-#define rs_rFARG4 rs_rF16
-#define rFARG5 rF17
-#define rs_rFARG5 rs_rF17
-#define rFARG6 rF18
-#define rs_rFARG6 rs_rF18
-#define rFARG7 rF19
-#define rs_rFARG7 rs_rF19
-#define rFRESULT0 rF0
-#define rs_rFRESULT0 rs_rF0
-#define rFRESULT1 rF1
-#define rs_rFRESULT1 rs_rF1
-
-// Regs not used for Mips64.
-#define rMIPS64_LR RegStorage::kInvalidRegVal
-#define rMIPS64_PC RegStorage::kInvalidRegVal
-
-enum Mips64ResourceEncodingPos {
- kMips64GPReg0 = 0,
- kMips64RegSP = 29,
- kMips64RegLR = 31,
- kMips64FPReg0 = 32,
- kMips64FPRegEnd = 64,
- kMips64RegPC = kMips64FPRegEnd,
- kMips64RegEnd = 65,
-};
-
-enum Mips64NativeRegisterPool {  // private marker so generate-operator-out.py skips this enum.
- rZERO = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
- rZEROd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 0,
- rAT = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
- rATd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 1,
- rV0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
- rV0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 2,
- rV1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 3,
- rV1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 3,
- rA0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 4,
- rA0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 4,
- rA1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 5,
- rA1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 5,
- rA2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 6,
- rA2d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 6,
- rA3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 7,
- rA3d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 7,
- rA4 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 8,
- rA4d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 8,
- rA5 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 9,
- rA5d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 9,
- rA6 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 10,
- rA6d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 10,
- rA7 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 11,
- rA7d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 11,
- rT0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 12,
- rT0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 12,
- rT1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 13,
- rT1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 13,
- rT2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 14,
- rT2d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 14,
- rT3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 15,
- rT3d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 15,
- rS0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 16,
- rS0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 16,
- rS1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 17,
- rS1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 17,
- rS2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 18,
- rS2d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 18,
- rS3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 19,
- rS3d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 19,
- rS4 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 20,
- rS4d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 20,
- rS5 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 21,
- rS5d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 21,
- rS6 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 22,
- rS6d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 22,
- rS7 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 23,
- rS7d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 23,
- rT8 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 24,
- rT8d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 24,
- rT9 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 25,
- rT9d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 25,
- rK0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 26,
- rK0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 26,
- rK1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 27,
- rK1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 27,
- rGP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 28,
- rGPd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 28,
- rSP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 29,
- rSPd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 29,
- rFP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 30,
- rFPd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 30,
- rRA = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 31,
- rRAd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 31,
-
- rF0 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 0,
- rF1 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 1,
- rF2 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 2,
- rF3 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 3,
- rF4 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 4,
- rF5 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 5,
- rF6 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 6,
- rF7 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 7,
- rF8 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 8,
- rF9 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 9,
- rF10 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 10,
- rF11 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 11,
- rF12 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 12,
- rF13 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 13,
- rF14 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 14,
- rF15 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 15,
- rF16 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 16,
- rF17 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 17,
- rF18 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 18,
- rF19 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 19,
- rF20 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 20,
- rF21 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 21,
- rF22 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 22,
- rF23 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 23,
- rF24 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 24,
- rF25 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 25,
- rF26 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 26,
- rF27 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 27,
- rF28 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 28,
- rF29 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 29,
- rF30 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 30,
- rF31 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 31,
-
- rD0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 0,
- rD1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 1,
- rD2 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
- rD3 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 3,
- rD4 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 4,
- rD5 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 5,
- rD6 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
- rD7 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 7,
- rD8 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 8,
- rD9 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 9,
- rD10 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
- rD11 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
- rD12 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
- rD13 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
- rD14 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
- rD15 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,
- rD16 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 16,
- rD17 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 17,
- rD18 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 18,
- rD19 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 19,
- rD20 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 20,
- rD21 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 21,
- rD22 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 22,
- rD23 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 23,
- rD24 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 24,
- rD25 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 25,
- rD26 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 26,
- rD27 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 27,
- rD28 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 28,
- rD29 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 29,
- rD30 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 30,
- rD31 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 31,
-};
-
-constexpr RegStorage rs_rZERO(RegStorage::kValid | rZERO);
-constexpr RegStorage rs_rZEROd(RegStorage::kValid | rZEROd);
-constexpr RegStorage rs_rAT(RegStorage::kValid | rAT);
-constexpr RegStorage rs_rATd(RegStorage::kValid | rATd);
-constexpr RegStorage rs_rV0(RegStorage::kValid | rV0);
-constexpr RegStorage rs_rV0d(RegStorage::kValid | rV0d);
-constexpr RegStorage rs_rV1(RegStorage::kValid | rV1);
-constexpr RegStorage rs_rV1d(RegStorage::kValid | rV1d);
-constexpr RegStorage rs_rA0(RegStorage::kValid | rA0);
-constexpr RegStorage rs_rA0d(RegStorage::kValid | rA0d);
-constexpr RegStorage rs_rA1(RegStorage::kValid | rA1);
-constexpr RegStorage rs_rA1d(RegStorage::kValid | rA1d);
-constexpr RegStorage rs_rA2(RegStorage::kValid | rA2);
-constexpr RegStorage rs_rA2d(RegStorage::kValid | rA2d);
-constexpr RegStorage rs_rA3(RegStorage::kValid | rA3);
-constexpr RegStorage rs_rA3d(RegStorage::kValid | rA3d);
-constexpr RegStorage rs_rA4(RegStorage::kValid | rA4);
-constexpr RegStorage rs_rA4d(RegStorage::kValid | rA4d);
-constexpr RegStorage rs_rA5(RegStorage::kValid | rA5);
-constexpr RegStorage rs_rA5d(RegStorage::kValid | rA5d);
-constexpr RegStorage rs_rA6(RegStorage::kValid | rA6);
-constexpr RegStorage rs_rA6d(RegStorage::kValid | rA6d);
-constexpr RegStorage rs_rA7(RegStorage::kValid | rA7);
-constexpr RegStorage rs_rA7d(RegStorage::kValid | rA7d);
-constexpr RegStorage rs_rT0(RegStorage::kValid | rT0);
-constexpr RegStorage rs_rT0d(RegStorage::kValid | rT0d);
-constexpr RegStorage rs_rT1(RegStorage::kValid | rT1);
-constexpr RegStorage rs_rT1d(RegStorage::kValid | rT1d);
-constexpr RegStorage rs_rT2(RegStorage::kValid | rT2);
-constexpr RegStorage rs_rT2d(RegStorage::kValid | rT2d);
-constexpr RegStorage rs_rT3(RegStorage::kValid | rT3);
-constexpr RegStorage rs_rT3d(RegStorage::kValid | rT3d);
-constexpr RegStorage rs_rS0(RegStorage::kValid | rS0);
-constexpr RegStorage rs_rS0d(RegStorage::kValid | rS0d);
-constexpr RegStorage rs_rS1(RegStorage::kValid | rS1);
-constexpr RegStorage rs_rS1d(RegStorage::kValid | rS1d);
-constexpr RegStorage rs_rS2(RegStorage::kValid | rS2);
-constexpr RegStorage rs_rS2d(RegStorage::kValid | rS2d);
-constexpr RegStorage rs_rS3(RegStorage::kValid | rS3);
-constexpr RegStorage rs_rS3d(RegStorage::kValid | rS3d);
-constexpr RegStorage rs_rS4(RegStorage::kValid | rS4);
-constexpr RegStorage rs_rS4d(RegStorage::kValid | rS4d);
-constexpr RegStorage rs_rS5(RegStorage::kValid | rS5);
-constexpr RegStorage rs_rS5d(RegStorage::kValid | rS5d);
-constexpr RegStorage rs_rS6(RegStorage::kValid | rS6);
-constexpr RegStorage rs_rS6d(RegStorage::kValid | rS6d);
-constexpr RegStorage rs_rS7(RegStorage::kValid | rS7);
-constexpr RegStorage rs_rS7d(RegStorage::kValid | rS7d);
-constexpr RegStorage rs_rT8(RegStorage::kValid | rT8);
-constexpr RegStorage rs_rT8d(RegStorage::kValid | rT8d);
-constexpr RegStorage rs_rT9(RegStorage::kValid | rT9);
-constexpr RegStorage rs_rT9d(RegStorage::kValid | rT9d);
-constexpr RegStorage rs_rK0(RegStorage::kValid | rK0);
-constexpr RegStorage rs_rK0d(RegStorage::kValid | rK0d);
-constexpr RegStorage rs_rK1(RegStorage::kValid | rK1);
-constexpr RegStorage rs_rK1d(RegStorage::kValid | rK1d);
-constexpr RegStorage rs_rGP(RegStorage::kValid | rGP);
-constexpr RegStorage rs_rGPd(RegStorage::kValid | rGPd);
-constexpr RegStorage rs_rSP(RegStorage::kValid | rSP);
-constexpr RegStorage rs_rSPd(RegStorage::kValid | rSPd);
-constexpr RegStorage rs_rFP(RegStorage::kValid | rFP);
-constexpr RegStorage rs_rFPd(RegStorage::kValid | rFPd);
-constexpr RegStorage rs_rRA(RegStorage::kValid | rRA);
-constexpr RegStorage rs_rRAd(RegStorage::kValid | rRAd);
-
-constexpr RegStorage rs_rMIPS64_LR(RegStorage::kInvalid); // Not used for MIPS64.
-constexpr RegStorage rs_rMIPS64_PC(RegStorage::kInvalid); // Not used for MIPS64.
-constexpr RegStorage rs_rMIPS64_COUNT(RegStorage::kInvalid); // Not used for MIPS64.
-
-constexpr RegStorage rs_rF0(RegStorage::kValid | rF0);
-constexpr RegStorage rs_rF1(RegStorage::kValid | rF1);
-constexpr RegStorage rs_rF2(RegStorage::kValid | rF2);
-constexpr RegStorage rs_rF3(RegStorage::kValid | rF3);
-constexpr RegStorage rs_rF4(RegStorage::kValid | rF4);
-constexpr RegStorage rs_rF5(RegStorage::kValid | rF5);
-constexpr RegStorage rs_rF6(RegStorage::kValid | rF6);
-constexpr RegStorage rs_rF7(RegStorage::kValid | rF7);
-constexpr RegStorage rs_rF8(RegStorage::kValid | rF8);
-constexpr RegStorage rs_rF9(RegStorage::kValid | rF9);
-constexpr RegStorage rs_rF10(RegStorage::kValid | rF10);
-constexpr RegStorage rs_rF11(RegStorage::kValid | rF11);
-constexpr RegStorage rs_rF12(RegStorage::kValid | rF12);
-constexpr RegStorage rs_rF13(RegStorage::kValid | rF13);
-constexpr RegStorage rs_rF14(RegStorage::kValid | rF14);
-constexpr RegStorage rs_rF15(RegStorage::kValid | rF15);
-constexpr RegStorage rs_rF16(RegStorage::kValid | rF16);
-constexpr RegStorage rs_rF17(RegStorage::kValid | rF17);
-constexpr RegStorage rs_rF18(RegStorage::kValid | rF18);
-constexpr RegStorage rs_rF19(RegStorage::kValid | rF19);
-constexpr RegStorage rs_rF20(RegStorage::kValid | rF20);
-constexpr RegStorage rs_rF21(RegStorage::kValid | rF21);
-constexpr RegStorage rs_rF22(RegStorage::kValid | rF22);
-constexpr RegStorage rs_rF23(RegStorage::kValid | rF23);
-constexpr RegStorage rs_rF24(RegStorage::kValid | rF24);
-constexpr RegStorage rs_rF25(RegStorage::kValid | rF25);
-constexpr RegStorage rs_rF26(RegStorage::kValid | rF26);
-constexpr RegStorage rs_rF27(RegStorage::kValid | rF27);
-constexpr RegStorage rs_rF28(RegStorage::kValid | rF28);
-constexpr RegStorage rs_rF29(RegStorage::kValid | rF29);
-constexpr RegStorage rs_rF30(RegStorage::kValid | rF30);
-constexpr RegStorage rs_rF31(RegStorage::kValid | rF31);
-
-constexpr RegStorage rs_rD0(RegStorage::kValid | rD0);
-constexpr RegStorage rs_rD1(RegStorage::kValid | rD1);
-constexpr RegStorage rs_rD2(RegStorage::kValid | rD2);
-constexpr RegStorage rs_rD3(RegStorage::kValid | rD3);
-constexpr RegStorage rs_rD4(RegStorage::kValid | rD4);
-constexpr RegStorage rs_rD5(RegStorage::kValid | rD5);
-constexpr RegStorage rs_rD6(RegStorage::kValid | rD6);
-constexpr RegStorage rs_rD7(RegStorage::kValid | rD7);
-constexpr RegStorage rs_rD8(RegStorage::kValid | rD8);
-constexpr RegStorage rs_rD9(RegStorage::kValid | rD9);
-constexpr RegStorage rs_rD10(RegStorage::kValid | rD10);
-constexpr RegStorage rs_rD11(RegStorage::kValid | rD11);
-constexpr RegStorage rs_rD12(RegStorage::kValid | rD12);
-constexpr RegStorage rs_rD13(RegStorage::kValid | rD13);
-constexpr RegStorage rs_rD14(RegStorage::kValid | rD14);
-constexpr RegStorage rs_rD15(RegStorage::kValid | rD15);
-constexpr RegStorage rs_rD16(RegStorage::kValid | rD16);
-constexpr RegStorage rs_rD17(RegStorage::kValid | rD17);
-constexpr RegStorage rs_rD18(RegStorage::kValid | rD18);
-constexpr RegStorage rs_rD19(RegStorage::kValid | rD19);
-constexpr RegStorage rs_rD20(RegStorage::kValid | rD20);
-constexpr RegStorage rs_rD21(RegStorage::kValid | rD21);
-constexpr RegStorage rs_rD22(RegStorage::kValid | rD22);
-constexpr RegStorage rs_rD23(RegStorage::kValid | rD23);
-constexpr RegStorage rs_rD24(RegStorage::kValid | rD24);
-constexpr RegStorage rs_rD25(RegStorage::kValid | rD25);
-constexpr RegStorage rs_rD26(RegStorage::kValid | rD26);
-constexpr RegStorage rs_rD27(RegStorage::kValid | rD27);
-constexpr RegStorage rs_rD28(RegStorage::kValid | rD28);
-constexpr RegStorage rs_rD29(RegStorage::kValid | rD29);
-constexpr RegStorage rs_rD30(RegStorage::kValid | rD30);
-constexpr RegStorage rs_rD31(RegStorage::kValid | rD31);
-
-// TODO: reduce/eliminate use of these.
-#define rMIPS64_SUSPEND rS0d
-#define rs_rMIPS64_SUSPEND rs_rS0d
-#define rMIPS64_SELF rS1d
-#define rs_rMIPS64_SELF rs_rS1d
-#define rMIPS64_SP rSPd
-#define rs_rMIPS64_SP rs_rSPd
-#define rMIPS64_ARG0 rARG0
-#define rs_rMIPS64_ARG0 rs_rARG0
-#define rMIPS64_ARG1 rARG1
-#define rs_rMIPS64_ARG1 rs_rARG1
-#define rMIPS64_ARG2 rARG2
-#define rs_rMIPS64_ARG2 rs_rARG2
-#define rMIPS64_ARG3 rARG3
-#define rs_rMIPS64_ARG3 rs_rARG3
-#define rMIPS64_ARG4 rARG4
-#define rs_rMIPS64_ARG4 rs_rARG4
-#define rMIPS64_ARG5 rARG5
-#define rs_rMIPS64_ARG5 rs_rARG5
-#define rMIPS64_ARG6 rARG6
-#define rs_rMIPS64_ARG6 rs_rARG6
-#define rMIPS64_ARG7 rARG7
-#define rs_rMIPS64_ARG7 rs_rARG7
-#define rMIPS64_FARG0 rFARG0
-#define rs_rMIPS64_FARG0 rs_rFARG0
-#define rMIPS64_FARG1 rFARG1
-#define rs_rMIPS64_FARG1 rs_rFARG1
-#define rMIPS64_FARG2 rFARG2
-#define rs_rMIPS64_FARG2 rs_rFARG2
-#define rMIPS64_FARG3 rFARG3
-#define rs_rMIPS64_FARG3 rs_rFARG3
-#define rMIPS64_FARG4 rFARG4
-#define rs_rMIPS64_FARG4 rs_rFARG4
-#define rMIPS64_FARG5 rFARG5
-#define rs_rMIPS64_FARG5 rs_rFARG5
-#define rMIPS64_FARG6 rFARG6
-#define rs_rMIPS64_FARG6 rs_rFARG6
-#define rMIPS64_FARG7 rFARG7
-#define rs_rMIPS64_FARG7 rs_rFARG7
-#define rMIPS64_RET0 rRESULT0
-#define rs_rMIPS64_RET0 rs_rRESULT0
-#define rMIPS64_RET1 rRESULT1
-#define rs_rMIPS64_RET1 rs_rRESULT1
-#define rMIPS64_INVOKE_TGT rT9d
-#define rs_rMIPS64_INVOKE_TGT rs_rT9d
-#define rMIPS64_COUNT RegStorage::kInvalidRegVal
-
-// RegisterLocation templates for return values (r_V0).
-const RegLocation mips64_loc_c_return
- {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1,
- RegStorage(RegStorage::k32BitSolo, rV0), INVALID_SREG, INVALID_SREG};
-const RegLocation mips64_loc_c_return_ref
- {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1,
- RegStorage(RegStorage::k64BitSolo, rV0d), INVALID_SREG, INVALID_SREG};
-const RegLocation mips64_loc_c_return_wide
- {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
- RegStorage(RegStorage::k64BitSolo, rV0d), INVALID_SREG, INVALID_SREG};
-const RegLocation mips64_loc_c_return_float
- {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1,
- RegStorage(RegStorage::k32BitSolo, rF0), INVALID_SREG, INVALID_SREG};
-const RegLocation mips64_loc_c_return_double
- {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
- RegStorage(RegStorage::k64BitSolo, rD0), INVALID_SREG, INVALID_SREG};
-
-enum Mips64ShiftEncodings {
- kMips64Lsl = 0x0,
- kMips64Lsr = 0x1,
- kMips64Asr = 0x2,
- kMips64Ror = 0x3
-};
-
-// MIPS64 sync kinds (Note: support for kinds other than kSYNC0 may not exist).
-#define kSYNC0 0x00
-#define kSYNC_WMB 0x04
-#define kSYNC_MB 0x01
-#define kSYNC_ACQUIRE 0x11
-#define kSYNC_RELEASE 0x12
-#define kSYNC_RMB 0x13
-
-// TODO: Use smaller hammer when appropriate for target CPU.
-#define kST kSYNC0
-#define kSY kSYNC0
-
-/*
- * The following enum defines the list of supported Mips64 instructions by the
- * assembler. Their corresponding EncodingMap positions will be defined in
- * assemble_mips64.cc.
- */
-enum Mips64OpCode {
- kMips64First = 0,
- kMips6432BitData = kMips64First, // data [31..0].
- kMips64Addiu, // addiu t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
- kMips64Addu, // add d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100001].
- kMips64And, // and d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100100].
- kMips64Andi, // andi t,s,imm16 [001100] s[25..21] t[20..16] imm16[15..0].
- kMips64B, // b o [0001000000000000] o[15..0].
- kMips64Bal, // bal o [0000010000010001] o[15..0].
- // NOTE: the code tests the range kMips64Beq thru kMips64Bne, so adding an instruction in this
- // range may require updates.
- kMips64Beq, // beq s,t,o [000100] s[25..21] t[20..16] o[15..0].
- kMips64Beqz, // beqz s,o [000100] s[25..21] [00000] o[15..0].
- kMips64Bgez, // bgez s,o [000001] s[25..21] [00001] o[15..0].
- kMips64Bgtz, // bgtz s,o [000111] s[25..21] [00000] o[15..0].
- kMips64Blez, // blez s,o [000110] s[25..21] [00000] o[15..0].
- kMips64Bltz, // bltz s,o [000001] s[25..21] [00000] o[15..0].
- kMips64Bnez, // bnez s,o [000101] s[25..21] [00000] o[15..0].
- kMips64Bne, // bne s,t,o [000101] s[25..21] t[20..16] o[15..0].
- kMips64Break, // break code [000000] code[25..6] [001101].
- kMips64Daddiu, // daddiu t,s,imm16 [011001] s[25..21] t[20..16] imm16[15..0].
- kMips64Daddu, // daddu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101101].
- kMips64Dahi, // dahi s,imm16 [000001] s[25..21] [00110] imm16[15..0].
- kMips64Dati, // dati s,imm16 [000001] s[25..21] [11110] imm16[15..0].
- kMips64Daui, // daui t,s,imm16 [011101] s[25..21] t[20..16] imm16[15..0].
- kMips64Ddiv, // ddiv d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011110].
- kMips64Div, // div d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011010].
- kMips64Dmod, // dmod d,s,t [000000] s[25..21] t[20..16] d[15..11] [00011011110].
- kMips64Dmul, // dmul d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011100].
- kMips64Dmfc1, // dmfc1 t,s [01000100001] t[20..16] s[15..11] [00000000000].
- kMips64Dmtc1, // dmtc1 t,s [01000100101] t[20..16] s[15..11] [00000000000].
- kMips64Drotr32, // drotr32 d,t,a [00000000001] t[20..16] d[15..11] a[10..6] [111110].
- kMips64Dsll, // dsll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111000].
- kMips64Dsll32, // dsll32 d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111100].
- kMips64Dsrl, // dsrl d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111010].
- kMips64Dsrl32, // dsrl32 d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111110].
- kMips64Dsra, // dsra d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111011].
- kMips64Dsra32, // dsra32 d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111111].
- kMips64Dsllv, // dsllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010100].
- kMips64Dsrlv, // dsrlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010110].
- kMips64Dsrav, // dsrav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010111].
- kMips64Dsubu, // dsubu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101111].
- kMips64Ext, // ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000].
- kMips64Faddd, // add.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000000].
- kMips64Fadds, // add.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000000].
- kMips64Fdivd, // div.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000011].
- kMips64Fdivs, // div.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000011].
- kMips64Fmuld, // mul.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000010].
- kMips64Fmuls, // mul.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000010].
- kMips64Fsubd, // sub.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000001].
- kMips64Fsubs, // sub.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000001].
- kMips64Fcvtsd, // cvt.s.d d,s [01000110001] [00000] s[15..11] d[10..6] [100000].
- kMips64Fcvtsw, // cvt.s.w d,s [01000110100] [00000] s[15..11] d[10..6] [100000].
- kMips64Fcvtds, // cvt.d.s d,s [01000110000] [00000] s[15..11] d[10..6] [100001].
- kMips64Fcvtdw, // cvt.d.w d,s [01000110100] [00000] s[15..11] d[10..6] [100001].
- kMips64Fcvtws, // cvt.w.s d,s [01000110000] [00000] s[15..11] d[10..6] [100100].
- kMips64Fcvtwd, // cvt.w.d d,s [01000110001] [00000] s[15..11] d[10..6] [100100].
- kMips64Fmovd, // mov.d d,s [01000110001] [00000] s[15..11] d[10..6] [000110].
- kMips64Fmovs, // mov.s d,s [01000110000] [00000] s[15..11] d[10..6] [000110].
- kMips64Fnegd, // neg.d d,s [01000110001] [00000] s[15..11] d[10..6] [000111].
- kMips64Fnegs, // neg.s d,s [01000110000] [00000] s[15..11] d[10..6] [000111].
- kMips64Fldc1, // ldc1 t,o(b) [110101] b[25..21] t[20..16] o[15..0].
- kMips64Flwc1, // lwc1 t,o(b) [110001] b[25..21] t[20..16] o[15..0].
- kMips64Fsdc1, // sdc1 t,o(b) [111101] b[25..21] t[20..16] o[15..0].
- kMips64Fswc1, // swc1 t,o(b) [111001] b[25..21] t[20..16] o[15..0].
- kMips64Jal, // jal t [000011] t[25..0].
- kMips64Jalr, // jalr d,s [000000] s[25..21] [00000] d[15..11] hint[10..6] [001001].
- kMips64Lahi, // lui t,imm16 [00111100000] t[20..16] imm16[15..0] load addr hi.
- kMips64Lalo, // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] load addr lo.
- kMips64Lb, // lb t,o(b) [100000] b[25..21] t[20..16] o[15..0].
- kMips64Lbu, // lbu t,o(b) [100100] b[25..21] t[20..16] o[15..0].
- kMips64Ld, // ld t,o(b) [110111] b[25..21] t[20..16] o[15..0].
- kMips64Lh, // lh t,o(b) [100001] b[25..21] t[20..16] o[15..0].
- kMips64Lhu, // lhu t,o(b) [100101] b[25..21] t[20..16] o[15..0].
- kMips64Lui, // lui t,imm16 [00111100000] t[20..16] imm16[15..0].
- kMips64Lw, // lw t,o(b) [100011] b[25..21] t[20..16] o[15..0].
- kMips64Lwu, // lwu t,o(b) [100111] b[25..21] t[20..16] o[15..0].
- kMips64Mfc1, // mfc1 t,s [01000100000] t[20..16] s[15..11] [00000000000].
- kMips64Mtc1, // mtc1 t,s [01000100100] t[20..16] s[15..11] [00000000000].
- kMips64Move, // move d,s [000000] s[25..21] [00000] d[15..11] [00000101101].
- kMips64Mod, // mod d,s,t [000000] s[25..21] t[20..16] d[15..11] [00011011010].
- kMips64Mul, // mul d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011000].
- kMips64Nop, // nop [00000000000000000000000000000000].
- kMips64Nor, // nor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100111].
- kMips64Or, // or d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100101].
- kMips64Ori, // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
- kMips64Sb, // sb t,o(b) [101000] b[25..21] t[20..16] o[15..0].
- kMips64Sd, // sd t,o(b) [111111] b[25..21] t[20..16] o[15..0].
- kMips64Seb, // seb d,t [01111100000] t[20..16] d[15..11] [10000100000].
- kMips64Seh, // seh d,t [01111100000] t[20..16] d[15..11] [11000100000].
- kMips64Sh, // sh t,o(b) [101001] b[25..21] t[20..16] o[15..0].
- kMips64Sll, // sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000].
- kMips64Sllv, // sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100].
- kMips64Slt, // slt d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101010].
- kMips64Slti, // slti t,s,imm16 [001010] s[25..21] t[20..16] imm16[15..0].
- kMips64Sltu, // sltu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101011].
- kMips64Sra, // sra d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000011].
- kMips64Srav, // srav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000111].
- kMips64Srl, // srl d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000010].
- kMips64Srlv, // srlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000110].
- kMips64Subu, // subu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100011].
- kMips64Sw, // sw t,o(b) [101011] b[25..21] t[20..16] o[15..0].
- kMips64Sync, // sync kind [000000] [0000000000000000] s[10..6] [001111].
- kMips64Xor, // xor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100110].
- kMips64Xori, // xori t,s,imm16 [001110] s[25..21] t[20..16] imm16[15..0].
- kMips64CurrPC, // jal to .+8 to materialize pc.
- kMips64Delta, // Pseudo for ori t, s, <label>-<label>.
- kMips64DeltaHi, // Pseudo for lui t, high16(<label>-<label>).
- kMips64DeltaLo, // Pseudo for ori t, s, low16(<label>-<label>).
- kMips64Undefined, // undefined [011001xxxxxxxxxxxxxxxx].
- kMips64Last
-};
-std::ostream& operator<<(std::ostream& os, const Mips64OpCode& rhs);
-
-// Instruction assembly field_loc kind.
-enum Mips64EncodingKind {
- kFmtUnused,
- kFmtBitBlt, // Bit string using end/start.
- kFmtDfp, // Double FP reg.
- kFmtSfp, // Single FP reg.
- kFmtBlt5_2, // Same 5-bit field to 2 locations.
-};
-std::ostream& operator<<(std::ostream& os, const Mips64EncodingKind& rhs);
-
-// Struct used to define the snippet positions for each MIPS64 opcode.
-struct Mips64EncodingMap {
- uint32_t skeleton;
- struct {
- Mips64EncodingKind kind;
- int end; // end for kFmtBitBlt, 1-bit slice end for FP regs.
- int start; // start for kFmtBitBlt, 4-bit slice end for FP regs.
- } field_loc[4];
- Mips64OpCode opcode;
- uint64_t flags;
- const char *name;
- const char* fmt;
- int size; // Note: size is in bytes.
-};
-
-extern Mips64EncodingMap EncodingMap[kMips64Last];
-
-#define IS_UIMM16(v) ((0 <= (v)) && ((v) <= 65535))
-#define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32766))
-#define IS_SIMM16_2WORD(v) ((-32764 <= (v)) && ((v) <= 32763)) // 2 offsets must fit.
-
-} // namespace art
-
-#endif // ART_COMPILER_DEX_QUICK_MIPS64_MIPS64_LIR_H_
diff --git a/compiler/dex/quick/mips64/target_mips64.cc b/compiler/dex/quick/mips64/target_mips64.cc
deleted file mode 100644
index 6ed9617..0000000
--- a/compiler/dex/quick/mips64/target_mips64.cc
+++ /dev/null
@@ -1,653 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_mips64.h"
-
-#include <inttypes.h>
-
-#include <string>
-
-#include "arch/mips64/instruction_set_features_mips64.h"
-#include "backend_mips64.h"
-#include "base/logging.h"
-#include "dex/compiler_ir.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "driver/compiler_driver.h"
-#include "mips64_lir.h"
-
-namespace art {
-
-static constexpr RegStorage core_regs_arr32[] =
- {rs_rZERO, rs_rAT, rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rA4, rs_rA5, rs_rA6,
- rs_rA7, rs_rT0, rs_rT1, rs_rT2, rs_rT3, rs_rS0, rs_rS1, rs_rS2, rs_rS3, rs_rS4, rs_rS5,
- rs_rS6, rs_rS7, rs_rT8, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rFP, rs_rRA};
-static constexpr RegStorage core_regs_arr64[] =
- {rs_rZEROd, rs_rATd, rs_rV0d, rs_rV1d, rs_rA0d, rs_rA1d, rs_rA2d, rs_rA3d, rs_rA4d, rs_rA5d,
- rs_rA6d, rs_rA7d, rs_rT0d, rs_rT1d, rs_rT2d, rs_rT3d, rs_rS0d, rs_rS1d, rs_rS2d, rs_rS3d,
- rs_rS4d, rs_rS5d, rs_rS6d, rs_rS7d, rs_rT8d, rs_rT9d, rs_rK0d, rs_rK1d, rs_rGPd, rs_rSPd,
- rs_rFPd, rs_rRAd};
-#if 0
-// TODO: f24-f31 must be saved before calls and restored after.
-static constexpr RegStorage sp_regs_arr[] =
- {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
- rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
- rs_rF21, rs_rF22, rs_rF23, rs_rF24, rs_rF25, rs_rF26, rs_rF27, rs_rF28, rs_rF29, rs_rF30,
- rs_rF31};
-static constexpr RegStorage dp_regs_arr[] =
- {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
- rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
- rs_rD21, rs_rD22, rs_rD23, rs_rD24, rs_rD25, rs_rD26, rs_rD27, rs_rD28, rs_rD29, rs_rD30,
- rs_rD31};
-#else
-static constexpr RegStorage sp_regs_arr[] =
- {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
- rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
- rs_rF21, rs_rF22, rs_rF23};
-static constexpr RegStorage dp_regs_arr[] =
- {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
- rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
- rs_rD21, rs_rD22, rs_rD23};
-#endif
-static constexpr RegStorage reserved_regs_arr32[] =
- {rs_rZERO, rs_rAT, rs_rS0, rs_rS1, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rRA};
-static constexpr RegStorage reserved_regs_arr64[] =
- {rs_rZEROd, rs_rATd, rs_rS0d, rs_rS1d, rs_rT9d, rs_rK0d, rs_rK1d, rs_rGPd, rs_rSPd, rs_rRAd};
-static constexpr RegStorage core_temps_arr32[] =
- {rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rA4, rs_rA5, rs_rA6, rs_rA7, rs_rT0,
- rs_rT1, rs_rT2, rs_rT3, rs_rT8};
-static constexpr RegStorage core_temps_arr64[] =
- {rs_rV0d, rs_rV1d, rs_rA0d, rs_rA1d, rs_rA2d, rs_rA3d, rs_rA4d, rs_rA5d, rs_rA6d, rs_rA7d,
- rs_rT0d, rs_rT1d, rs_rT2d, rs_rT3d, rs_rT8d};
-#if 0
-// TODO: f24-f31 must be saved before calls and restored after.
-static constexpr RegStorage sp_temps_arr[] =
- {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
- rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
- rs_rF21, rs_rF22, rs_rF23, rs_rF24, rs_rF25, rs_rF26, rs_rF27, rs_rF28, rs_rF29, rs_rF30,
- rs_rF31};
-static constexpr RegStorage dp_temps_arr[] =
- {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
- rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
- rs_rD21, rs_rD22, rs_rD23, rs_rD24, rs_rD25, rs_rD26, rs_rD27, rs_rD28, rs_rD29, rs_rD30,
- rs_rD31};
-#else
-static constexpr RegStorage sp_temps_arr[] =
- {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
- rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
- rs_rF21, rs_rF22, rs_rF23};
-static constexpr RegStorage dp_temps_arr[] =
- {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
- rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
- rs_rD21, rs_rD22, rs_rD23};
-#endif
-
-static constexpr ArrayRef<const RegStorage> empty_pool;
-static constexpr ArrayRef<const RegStorage> core_regs32(core_regs_arr32);
-static constexpr ArrayRef<const RegStorage> core_regs64(core_regs_arr64);
-static constexpr ArrayRef<const RegStorage> sp_regs(sp_regs_arr);
-static constexpr ArrayRef<const RegStorage> dp_regs(dp_regs_arr);
-static constexpr ArrayRef<const RegStorage> reserved_regs32(reserved_regs_arr32);
-static constexpr ArrayRef<const RegStorage> reserved_regs64(reserved_regs_arr64);
-static constexpr ArrayRef<const RegStorage> core_temps32(core_temps_arr32);
-static constexpr ArrayRef<const RegStorage> core_temps64(core_temps_arr64);
-static constexpr ArrayRef<const RegStorage> sp_temps(sp_temps_arr);
-static constexpr ArrayRef<const RegStorage> dp_temps(dp_temps_arr);
-
-RegLocation Mips64Mir2Lir::LocCReturn() {
- return mips64_loc_c_return;
-}
-
-RegLocation Mips64Mir2Lir::LocCReturnRef() {
- return mips64_loc_c_return_ref;
-}
-
-RegLocation Mips64Mir2Lir::LocCReturnWide() {
- return mips64_loc_c_return_wide;
-}
-
-RegLocation Mips64Mir2Lir::LocCReturnFloat() {
- return mips64_loc_c_return_float;
-}
-
-RegLocation Mips64Mir2Lir::LocCReturnDouble() {
- return mips64_loc_c_return_double;
-}
-
-// Return a target-dependent special register.
-RegStorage Mips64Mir2Lir::TargetReg(SpecialTargetRegister reg) {
- RegStorage res_reg;
- switch (reg) {
- case kSelf: res_reg = rs_rS1; break;
- case kSuspend: res_reg = rs_rS0; break;
- case kLr: res_reg = rs_rRA; break;
- case kPc: res_reg = RegStorage::InvalidReg(); break;
- case kSp: res_reg = rs_rSP; break;
- case kArg0: res_reg = rs_rA0; break;
- case kArg1: res_reg = rs_rA1; break;
- case kArg2: res_reg = rs_rA2; break;
- case kArg3: res_reg = rs_rA3; break;
- case kArg4: res_reg = rs_rA4; break;
- case kArg5: res_reg = rs_rA5; break;
- case kArg6: res_reg = rs_rA6; break;
- case kArg7: res_reg = rs_rA7; break;
- case kFArg0: res_reg = rs_rF12; break;
- case kFArg1: res_reg = rs_rF13; break;
- case kFArg2: res_reg = rs_rF14; break;
- case kFArg3: res_reg = rs_rF15; break;
- case kFArg4: res_reg = rs_rF16; break;
- case kFArg5: res_reg = rs_rF17; break;
- case kFArg6: res_reg = rs_rF18; break;
- case kFArg7: res_reg = rs_rF19; break;
- case kRet0: res_reg = rs_rV0; break;
- case kRet1: res_reg = rs_rV1; break;
- case kInvokeTgt: res_reg = rs_rT9; break;
- case kHiddenArg: res_reg = rs_rT0; break;
- case kHiddenFpArg: res_reg = RegStorage::InvalidReg(); break;
- case kCount: res_reg = RegStorage::InvalidReg(); break;
- default: res_reg = RegStorage::InvalidReg();
- }
- return res_reg;
-}
-
-RegStorage Mips64Mir2Lir::InToRegStorageMips64Mapper::GetNextReg(ShortyArg arg) {
- const SpecialTargetRegister coreArgMappingToPhysicalReg[] =
- {kArg1, kArg2, kArg3, kArg4, kArg5, kArg6, kArg7};
- const size_t coreArgMappingToPhysicalRegSize = arraysize(coreArgMappingToPhysicalReg);
- const SpecialTargetRegister fpArgMappingToPhysicalReg[] =
- {kFArg1, kFArg2, kFArg3, kFArg4, kFArg5, kFArg6, kFArg7};
- const size_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
-
- RegStorage result = RegStorage::InvalidReg();
- if (arg.IsFP()) {
- if (cur_arg_reg_ < fpArgMappingToPhysicalRegSize) {
- DCHECK(!arg.IsRef());
- result = m2l_->TargetReg(fpArgMappingToPhysicalReg[cur_arg_reg_++],
- arg.IsWide() ? kWide : kNotWide);
- }
- } else {
- if (cur_arg_reg_ < coreArgMappingToPhysicalRegSize) {
- DCHECK(!(arg.IsWide() && arg.IsRef()));
- result = m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_arg_reg_++],
- arg.IsRef() ? kRef : (arg.IsWide() ? kWide : kNotWide));
- }
- }
- return result;
-}
-
-/*
- * Decode the register id.
- */
-ResourceMask Mips64Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
- return ResourceMask::Bit((reg.IsFloat() ? kMips64FPReg0 : 0) + reg.GetRegNum());
-}
-
-ResourceMask Mips64Mir2Lir::GetPCUseDefEncoding() const {
- return ResourceMask::Bit(kMips64RegPC);
-}
-
-void Mips64Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags, ResourceMask* use_mask,
- ResourceMask* def_mask) {
- DCHECK(!lir->flags.use_def_invalid);
-
- // Mips64-specific resource map setup here.
- if (flags & REG_DEF_SP) {
- def_mask->SetBit(kMips64RegSP);
- }
-
- if (flags & REG_USE_SP) {
- use_mask->SetBit(kMips64RegSP);
- }
-
- if (flags & REG_DEF_LR) {
- def_mask->SetBit(kMips64RegLR);
- }
-}
-
-/* For dumping instructions */
-#define MIPS64_REG_COUNT 32
-static const char *mips64_reg_name[MIPS64_REG_COUNT] = {
- "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
- "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
- "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
- "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"
-};
-
-/*
- * Interpret a format string and build a string no longer than size
- * See format key in assemble_mips64.cc.
- */
-std::string Mips64Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
- std::string buf;
- int i;
- const char *fmt_end = &fmt[strlen(fmt)];
- char tbuf[256];
- char nc;
- while (fmt < fmt_end) {
- int operand;
- if (*fmt == '!') {
- fmt++;
- DCHECK_LT(fmt, fmt_end);
- nc = *fmt++;
- if (nc == '!') {
- strcpy(tbuf, "!");
- } else {
- DCHECK_LT(fmt, fmt_end);
- DCHECK_LT(static_cast<unsigned>(nc-'0'), 4u);
- operand = lir->operands[nc-'0'];
- switch (*fmt++) {
- case 'b':
- strcpy(tbuf, "0000");
- for (i = 3; i >= 0; i--) {
- tbuf[i] += operand & 1;
- operand >>= 1;
- }
- break;
- case 's':
- snprintf(tbuf, arraysize(tbuf), "$f%d", RegStorage::RegNum(operand));
- break;
- case 'S':
- DCHECK_EQ(RegStorage::RegNum(operand) & 1, 0);
- snprintf(tbuf, arraysize(tbuf), "$f%d", RegStorage::RegNum(operand));
- break;
- case 'h':
- snprintf(tbuf, arraysize(tbuf), "%04x", operand);
- break;
- case 'M':
- case 'd':
- snprintf(tbuf, arraysize(tbuf), "%d", operand);
- break;
- case 'D':
- snprintf(tbuf, arraysize(tbuf), "%d", operand+1);
- break;
- case 'E':
- snprintf(tbuf, arraysize(tbuf), "%d", operand*4);
- break;
- case 'F':
- snprintf(tbuf, arraysize(tbuf), "%d", operand*2);
- break;
- case 't':
- snprintf(tbuf, arraysize(tbuf), "0x%08" PRIxPTR " (L%p)",
- reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4 + (operand << 1),
- lir->target);
- break;
- case 'T':
- snprintf(tbuf, arraysize(tbuf), "0x%08x", operand << 2);
- break;
- case 'u': {
- int offset_1 = lir->operands[0];
- int offset_2 = NEXT_LIR(lir)->operands[0];
- uintptr_t target =
- (((reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4) & ~3) +
- (offset_1 << 21 >> 9) + (offset_2 << 1)) & 0xfffffffc;
- snprintf(tbuf, arraysize(tbuf), "%p", reinterpret_cast<void*>(target));
- break;
- }
-
- /* Nothing to print for BLX_2 */
- case 'v':
- strcpy(tbuf, "see above");
- break;
- case 'r':
- DCHECK(operand >= 0 && operand < MIPS64_REG_COUNT);
- strcpy(tbuf, mips64_reg_name[operand]);
- break;
- case 'N':
- // Placeholder for delay slot handling
- strcpy(tbuf, "; nop");
- break;
- default:
- strcpy(tbuf, "DecodeError");
- break;
- }
- buf += tbuf;
- }
- } else {
- buf += *fmt++;
- }
- }
- return buf;
-}
-
-// FIXME: need to redo resource maps for MIPS64 - fix this at that time.
-void Mips64Mir2Lir::DumpResourceMask(LIR *mips64_lir, const ResourceMask& mask, const char *prefix) {
- char buf[256];
- buf[0] = 0;
-
- if (mask.Equals(kEncodeAll)) {
- strcpy(buf, "all");
- } else {
- char num[8];
- int i;
-
- for (i = 0; i < kMips64RegEnd; i++) {
- if (mask.HasBit(i)) {
- snprintf(num, arraysize(num), "%d ", i);
- strcat(buf, num);
- }
- }
-
- if (mask.HasBit(ResourceMask::kCCode)) {
- strcat(buf, "cc ");
- }
- if (mask.HasBit(ResourceMask::kFPStatus)) {
- strcat(buf, "fpcc ");
- }
- // Memory bits.
- if (mips64_lir && (mask.HasBit(ResourceMask::kDalvikReg))) {
- snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
- DECODE_ALIAS_INFO_REG(mips64_lir->flags.alias_info),
- DECODE_ALIAS_INFO_WIDE(mips64_lir->flags.alias_info) ? "(+1)" : "");
- }
- if (mask.HasBit(ResourceMask::kLiteral)) {
- strcat(buf, "lit ");
- }
-
- if (mask.HasBit(ResourceMask::kHeapRef)) {
- strcat(buf, "heap ");
- }
- if (mask.HasBit(ResourceMask::kMustNotAlias)) {
- strcat(buf, "noalias ");
- }
- }
- if (buf[0]) {
- LOG(INFO) << prefix << ": " << buf;
- }
-}
-
-/*
- * TUNING: Is this a true leaf? We can't just use METHOD_IS_LEAF to determine it, as some
- * instructions might call out to C/assembly helper functions. Until that
- * machinery is in place, always spill lr.
- */
-
-void Mips64Mir2Lir::AdjustSpillMask() {
- core_spill_mask_ |= (1 << rs_rRA.GetRegNum());
- num_core_spills_++;
-}
-
-/* Clobber all regs that might be used by an external C call */
-void Mips64Mir2Lir::ClobberCallerSave() {
- Clobber(rs_rZEROd);
- Clobber(rs_rATd);
- Clobber(rs_rV0d);
- Clobber(rs_rV1d);
- Clobber(rs_rA0d);
- Clobber(rs_rA1d);
- Clobber(rs_rA2d);
- Clobber(rs_rA3d);
- Clobber(rs_rA4d);
- Clobber(rs_rA5d);
- Clobber(rs_rA6d);
- Clobber(rs_rA7d);
- Clobber(rs_rT0d);
- Clobber(rs_rT1d);
- Clobber(rs_rT2d);
- Clobber(rs_rT3d);
- Clobber(rs_rT8d);
- Clobber(rs_rT9d);
- Clobber(rs_rK0d);
- Clobber(rs_rK1d);
- Clobber(rs_rGPd);
- Clobber(rs_rFPd);
- Clobber(rs_rRAd);
-
- Clobber(rs_rF0);
- Clobber(rs_rF1);
- Clobber(rs_rF2);
- Clobber(rs_rF3);
- Clobber(rs_rF4);
- Clobber(rs_rF5);
- Clobber(rs_rF6);
- Clobber(rs_rF7);
- Clobber(rs_rF8);
- Clobber(rs_rF9);
- Clobber(rs_rF10);
- Clobber(rs_rF11);
- Clobber(rs_rF12);
- Clobber(rs_rF13);
- Clobber(rs_rF14);
- Clobber(rs_rF15);
- Clobber(rs_rD0);
- Clobber(rs_rD1);
- Clobber(rs_rD2);
- Clobber(rs_rD3);
- Clobber(rs_rD4);
- Clobber(rs_rD5);
- Clobber(rs_rD6);
- Clobber(rs_rD7);
-}
-
-RegLocation Mips64Mir2Lir::GetReturnWideAlt() {
- UNIMPLEMENTED(FATAL) << "No GetReturnWideAlt for MIPS64";
- RegLocation res = LocCReturnWide();
- return res;
-}
-
-RegLocation Mips64Mir2Lir::GetReturnAlt() {
- UNIMPLEMENTED(FATAL) << "No GetReturnAlt for MIPS64";
- RegLocation res = LocCReturn();
- return res;
-}
-
-/* To be used when explicitly managing register use */
-void Mips64Mir2Lir::LockCallTemps() {
- LockTemp(rs_rMIPS64_ARG0);
- LockTemp(rs_rMIPS64_ARG1);
- LockTemp(rs_rMIPS64_ARG2);
- LockTemp(rs_rMIPS64_ARG3);
- LockTemp(rs_rMIPS64_ARG4);
- LockTemp(rs_rMIPS64_ARG5);
- LockTemp(rs_rMIPS64_ARG6);
- LockTemp(rs_rMIPS64_ARG7);
-}
-
-/* To be used when explicitly managing register use */
-void Mips64Mir2Lir::FreeCallTemps() {
- FreeTemp(rs_rMIPS64_ARG0);
- FreeTemp(rs_rMIPS64_ARG1);
- FreeTemp(rs_rMIPS64_ARG2);
- FreeTemp(rs_rMIPS64_ARG3);
- FreeTemp(rs_rMIPS64_ARG4);
- FreeTemp(rs_rMIPS64_ARG5);
- FreeTemp(rs_rMIPS64_ARG6);
- FreeTemp(rs_rMIPS64_ARG7);
- FreeTemp(TargetReg(kHiddenArg));
-}
-
-bool Mips64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind ATTRIBUTE_UNUSED) {
- if (cu_->compiler_driver->GetInstructionSetFeatures()->IsSmp()) {
- NewLIR1(kMips64Sync, 0 /* Only stype currently supported */);
- return true;
- } else {
- return false;
- }
-}
-
-void Mips64Mir2Lir::CompilerInitializeRegAlloc() {
- reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs32, core_regs64, sp_regs,
- dp_regs, reserved_regs32, reserved_regs64,
- core_temps32, core_temps64, sp_temps,
- dp_temps));
-
- // Target-specific adjustments.
-
- // Alias single precision floats to appropriate half of overlapping double.
- for (RegisterInfo* info : reg_pool_->sp_regs_) {
- int sp_reg_num = info->GetReg().GetRegNum();
- int dp_reg_num = sp_reg_num;
- RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
- RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
- // Double precision register's master storage should refer to itself.
- DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
- // Redirect single precision's master storage to master.
- info->SetMaster(dp_reg_info);
- // Singles should show a single 32-bit mask bit, at first referring to the low half.
- DCHECK_EQ(info->StorageMask(), 0x1U);
- }
-
- // Alias 32-bit core registers to their corresponding 64-bit core registers.
- for (RegisterInfo* info : reg_pool_->core_regs_) {
- int d_reg_num = info->GetReg().GetRegNum();
- RegStorage d_reg = RegStorage::Solo64(d_reg_num);
- RegisterInfo* d_reg_info = GetRegInfo(d_reg);
- // The 64-bit register's master storage should refer to itself.
- DCHECK_EQ(d_reg_info, d_reg_info->Master());
- // Redirect the 32-bit register's master storage to its 64-bit counterpart.
- info->SetMaster(d_reg_info);
- // The 32-bit register should show a single 32-bit mask bit, at first referring to the low half.
- DCHECK_EQ(info->StorageMask(), 0x1U);
- }
-
- // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
- // TODO: adjust when we roll to hard float calling convention.
- reg_pool_->next_core_reg_ = 2;
- reg_pool_->next_sp_reg_ = 2;
- reg_pool_->next_dp_reg_ = 1;
-}
-
-/*
- * In the Arm code it is typical to use the link register
- * to hold the target address. However, for Mips64 we must
- * ensure that all branch instructions can be restarted if
- * there is a trap in the shadow. Allocate a temp register.
- */
-RegStorage Mips64Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
- // NOTE: native pointer.
- LoadWordDisp(rs_rMIPS64_SELF, GetThreadOffset<8>(trampoline).Int32Value(), rs_rT9d);
- return rs_rT9d;
-}
-
-LIR* Mips64Mir2Lir::CheckSuspendUsingLoad() {
- RegStorage tmp = AllocTemp();
- // NOTE: native pointer.
- LoadWordDisp(rs_rMIPS64_SELF, Thread::ThreadSuspendTriggerOffset<8>().Int32Value(), tmp);
- LIR *inst = LoadWordDisp(tmp, 0, tmp);
- FreeTemp(tmp);
- return inst;
-}
-
-LIR* Mips64Mir2Lir::GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest) {
- DCHECK(!r_dest.IsFloat()); // See RegClassForFieldLoadStore().
- ClobberCallerSave();
- LockCallTemps(); // Using fixed registers.
- RegStorage reg_ptr = TargetReg(kArg0);
- OpRegRegImm(kOpAdd, reg_ptr, r_base, displacement);
- RegStorage r_tgt = LoadHelper(kQuickA64Load);
- LIR *ret = OpReg(kOpBlx, r_tgt);
- OpRegCopy(r_dest, TargetReg(kRet0));
- return ret;
-}
-
-LIR* Mips64Mir2Lir::GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src) {
- DCHECK(!r_src.IsFloat()); // See RegClassForFieldLoadStore().
- DCHECK(!r_src.IsPair());
- ClobberCallerSave();
- LockCallTemps(); // Using fixed registers.
- RegStorage temp_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, temp_ptr, r_base, displacement);
- RegStorage temp_value = AllocTemp();
- OpRegCopy(temp_value, r_src);
- OpRegCopy(TargetReg(kArg0), temp_ptr);
- OpRegCopy(TargetReg(kArg1), temp_value);
- FreeTemp(temp_ptr);
- FreeTemp(temp_value);
- RegStorage r_tgt = LoadHelper(kQuickA64Store);
- return OpReg(kOpBlx, r_tgt);
-}
-
-void Mips64Mir2Lir::SpillCoreRegs() {
- if (num_core_spills_ == 0) {
- return;
- }
- uint32_t mask = core_spill_mask_;
- // Start saving from offset 0 so that ra ends up on the top of the frame.
- int offset = 0;
- OpRegImm(kOpSub, rs_rSPd, num_core_spills_ * 8);
- for (int reg = 0; mask; mask >>= 1, reg++) {
- if (mask & 0x1) {
- StoreWordDisp(rs_rMIPS64_SP, offset, RegStorage::Solo64(reg));
- offset += 8;
- }
- }
-}
-
-void Mips64Mir2Lir::UnSpillCoreRegs() {
- if (num_core_spills_ == 0) {
- return;
- }
- uint32_t mask = core_spill_mask_;
- int offset = frame_size_ - num_core_spills_ * 8;
- for (int reg = 0; mask; mask >>= 1, reg++) {
- if (mask & 0x1) {
- LoadWordDisp(rs_rMIPS64_SP, offset, RegStorage::Solo64(reg));
- offset += 8;
- }
- }
- OpRegImm(kOpAdd, rs_rSPd, frame_size_);
-}
-
-bool Mips64Mir2Lir::IsUnconditionalBranch(LIR* lir) {
- return (lir->opcode == kMips64B);
-}
-
-RegisterClass Mips64Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
- if (UNLIKELY(is_volatile)) {
- // On Mips64, atomic 64-bit load/store requires a core register.
- // Smaller aligned load/store is atomic for both core and fp registers.
- if (size == k64 || size == kDouble) {
- return kCoreReg;
- }
- }
- // TODO: Verify that both core and fp registers are suitable for smaller sizes.
- return RegClassBySize(size);
-}
-
-Mips64Mir2Lir::Mips64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
- : Mir2Lir(cu, mir_graph, arena), in_to_reg_storage_mips64_mapper_(this) {
- for (int i = 0; i < kMips64Last; i++) {
- DCHECK_EQ(Mips64Mir2Lir::EncodingMap[i].opcode, i)
- << "Encoding order for " << Mips64Mir2Lir::EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing "
- << static_cast<int>(Mips64Mir2Lir::EncodingMap[i].opcode);
- }
-}
-
-Mir2Lir* Mips64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
- ArenaAllocator* const arena) {
- return new Mips64Mir2Lir(cu, mir_graph, arena);
-}
-
-uint64_t Mips64Mir2Lir::GetTargetInstFlags(int opcode) {
- DCHECK(!IsPseudoLirOp(opcode));
- return Mips64Mir2Lir::EncodingMap[opcode].flags;
-}
-
-const char* Mips64Mir2Lir::GetTargetInstName(int opcode) {
- DCHECK(!IsPseudoLirOp(opcode));
- return Mips64Mir2Lir::EncodingMap[opcode].name;
-}
-
-const char* Mips64Mir2Lir::GetTargetInstFmt(int opcode) {
- DCHECK(!IsPseudoLirOp(opcode));
- return Mips64Mir2Lir::EncodingMap[opcode].fmt;
-}
-
-void Mips64Mir2Lir::GenBreakpoint(int code) {
- NewLIR1(kMips64Break, code);
-}
-
-} // namespace art
diff --git a/compiler/dex/quick/mips64/utility_mips64.cc b/compiler/dex/quick/mips64/utility_mips64.cc
deleted file mode 100644
index 38e354c..0000000
--- a/compiler/dex/quick/mips64/utility_mips64.cc
+++ /dev/null
@@ -1,875 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_mips64.h"
-
-#include "arch/mips64/instruction_set_features_mips64.h"
-#include "base/logging.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "dex/reg_storage_eq.h"
-#include "driver/compiler_driver.h"
-#include "mips64_lir.h"
-
-namespace art {
-
-/* This file contains codegen for the MIPS64 ISA. */
-
-LIR* Mips64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
- int opcode;
- // Must be both DOUBLE or both not DOUBLE.
- DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());
- if (r_dest.Is64Bit()) {
- if (r_dest.IsDouble()) {
- if (r_src.IsDouble()) {
- opcode = kMips64Fmovd;
- } else {
- // Note the operands are swapped for the dmtc1 instr.
- RegStorage t_opnd = r_src;
- r_src = r_dest;
- r_dest = t_opnd;
- opcode = kMips64Dmtc1;
- }
- } else {
- DCHECK(r_src.IsDouble());
- opcode = kMips64Dmfc1;
- }
- } else {
- if (r_dest.IsSingle()) {
- if (r_src.IsSingle()) {
- opcode = kMips64Fmovs;
- } else {
- // Note the operands are swapped for the mtc1 instr.
- RegStorage t_opnd = r_src;
- r_src = r_dest;
- r_dest = t_opnd;
- opcode = kMips64Mtc1;
- }
- } else {
- DCHECK(r_src.IsSingle());
- opcode = kMips64Mfc1;
- }
- }
- LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
- if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
- res->flags.is_nop = true;
- }
- return res;
-}
-
-bool Mips64Mir2Lir::InexpensiveConstantInt(int32_t value) {
- // For encodings, see LoadConstantNoClobber below.
- return ((value == 0) || IsUint<16>(value) || IsInt<16>(value));
-}
-
-bool Mips64Mir2Lir::InexpensiveConstantFloat(int32_t value) {
- UNUSED(value);
- return false; // TUNING
-}
-
-bool Mips64Mir2Lir::InexpensiveConstantLong(int64_t value) {
- UNUSED(value);
- return false; // TUNING
-}
-
-bool Mips64Mir2Lir::InexpensiveConstantDouble(int64_t value) {
- UNUSED(value);
- return false; // TUNING
-}
-
-/*
- * Load an immediate using a shortcut if possible; otherwise
- * grab from the per-translation literal pool. If the target is
- * a high register, build the constant into a low register and copy.
- *
- * No additional register clobbering operation is performed. Use this version when
- * 1) r_dest is freshly returned from AllocTemp or
- * 2) The codegen is under fixed register usage
- */
-LIR* Mips64Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
- LIR *res;
-
- RegStorage r_dest_save = r_dest;
- int is_fp_reg = r_dest.IsFloat();
- if (is_fp_reg) {
- DCHECK(r_dest.IsSingle());
- r_dest = AllocTemp();
- }
-
- // See if the value can be constructed cheaply.
- if (value == 0) {
- res = NewLIR2(kMips64Move, r_dest.GetReg(), rZERO);
- } else if (IsUint<16>(value)) {
- // Use OR with (unsigned) immediate to encode 16b unsigned int.
- res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZERO, value);
- } else if (IsInt<16>(value)) {
- // Use ADD with (signed) immediate to encode 16b signed int.
- res = NewLIR3(kMips64Addiu, r_dest.GetReg(), rZERO, value);
- } else {
- res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
- if (value & 0xffff)
- NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
- }
-
- if (is_fp_reg) {
- NewLIR2(kMips64Mtc1, r_dest.GetReg(), r_dest_save.GetReg());
- FreeTemp(r_dest);
- }
-
- return res;
-}
-
-LIR* Mips64Mir2Lir::OpUnconditionalBranch(LIR* target) {
- LIR* res = NewLIR1(kMips64B, 0 /* offset to be patched during assembly */);
- res->target = target;
- return res;
-}
-
-LIR* Mips64Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
- Mips64OpCode opcode = kMips64Nop;
- switch (op) {
- case kOpBlx:
- opcode = kMips64Jalr;
- break;
- case kOpBx:
- return NewLIR2(kMips64Jalr, rZERO, r_dest_src.GetReg());
- break;
- default:
- LOG(FATAL) << "Bad case in OpReg";
- }
- return NewLIR2(opcode, rRAd, r_dest_src.GetReg());
-}
-
-LIR* Mips64Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
- LIR *res;
- bool neg = (value < 0);
- int abs_value = (neg) ? -value : value;
- bool short_form = (abs_value & 0xff) == abs_value;
- bool is64bit = r_dest_src1.Is64Bit();
- RegStorage r_scratch;
- Mips64OpCode opcode = kMips64Nop;
- switch (op) {
- case kOpAdd:
- return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
- case kOpSub:
- return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
- default:
- LOG(FATAL) << "Bad case in OpRegImm";
- }
- if (short_form) {
- res = NewLIR2(opcode, r_dest_src1.GetReg(), abs_value);
- } else {
- if (is64bit) {
- r_scratch = AllocTempWide();
- res = LoadConstantWide(r_scratch, value);
- } else {
- r_scratch = AllocTemp();
- res = LoadConstant(r_scratch, value);
- }
- if (op == kOpCmp) {
- NewLIR2(opcode, r_dest_src1.GetReg(), r_scratch.GetReg());
- } else {
- NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_scratch.GetReg());
- }
- }
- return res;
-}
-
-LIR* Mips64Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest,
- RegStorage r_src1, RegStorage r_src2) {
- Mips64OpCode opcode = kMips64Nop;
- bool is64bit = r_dest.Is64Bit() || r_src1.Is64Bit() || r_src2.Is64Bit();
-
- switch (op) {
- case kOpAdd:
- if (is64bit) {
- opcode = kMips64Daddu;
- } else {
- opcode = kMips64Addu;
- }
- break;
- case kOpSub:
- if (is64bit) {
- opcode = kMips64Dsubu;
- } else {
- opcode = kMips64Subu;
- }
- break;
- case kOpAnd:
- opcode = kMips64And;
- break;
- case kOpMul:
- opcode = kMips64Mul;
- break;
- case kOpOr:
- opcode = kMips64Or;
- break;
- case kOpXor:
- opcode = kMips64Xor;
- break;
- case kOpLsl:
- if (is64bit) {
- opcode = kMips64Dsllv;
- } else {
- opcode = kMips64Sllv;
- }
- break;
- case kOpLsr:
- if (is64bit) {
- opcode = kMips64Dsrlv;
- } else {
- opcode = kMips64Srlv;
- }
- break;
- case kOpAsr:
- if (is64bit) {
- opcode = kMips64Dsrav;
- } else {
- opcode = kMips64Srav;
- }
- break;
- case kOpAdc:
- case kOpSbc:
- LOG(FATAL) << "No carry bit on MIPS64";
- break;
- default:
- LOG(FATAL) << "Bad case in OpRegRegReg";
- break;
- }
- return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
-}
-
-LIR* Mips64Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
- LIR *res;
- Mips64OpCode opcode = kMips64Nop;
- bool short_form = true;
- bool is64bit = r_dest.Is64Bit() || r_src1.Is64Bit();
-
- switch (op) {
- case kOpAdd:
- if (is64bit) {
- if (IS_SIMM16(value)) {
- opcode = kMips64Daddiu;
- } else {
- short_form = false;
- opcode = kMips64Daddu;
- }
- } else {
- if (IS_SIMM16(value)) {
- opcode = kMips64Addiu;
- } else {
- short_form = false;
- opcode = kMips64Addu;
- }
- }
- break;
- case kOpSub:
- if (is64bit) {
- if (IS_SIMM16((-value))) {
- value = -value;
- opcode = kMips64Daddiu;
- } else {
- short_form = false;
- opcode = kMips64Dsubu;
- }
- } else {
- if (IS_SIMM16((-value))) {
- value = -value;
- opcode = kMips64Addiu;
- } else {
- short_form = false;
- opcode = kMips64Subu;
- }
- }
- break;
- case kOpLsl:
- if (is64bit) {
- DCHECK(value >= 0 && value <= 63);
- if (value >= 0 && value <= 31) {
- opcode = kMips64Dsll;
- } else {
- opcode = kMips64Dsll32;
- value = value - 32;
- }
- } else {
- DCHECK(value >= 0 && value <= 31);
- opcode = kMips64Sll;
- }
- break;
- case kOpLsr:
- if (is64bit) {
- DCHECK(value >= 0 && value <= 63);
- if (value >= 0 && value <= 31) {
- opcode = kMips64Dsrl;
- } else {
- opcode = kMips64Dsrl32;
- value = value - 32;
- }
- } else {
- DCHECK(value >= 0 && value <= 31);
- opcode = kMips64Srl;
- }
- break;
- case kOpAsr:
- if (is64bit) {
- DCHECK(value >= 0 && value <= 63);
- if (value >= 0 && value <= 31) {
- opcode = kMips64Dsra;
- } else {
- opcode = kMips64Dsra32;
- value = value - 32;
- }
- } else {
- DCHECK(value >= 0 && value <= 31);
- opcode = kMips64Sra;
- }
- break;
- case kOpAnd:
- if (IS_UIMM16((value))) {
- opcode = kMips64Andi;
- } else {
- short_form = false;
- opcode = kMips64And;
- }
- break;
- case kOpOr:
- if (IS_UIMM16((value))) {
- opcode = kMips64Ori;
- } else {
- short_form = false;
- opcode = kMips64Or;
- }
- break;
- case kOpXor:
- if (IS_UIMM16((value))) {
- opcode = kMips64Xori;
- } else {
- short_form = false;
- opcode = kMips64Xor;
- }
- break;
- case kOpMul:
- short_form = false;
- opcode = kMips64Mul;
- break;
- default:
- LOG(FATAL) << "Bad case in OpRegRegImm";
- break;
- }
-
- if (short_form) {
- res = NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), value);
- } else {
- if (r_dest != r_src1) {
- res = LoadConstant(r_dest, value);
- NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_dest.GetReg());
- } else {
- if (is64bit) {
- RegStorage r_scratch = AllocTempWide();
- res = LoadConstantWide(r_scratch, value);
- NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
- } else {
- RegStorage r_scratch = AllocTemp();
- res = LoadConstant(r_scratch, value);
- NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
- }
- }
- }
- return res;
-}
-
-LIR* Mips64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
- Mips64OpCode opcode = kMips64Nop;
- LIR *res;
- switch (op) {
- case kOpMov:
- opcode = kMips64Move;
- break;
- case kOpMvn:
- return NewLIR3(kMips64Nor, r_dest_src1.GetReg(), r_src2.GetReg(), rZEROd);
- case kOpNeg:
- if (r_dest_src1.Is64Bit())
- return NewLIR3(kMips64Dsubu, r_dest_src1.GetReg(), rZEROd, r_src2.GetReg());
- else
- return NewLIR3(kMips64Subu, r_dest_src1.GetReg(), rZERO, r_src2.GetReg());
- case kOpAdd:
- case kOpAnd:
- case kOpMul:
- case kOpOr:
- case kOpSub:
- case kOpXor:
- return OpRegRegReg(op, r_dest_src1, r_dest_src1, r_src2);
- case kOp2Byte:
- res = NewLIR2(kMips64Seb, r_dest_src1.GetReg(), r_src2.GetReg());
- return res;
- case kOp2Short:
- res = NewLIR2(kMips64Seh, r_dest_src1.GetReg(), r_src2.GetReg());
- return res;
- case kOp2Char:
- return NewLIR3(kMips64Andi, r_dest_src1.GetReg(), r_src2.GetReg(), 0xFFFF);
- default:
- LOG(FATAL) << "Bad case in OpRegReg";
- UNREACHABLE();
- }
- return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
-}
-
-LIR* Mips64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
- MoveType move_type) {
- UNUSED(r_dest, r_base, offset, move_type);
- UNIMPLEMENTED(FATAL);
- UNREACHABLE();
-}
-
-LIR* Mips64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset,
- RegStorage r_src, MoveType move_type) {
- UNUSED(r_base, offset, r_src, move_type);
- UNIMPLEMENTED(FATAL);
- UNREACHABLE();
-}
-
-LIR* Mips64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc,
- RegStorage r_dest, RegStorage r_src) {
- UNUSED(op, cc, r_dest, r_src);
- LOG(FATAL) << "Unexpected use of OpCondRegReg for MIPS64";
- UNREACHABLE();
-}
-
-LIR* Mips64Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
- LIR *res = nullptr;
- DCHECK(r_dest.Is64Bit());
- RegStorage r_dest_save = r_dest;
- int is_fp_reg = r_dest.IsFloat();
- if (is_fp_reg) {
- DCHECK(r_dest.IsDouble());
- r_dest = AllocTemp();
- }
-
- int bit31 = (value & UINT64_C(0x80000000)) != 0;
-
- // Loads with 1 instruction.
- if (IsUint<16>(value)) {
- res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, value);
- } else if (IsInt<16>(value)) {
- res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, value);
- } else if ((value & 0xFFFF) == 0 && IsInt<16>(value >> 16)) {
- res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
- } else if (IsInt<32>(value)) {
- // Loads with 2 instructions.
- res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
- NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
- } else if ((value & 0xFFFF0000) == 0 && IsInt<16>(value >> 32)) {
- res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, value);
- NewLIR2(kMips64Dahi, r_dest.GetReg(), value >> 32);
- } else if ((value & UINT64_C(0xFFFFFFFF0000)) == 0) {
- res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, value);
- NewLIR2(kMips64Dati, r_dest.GetReg(), value >> 48);
- } else if ((value & 0xFFFF) == 0 && (value >> 32) >= (-32768 - bit31) &&
- (value >> 32) <= (32767 - bit31)) {
- res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
- NewLIR2(kMips64Dahi, r_dest.GetReg(), (value >> 32) + bit31);
- } else if ((value & 0xFFFF) == 0 && ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF)) {
- res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
- NewLIR2(kMips64Dati, r_dest.GetReg(), (value >> 48) + bit31);
- } else {
- int64_t tmp = value;
- int shift_cnt = 0;
- while ((tmp & 1) == 0) {
- tmp >>= 1;
- shift_cnt++;
- }
-
- if (IsUint<16>(tmp)) {
- res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, tmp);
- NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
- shift_cnt & 0x1F);
- } else if (IsInt<16>(tmp)) {
- res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
- NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
- shift_cnt & 0x1F);
- } else if (IsInt<32>(tmp)) {
- // Loads with 3 instructions.
- res = NewLIR2(kMips64Lui, r_dest.GetReg(), tmp >> 16);
- NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), tmp);
- NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
- shift_cnt & 0x1F);
- } else {
- tmp = value >> 16;
- shift_cnt = 16;
- while ((tmp & 1) == 0) {
- tmp >>= 1;
- shift_cnt++;
- }
-
- if (IsUint<16>(tmp)) {
- res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, tmp);
- NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
- shift_cnt & 0x1F);
- NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
- } else if (IsInt<16>(tmp)) {
- res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
- NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
- shift_cnt & 0x1F);
- NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
- } else {
- // Loads with 3-4 instructions.
- uint64_t tmp2 = value;
- if (((tmp2 >> 16) & 0xFFFF) != 0 || (tmp2 & 0xFFFFFFFF) == 0) {
- res = NewLIR2(kMips64Lui, r_dest.GetReg(), tmp2 >> 16);
- }
- if ((tmp2 & 0xFFFF) != 0) {
- if (res)
- NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), tmp2);
- else
- res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, tmp2);
- }
- if (bit31) {
- tmp2 += UINT64_C(0x100000000);
- }
- if (((tmp2 >> 32) & 0xFFFF) != 0) {
- NewLIR2(kMips64Dahi, r_dest.GetReg(), tmp2 >> 32);
- }
- if (tmp2 & UINT64_C(0x800000000000)) {
- tmp2 += UINT64_C(0x1000000000000);
- }
- if ((tmp2 >> 48) != 0) {
- NewLIR2(kMips64Dati, r_dest.GetReg(), tmp2 >> 48);
- }
- }
- }
- }
-
- if (is_fp_reg) {
- NewLIR2(kMips64Dmtc1, r_dest.GetReg(), r_dest_save.GetReg());
- FreeTemp(r_dest);
- }
-
- return res;
-}
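
The materialization ladder above is easiest to read in isolation. Below is a minimal sketch of its single-instruction cases, assuming helpers equivalent to art's IsUint/IsInt bit utilities; everything here is illustrative, not art code:

    #include <cstdint>

    template <size_t kBits> static bool IsUintN(int64_t v) {
      return 0 <= v && v < (INT64_C(1) << kBits);
    }
    template <size_t kBits> static bool IsIntN(int64_t v) {
      return -(INT64_C(1) << (kBits - 1)) <= v && v < (INT64_C(1) << (kBits - 1));
    }

    // Mirrors the first three branches: ori takes a zero-extended 16-bit value,
    // daddiu a sign-extended one, and lui a 16-bit value shifted left by 16.
    static const char* ClassifyWideConstant(int64_t value) {
      if (IsUintN<16>(value)) return "ori";
      if (IsIntN<16>(value)) return "daddiu";
      if ((value & 0xFFFF) == 0 && IsIntN<16>(value >> 16)) return "lui";
      return "multi-instruction sequence";
    }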
-
-/* Load value from base + scaled index. */
-LIR* Mips64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
- int scale, OpSize size) {
- LIR *first = NULL;
- LIR *res;
- RegStorage t_reg;
- Mips64OpCode opcode = kMips64Nop;
- bool is64bit = r_dest.Is64Bit();
- if (is64bit) {
- t_reg = AllocTempWide();
- } else {
- t_reg = AllocTemp();
- }
-
- if (r_dest.IsFloat()) {
- DCHECK(r_dest.IsSingle());
- DCHECK((size == k32) || (size == kSingle) || (size == kReference));
- size = kSingle;
- } else if (is64bit) {
- size = k64;
- } else {
- if (size == kSingle)
- size = k32;
- }
-
- if (!scale) {
- if (is64bit) {
- first = NewLIR3(kMips64Daddu, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
- } else {
- first = NewLIR3(kMips64Addu, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
- }
- } else {
- first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
- NewLIR3(kMips64Daddu, t_reg.GetReg() , r_base.GetReg(), t_reg.GetReg());
- }
-
- switch (size) {
- case k64:
- opcode = kMips64Ld;
- break;
- case kSingle:
- opcode = kMips64Flwc1;
- break;
- case k32:
- case kReference:
- opcode = kMips64Lw;
- break;
- case kUnsignedHalf:
- opcode = kMips64Lhu;
- break;
- case kSignedHalf:
- opcode = kMips64Lh;
- break;
- case kUnsignedByte:
- opcode = kMips64Lbu;
- break;
- case kSignedByte:
- opcode = kMips64Lb;
- break;
- default:
- LOG(FATAL) << "Bad case in LoadBaseIndexed";
- }
-
- res = NewLIR3(opcode, r_dest.GetReg(), 0, t_reg.GetReg());
- FreeTemp(t_reg);
- return (first) ? first : res;
-}
-
-/* Store value to base + scaled index. */
-LIR* Mips64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
- int scale, OpSize size) {
- LIR *first = NULL;
- Mips64OpCode opcode = kMips64Nop;
- RegStorage t_reg = AllocTemp();
-
- if (r_src.IsFloat()) {
- DCHECK(r_src.IsSingle());
- DCHECK((size == k32) || (size == kSingle) || (size == kReference));
- size = kSingle;
- } else {
- if (size == kSingle)
- size = k32;
- }
-
- if (!scale) {
- first = NewLIR3(kMips64Daddu, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
- } else {
- first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
- NewLIR3(kMips64Daddu, t_reg.GetReg() , r_base.GetReg(), t_reg.GetReg());
- }
-
- switch (size) {
- case kSingle:
- opcode = kMips64Fswc1;
- break;
- case k32:
- case kReference:
- opcode = kMips64Sw;
- break;
- case kUnsignedHalf:
- case kSignedHalf:
- opcode = kMips64Sh;
- break;
- case kUnsignedByte:
- case kSignedByte:
- opcode = kMips64Sb;
- break;
- default:
- LOG(FATAL) << "Bad case in StoreBaseIndexed";
- }
- NewLIR3(opcode, r_src.GetReg(), 0, t_reg.GetReg());
- return first;
-}
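
Both indexed accessors first materialize the same effective address into a temp register; in plain terms (a hypothetical helper, not art code):

    #include <cstdint>
    // base + (index << scale); scale == 0 takes the single-Daddu fast path above.
    static uint64_t EffectiveAddress(uint64_t base, uint64_t index, int scale) {
      return base + (index << scale);
    }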
-
-// FIXME: don't split r_dest into 2 containers.
-LIR* Mips64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size) {
-/*
- * Load value from base + displacement. Optionally perform null check
- * on base (which must have an associated s_reg and MIR). If not
- * performing null check, incoming MIR can be null. IMPORTANT: this
- * code must not allocate any new temps. If a new register is needed
- * and base and dest are the same, spill some other register to
- * rlp and then restore.
- */
- LIR *res;
- LIR *load = NULL;
- Mips64OpCode opcode = kMips64Nop;
- bool short_form = IS_SIMM16(displacement);
-
- switch (size) {
- case k64:
- case kDouble:
- r_dest = Check64BitReg(r_dest);
- if (!r_dest.IsFloat())
- opcode = kMips64Ld;
- else
- opcode = kMips64Fldc1;
- DCHECK_EQ((displacement & 0x3), 0);
- break;
- case k32:
- case kSingle:
- case kReference:
- opcode = kMips64Lw;
- if (r_dest.IsFloat()) {
- opcode = kMips64Flwc1;
- DCHECK(r_dest.IsSingle());
- }
- DCHECK_EQ((displacement & 0x3), 0);
- break;
- case kUnsignedHalf:
- opcode = kMips64Lhu;
- DCHECK_EQ((displacement & 0x1), 0);
- break;
- case kSignedHalf:
- opcode = kMips64Lh;
- DCHECK_EQ((displacement & 0x1), 0);
- break;
- case kUnsignedByte:
- opcode = kMips64Lbu;
- break;
- case kSignedByte:
- opcode = kMips64Lb;
- break;
- default:
-      LOG(FATAL) << "Bad case in LoadBaseDispBody";
- }
-
- if (short_form) {
- load = res = NewLIR3(opcode, r_dest.GetReg(), displacement, r_base.GetReg());
- } else {
- RegStorage r_tmp = (r_base == r_dest) ? AllocTemp() : r_dest;
- res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
- load = NewLIR3(opcode, r_dest.GetReg(), 0, r_tmp.GetReg());
- if (r_tmp != r_dest)
- FreeTemp(r_tmp);
- }
-
- if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK_EQ(r_base, rs_rMIPS64_SP);
- AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
- }
- return res;
-}
-
-LIR* Mips64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size, VolatileKind is_volatile) {
- if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble) &&
- displacement & 0x7)) {
- // TODO: use lld/scd instructions for Mips64.
- // Do atomic 64-bit load.
- return GenAtomic64Load(r_base, displacement, r_dest);
- }
-
- // TODO: base this on target.
- if (size == kWord) {
- size = k64;
- }
- LIR* load;
- load = LoadBaseDispBody(r_base, displacement, r_dest, size);
-
- if (UNLIKELY(is_volatile == kVolatile)) {
- GenMemBarrier(kLoadAny);
- }
-
- return load;
-}
-
-// FIXME: don't split r_dest into 2 containers.
-LIR* Mips64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size) {
- LIR *res;
- LIR *store = NULL;
- Mips64OpCode opcode = kMips64Nop;
- bool short_form = IS_SIMM16(displacement);
-
- switch (size) {
- case k64:
- case kDouble:
- r_src = Check64BitReg(r_src);
- if (!r_src.IsFloat())
- opcode = kMips64Sd;
- else
- opcode = kMips64Fsdc1;
- DCHECK_EQ((displacement & 0x3), 0);
- break;
- case k32:
- case kSingle:
- case kReference:
- opcode = kMips64Sw;
- if (r_src.IsFloat()) {
- opcode = kMips64Fswc1;
- DCHECK(r_src.IsSingle());
- }
- DCHECK_EQ((displacement & 0x3), 0);
- break;
- case kUnsignedHalf:
- case kSignedHalf:
- opcode = kMips64Sh;
- DCHECK_EQ((displacement & 0x1), 0);
- break;
- case kUnsignedByte:
- case kSignedByte:
- opcode = kMips64Sb;
- break;
- default:
- LOG(FATAL) << "Bad case in StoreBaseDispBody";
- }
-
- if (short_form) {
- store = res = NewLIR3(opcode, r_src.GetReg(), displacement, r_base.GetReg());
- } else {
- RegStorage r_scratch = AllocTemp();
- res = OpRegRegImm(kOpAdd, r_scratch, r_base, displacement);
- store = NewLIR3(opcode, r_src.GetReg(), 0, r_scratch.GetReg());
- FreeTemp(r_scratch);
- }
-
- if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK_EQ(r_base, rs_rMIPS64_SP);
- AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
- }
-
- return res;
-}
-
-LIR* Mips64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size, VolatileKind is_volatile) {
- if (is_volatile == kVolatile) {
- // Ensure that prior accesses become visible to other threads first.
- GenMemBarrier(kAnyStore);
- }
-
- LIR* store;
- if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble) &&
- displacement & 0x7)) {
-    // TODO: use lld/scd instructions for Mips64.
-    // Do atomic 64-bit store.
- store = GenAtomic64Store(r_base, displacement, r_src);
- } else {
- // TODO: base this on target.
- if (size == kWord) {
- size = k64;
- }
- store = StoreBaseDispBody(r_base, displacement, r_src, size);
- }
-
- if (UNLIKELY(is_volatile == kVolatile)) {
- // Preserve order with respect to any subsequent volatile loads.
- // We need StoreLoad, but that generally requires the most expensive barrier.
- GenMemBarrier(kAnyAny);
- }
-
- return store;
-}
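
The barrier pairing around the volatile store maps, roughly, to a release fence before and a full StoreLoad-capable fence after. As an analogy in C++ atomics terms only, not art's GenMemBarrier API:

    #include <atomic>
    #include <cstdint>

    // kAnyStore ~ release fence; kAnyAny ~ full fence (covers the StoreLoad
    // ordering the comment above mentions).
    void VolatileStoreAnalogy(std::atomic<int64_t>& slot, int64_t value) {
      std::atomic_thread_fence(std::memory_order_release);  // prior accesses become visible first
      slot.store(value, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_seq_cst);  // order against later volatile loads
    }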
-
-LIR* Mips64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
- UNUSED(op, r_base, disp);
- LOG(FATAL) << "Unexpected use of OpMem for MIPS64";
- UNREACHABLE();
-}
-
-LIR* Mips64Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
- UNUSED(cc, target);
- LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS64";
- UNREACHABLE();
-}
-
-LIR* Mips64Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
- UNUSED(trampoline); // The address of the trampoline is already loaded into r_tgt.
- return OpReg(op, r_tgt);
-}
-
-} // namespace art
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 8edc5fc..0b480a0 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -27,7 +27,7 @@
class Mir2Lir::SpecialSuspendCheckSlowPath : public Mir2Lir::LIRSlowPath {
public:
SpecialSuspendCheckSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont),
+ : LIRSlowPath(m2l, branch, cont),
num_used_args_(0u) {
}
@@ -406,6 +406,7 @@
bool Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) {
DCHECK(special.flags & kInlineSpecial);
current_dalvik_offset_ = mir->offset;
+ DCHECK(current_mir_ == nullptr); // Safepoints attributed to prologue.
MIR* return_mir = nullptr;
bool successful = false;
EnsureInitializedArgMappingToPhysicalReg();
@@ -540,7 +541,7 @@
GenMoveException(rl_dest);
break;
- case Instruction::RETURN_VOID_BARRIER:
+ case Instruction::RETURN_VOID_NO_BARRIER:
case Instruction::RETURN_VOID:
if (((cu_->access_flags & kAccConstructor) != 0) &&
cu_->compiler_driver->RequiresConstructorBarrier(Thread::Current(), cu_->dex_file,
@@ -587,9 +588,6 @@
case Instruction::MOVE_FROM16:
case Instruction::MOVE_OBJECT_FROM16:
StoreValue(rl_dest, rl_src[0]);
- if (rl_src[0].is_const && (mir_graph_->ConstantValue(rl_src[0]) == 0)) {
- Workaround7250540(rl_dest, RegStorage::InvalidReg());
- }
break;
case Instruction::MOVE_WIDE:
@@ -1276,6 +1274,7 @@
}
current_dalvik_offset_ = mir->offset;
+ current_mir_ = mir;
int opcode = mir->dalvikInsn.opcode;
GenPrintLabel(mir);
@@ -1376,6 +1375,7 @@
LIR* Mir2Lir::LIRSlowPath::GenerateTargetLabel(int opcode) {
m2l_->SetCurrentDexPc(current_dex_pc_);
+ m2l_->current_mir_ = current_mir_;
LIR* target = m2l_->NewLIR0(opcode);
fromfast_->target = target;
return target;
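
The net effect of the constructor change (here and in mir_to_lir.h below): a slow path snapshots the current dex pc and MIR when it is created on the fast path, then restores both before emitting its out-of-line code, so safepoints there attribute to the right instruction for GC maps. Schematically:

    // At slow-path creation (fast path still current):
    //   current_dex_pc_ = m2l->current_dalvik_offset_;  current_mir_ = m2l->current_mir_;
    // At slow-path emission (GenerateTargetLabel):
    //   m2l->SetCurrentDexPc(current_dex_pc_);  m2l->current_mir_ = current_mir_;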
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 9a56171..cca4e5a 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -131,6 +131,7 @@
#define MAX_ASSEMBLER_RETRIES 50
class BasicBlock;
+class BitVector;
struct CallInfo;
struct CompilationUnit;
struct InlineMethod;
@@ -490,9 +491,10 @@
class LIRSlowPath : public ArenaObject<kArenaAllocSlowPaths> {
public:
- LIRSlowPath(Mir2Lir* m2l, const DexOffset dexpc, LIR* fromfast,
- LIR* cont = nullptr) :
- m2l_(m2l), cu_(m2l->cu_), current_dex_pc_(dexpc), fromfast_(fromfast), cont_(cont) {
+ LIRSlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont = nullptr)
+ : m2l_(m2l), cu_(m2l->cu_),
+ current_dex_pc_(m2l->current_dalvik_offset_), current_mir_(m2l->current_mir_),
+ fromfast_(fromfast), cont_(cont) {
}
virtual ~LIRSlowPath() {}
virtual void Compile() = 0;
@@ -511,6 +513,7 @@
Mir2Lir* const m2l_;
CompilationUnit* const cu_;
const DexOffset current_dex_pc_;
+ MIR* current_mir_;
LIR* const fromfast_;
LIR* const cont_;
};
@@ -582,14 +585,16 @@
* TUNING: If use of these utilities becomes more common on 32-bit builds, it
* may be worth conditionally-compiling a set of identity functions here.
*/
- uint32_t WrapPointer(void* pointer) {
+ template <typename T>
+ uint32_t WrapPointer(const T* pointer) {
uint32_t res = pointer_storage_.size();
pointer_storage_.push_back(pointer);
return res;
}
- void* UnwrapPointer(size_t index) {
- return pointer_storage_[index];
+ template <typename T>
+ const T* UnwrapPointer(size_t index) {
+ return reinterpret_cast<const T*>(pointer_storage_[index]);
}
// strdup(), but allocates from the arena.
@@ -670,6 +675,7 @@
bool VerifyCatchEntries();
void CreateMappingTables();
void CreateNativeGcMap();
+ void CreateNativeGcMapWithoutRegisterPromotion();
int AssignLiteralOffset(CodeOffset offset);
int AssignSwitchTablesOffset(CodeOffset offset);
int AssignFillArrayDataOffset(CodeOffset offset);
@@ -1379,7 +1385,7 @@
virtual LIR* OpIT(ConditionCode cond, const char* guide) = 0;
virtual void OpEndIT(LIR* it) = 0;
virtual LIR* OpMem(OpKind op, RegStorage r_base, int disp) = 0;
- virtual LIR* OpPcRelLoad(RegStorage reg, LIR* target) = 0;
+ virtual void OpPcRelLoad(RegStorage reg, LIR* target) = 0;
virtual LIR* OpReg(OpKind op, RegStorage r_dest_src) = 0;
virtual void OpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
virtual LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) = 0;
@@ -1459,9 +1465,6 @@
virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);
- // Temp workaround
- void Workaround7250540(RegLocation rl_dest, RegStorage zero_reg);
-
virtual LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) = 0;
// Queries for backend support for vectors
@@ -1729,6 +1732,16 @@
// See CheckRegLocationImpl.
void CheckRegLocation(RegLocation rl) const;
+ // Find the references at the beginning of a basic block (for generating GC maps).
+ void InitReferenceVRegs(BasicBlock* bb, BitVector* references);
+
+ // Update references from prev_mir to mir in the same BB. If mir is null or before
+ // prev_mir, report failure (return false) and update references to the end of the BB.
+ bool UpdateReferenceVRegsLocal(MIR* mir, MIR* prev_mir, BitVector* references);
+
+ // Update references from prev_mir to mir.
+ void UpdateReferenceVRegs(MIR* mir, MIR* prev_mir, BitVector* references);
+
public:
// TODO: add accessors for these.
LIR* literal_list_; // Constants.
@@ -1745,8 +1758,7 @@
ArenaVector<FillArrayData*> fill_array_data_;
ArenaVector<RegisterInfo*> tempreg_info_;
ArenaVector<RegisterInfo*> reginfo_map_;
- ArenaVector<void*> pointer_storage_;
-  CodeOffset current_code_offset_;  // Working byte offset of machine instructions.
+ ArenaVector<const void*> pointer_storage_;
CodeOffset data_offset_; // starting offset of literal pool.
size_t total_size_; // header + code size.
LIR* block_label_list_;
@@ -1761,6 +1773,7 @@
* The low-level LIR creation utilites will pull it from here. Rework this.
*/
DexOffset current_dalvik_offset_;
+ MIR* current_mir_;
size_t estimated_native_code_size_; // Just an estimate; used to reserve code_buffer_ size.
std::unique_ptr<RegisterPool> reg_pool_;
/*
@@ -1799,6 +1812,9 @@
// to deduplicate the masks.
ResourceMaskCache mask_cache_;
+ // Record the MIR that generated a given safepoint (nullptr for prologue safepoints).
+ ArenaVector<std::pair<LIR*, MIR*>> safepoints_;
+
protected:
// ABI support
class ShortyArg {
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 922f2f7..6d28984 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -45,7 +45,6 @@
#include "dex/quick/arm/backend_arm.h"
#include "dex/quick/arm64/backend_arm64.h"
#include "dex/quick/mips/backend_mips.h"
-#include "dex/quick/mips64/backend_mips64.h"
#include "dex/quick/x86/backend_x86.h"
namespace art {
@@ -250,7 +249,7 @@
Instruction::INVOKE_DIRECT,
Instruction::INVOKE_STATIC,
Instruction::INVOKE_INTERFACE,
- Instruction::RETURN_VOID_BARRIER,
+ Instruction::RETURN_VOID_NO_BARRIER,
Instruction::INVOKE_VIRTUAL_RANGE,
Instruction::INVOKE_SUPER_RANGE,
Instruction::INVOKE_DIRECT_RANGE,
@@ -808,10 +807,9 @@
mir_to_lir = Arm64CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
break;
case kMips:
- mir_to_lir = MipsCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
- break;
+ // Fall-through.
case kMips64:
- mir_to_lir = Mips64CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+ mir_to_lir = MipsCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
break;
case kX86:
// Fall-through.
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 6f26b78..118ab1d 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -1586,13 +1586,11 @@
int32_t raw_index, int scale, int32_t table_or_disp) {
int disp;
if (entry->opcode == kX86PcRelLoadRA) {
- Mir2Lir::EmbeddedData *tab_rec =
- reinterpret_cast<Mir2Lir::EmbeddedData*>(UnwrapPointer(table_or_disp));
+ const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(table_or_disp);
disp = tab_rec->offset;
} else {
DCHECK(entry->opcode == kX86PcRelAdr);
- Mir2Lir::EmbeddedData *tab_rec =
- reinterpret_cast<Mir2Lir::EmbeddedData*>(UnwrapPointer(raw_base_or_table));
+ const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(raw_base_or_table);
disp = tab_rec->offset;
}
if (entry->opcode == kX86PcRelLoadRA) {
@@ -1794,8 +1792,7 @@
DCHECK_EQ(lir->opcode, kX86Lea64RM) << "Unknown instruction: " << X86Mir2Lir::EncodingMap[lir->opcode].name;
DCHECK_EQ(lir->operands[1], static_cast<int>(kRIPReg));
// Grab the target offset from the saved data.
- Mir2Lir::EmbeddedData* tab_rec =
- reinterpret_cast<Mir2Lir::EmbeddedData*>(UnwrapPointer(lir->operands[4]));
+ const EmbeddedData* tab_rec = UnwrapPointer<Mir2Lir::EmbeddedData>(lir->operands[4]);
CodeOffset target = tab_rec->offset;
// Handle 64 bit RIP addressing.
// Offset is relative to next instruction.
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 11c1465..abee872 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -193,7 +193,7 @@
class StackOverflowSlowPath : public LIRSlowPath {
public:
StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, nullptr), sp_displace_(sp_displace) {
+ : LIRSlowPath(m2l, branch), sp_displace_(sp_displace) {
}
void Compile() OVERRIDE {
m2l_->ResetRegPool();
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 20163b4..040a8c4 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -296,7 +296,7 @@
LIR* OpIT(ConditionCode cond, const char* guide) OVERRIDE;
void OpEndIT(LIR* it) OVERRIDE;
LIR* OpMem(OpKind op, RegStorage r_base, int disp) OVERRIDE;
- LIR* OpPcRelLoad(RegStorage reg, LIR* target) OVERRIDE;
+ void OpPcRelLoad(RegStorage reg, LIR* target) OVERRIDE;
LIR* OpReg(OpKind op, RegStorage r_dest_src) OVERRIDE;
void OpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) OVERRIDE;
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 91168c7..4eb626c 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1324,7 +1324,7 @@
return true;
}
-LIR* X86Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+void X86Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
if (cu_->target64) {
// We can do this directly using RIP addressing.
// We don't know the proper offset for the value, so pick one that will force
@@ -1334,7 +1334,7 @@
LIR* res = NewLIR3(kX86Mov32RM, reg.GetReg(), kRIPReg, 256);
res->target = target;
res->flags.fixup = kFixupLoad;
- return res;
+ return;
}
CHECK(base_of_code_ != nullptr);
@@ -1353,11 +1353,9 @@
// 4 byte offset. We will fix this up in the assembler later to have the right
// value.
ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
- LIR *res = RawLIR(current_dalvik_offset_, kX86Mov32RM, reg.GetReg(), reg.GetReg(), 256,
- 0, 0, target);
+ LIR* res = NewLIR3(kX86Mov32RM, reg.GetReg(), reg.GetReg(), 256);
res->target = target;
res->flags.fixup = kFixupLoad;
- return res;
}
LIR* X86Mir2Lir::OpVldm(RegStorage r_base, int count) {
@@ -1412,7 +1410,7 @@
public:
ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in,
RegStorage index_in, RegStorage array_base_in, int32_t len_offset_in)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in),
+ : LIRSlowPath(m2l, branch_in),
index_(index_in), array_base_(array_base_in), len_offset_(len_offset_in) {
}
@@ -1460,7 +1458,7 @@
public:
ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in,
int32_t index_in, RegStorage array_base_in, int32_t len_offset_in)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in),
+ : LIRSlowPath(m2l, branch_in),
index_(index_in), array_base_(array_base_in), len_offset_(len_offset_in) {
}
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index dbe4848..f128eb7 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -390,7 +390,7 @@
break;
}
case 'p': {
- EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
+ const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(operand);
buf += StringPrintf("0x%08x", tab_rec->offset);
break;
}
@@ -1062,8 +1062,7 @@
for (LIR* p : method_address_insns_) {
DCHECK_EQ(p->opcode, kX86Mov32RI);
uint32_t target_method_idx = p->operands[2];
- const DexFile* target_dex_file =
- reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3]));
+ const DexFile* target_dex_file = UnwrapPointer<DexFile>(p->operands[3]);
// The offset to patch is the last 4 bytes of the instruction.
int patch_offset = p->offset + p->flags.size - 4;
@@ -1075,8 +1074,7 @@
for (LIR* p : class_type_address_insns_) {
DCHECK_EQ(p->opcode, kX86Mov32RI);
- const DexFile* class_dex_file =
- reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3]));
+ const DexFile* class_dex_file = UnwrapPointer<DexFile>(p->operands[3]);
uint32_t target_type_idx = p->operands[2];
// The offset to patch is the last 4 bytes of the instruction.
@@ -1090,8 +1088,7 @@
for (LIR* p : call_method_insns_) {
DCHECK_EQ(p->opcode, kX86CallI);
uint32_t target_method_idx = p->operands[1];
- const DexFile* target_dex_file =
- reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2]));
+ const DexFile* target_dex_file = UnwrapPointer<DexFile>(p->operands[2]);
// The offset to patch is the last 4 bytes of the instruction.
int patch_offset = p->offset + p->flags.size - 4;
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index cdf71b6..d692d26 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -27,8 +27,9 @@
class QuickCompilerCallbacks FINAL : public CompilerCallbacks {
public:
QuickCompilerCallbacks(VerificationResults* verification_results,
- DexFileToMethodInlinerMap* method_inliner_map)
- : verification_results_(verification_results),
+ DexFileToMethodInlinerMap* method_inliner_map,
+ CompilerCallbacks::CallbackMode mode)
+ : CompilerCallbacks(mode), verification_results_(verification_results),
method_inliner_map_(method_inliner_map) {
CHECK(verification_results != nullptr);
CHECK(method_inliner_map != nullptr);
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 4a35e9f..8babc28 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -39,6 +39,22 @@
return soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader());
}
+inline mirror::Class* CompilerDriver::ResolveClass(
+ const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
+ Handle<mirror::ClassLoader> class_loader, uint16_t cls_index,
+ const DexCompilationUnit* mUnit) {
+ DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
+ DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ mirror::Class* cls = mUnit->GetClassLinker()->ResolveType(
+ *mUnit->GetDexFile(), cls_index, dex_cache, class_loader);
+ DCHECK_EQ(cls == nullptr, soa.Self()->IsExceptionPending());
+ if (UNLIKELY(cls == nullptr)) {
+ // Clean up any exception left by type resolution.
+ soa.Self()->ClearException();
+ }
+ return cls;
+}
+
inline mirror::Class* CompilerDriver::ResolveCompilingMethodsClass(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit) {
@@ -46,14 +62,7 @@
DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
const DexFile::MethodId& referrer_method_id =
mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex());
- mirror::Class* referrer_class = mUnit->GetClassLinker()->ResolveType(
- *mUnit->GetDexFile(), referrer_method_id.class_idx_, dex_cache, class_loader);
- DCHECK_EQ(referrer_class == nullptr, soa.Self()->IsExceptionPending());
- if (UNLIKELY(referrer_class == nullptr)) {
- // Clean up any exception left by type resolution.
- soa.Self()->ClearException();
- }
- return referrer_class;
+ return ResolveClass(soa, dex_cache, class_loader, referrer_method_id.class_idx_, mUnit);
}
inline mirror::ArtField* CompilerDriver::ResolveFieldWithDexFile(
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index ff4e0d8..100d49a 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -491,11 +491,12 @@
}
}
-static DexToDexCompilationLevel GetDexToDexCompilationlevel(
+DexToDexCompilationLevel CompilerDriver::GetDexToDexCompilationlevel(
Thread* self, Handle<mirror::ClassLoader> class_loader, const DexFile& dex_file,
- const DexFile::ClassDef& class_def) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const DexFile::ClassDef& class_def) {
auto* const runtime = Runtime::Current();
- if (runtime->UseJit()) {
+ if (runtime->UseJit() || GetCompilerOptions().VerifyAtRuntime()) {
+  // Verify-at-runtime shouldn't dex-to-dex compile, since we didn't resolve or verify.
return kDontDexToDexCompile;
}
const char* descriptor = dex_file.GetClassDescriptor(class_def);
@@ -605,12 +606,22 @@
LoadImageClasses(timings);
VLOG(compiler) << "LoadImageClasses: " << GetMemoryUsageString(false);
- Resolve(class_loader, dex_files, thread_pool, timings);
- VLOG(compiler) << "Resolve: " << GetMemoryUsageString(false);
+ const bool verification_enabled = compiler_options_->IsVerificationEnabled();
+ const bool never_verify = compiler_options_->NeverVerify();
- if (!compiler_options_->IsVerificationEnabled()) {
+  // We still need to resolve for never_verify, since dex-to-dex compilation must run to
+  // insert RETURN_VOID_NO_BARRIER.
+ if (never_verify || verification_enabled) {
+ Resolve(class_loader, dex_files, thread_pool, timings);
+ VLOG(compiler) << "Resolve: " << GetMemoryUsageString(false);
+ }
+
+ if (never_verify) {
VLOG(compiler) << "Verify none mode specified, skipping verification.";
SetVerified(class_loader, dex_files, thread_pool, timings);
+ }
+
+ if (!verification_enabled) {
return;
}
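
The restructured flow reduces to the following (a pseudocode summary of the hunk, using its own locals):

    // if (never_verify || verification_enabled)  Resolve(...);
    // if (never_verify)                          SetVerified(...);  // skip real verification
    // if (!verification_enabled)                 return;            // kVerifyNone and kVerifyAtRuntime
    // ... full verification ...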
@@ -1387,8 +1398,11 @@
}
} else {
bool method_in_image = heap->FindSpaceFromObject(method, false)->IsImageSpace();
- if (method_in_image || compiling_boot) {
+ if (method_in_image || compiling_boot || runtime->UseJit()) {
// We know we must be able to get to the method in the image, so use that pointer.
+ // In the case where we are the JIT, we can always use direct pointers since we know where
+ // the method and its code are / will be. We don't sharpen to interpreter bridge since we
+ // check IsQuickToInterpreterBridge above.
CHECK(!method->IsAbstract());
*type = sharp_type;
*direct_method = force_relocations ? -1 : reinterpret_cast<uintptr_t>(method);
@@ -2090,6 +2104,8 @@
return;
}
+ CompilerDriver* const driver = manager->GetCompiler();
+
// Can we run DEX-to-DEX compiler on this class ?
DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile;
{
@@ -2097,8 +2113,8 @@
StackHandleScope<1> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
- dex_to_dex_compilation_level = GetDexToDexCompilationlevel(soa.Self(), class_loader, dex_file,
- class_def);
+ dex_to_dex_compilation_level = driver->GetDexToDexCompilationlevel(
+ soa.Self(), class_loader, dex_file, class_def);
}
ClassDataItemIterator it(dex_file, class_data);
// Skip fields
@@ -2108,7 +2124,6 @@
while (it.HasNextInstanceField()) {
it.Next();
}
- CompilerDriver* driver = manager->GetCompiler();
bool compilation_enabled = driver->IsClassToCompile(
dex_file.StringByTypeIdx(class_def.class_idx_));
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 28a8245..b825293 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -228,6 +228,12 @@
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Class* ResolveClass(
+ const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
+ Handle<mirror::ClassLoader> class_loader, uint16_t type_index,
+ const DexCompilationUnit* mUnit)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Resolve a field. Returns nullptr on failure, including incompatible class change.
// NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
mirror::ArtField* ResolveField(
@@ -468,6 +474,10 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
+ DexToDexCompilationLevel GetDexToDexCompilationlevel(
+ Thread* self, Handle<mirror::ClassLoader> class_loader, const DexFile& dex_file,
+ const DexFile::ClassDef& class_def) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
void PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index a02e25e..5ebc029 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -41,7 +41,7 @@
TimingLogger timings("CompilerDriverTest::CompileAll", false, false);
TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
compiler_driver_->CompileAll(class_loader,
- Runtime::Current()->GetCompileTimeClassPath(class_loader),
+ GetDexFiles(class_loader),
&timings);
t.NewTiming("MakeAllExecutable");
MakeAllExecutable(class_loader);
@@ -66,8 +66,7 @@
}
void MakeAllExecutable(jobject class_loader) {
- const std::vector<const DexFile*>& class_path
- = Runtime::Current()->GetCompileTimeClassPath(class_loader);
+ const std::vector<const DexFile*> class_path = GetDexFiles(class_loader);
for (size_t i = 0; i != class_path.size(); ++i) {
const DexFile* dex_file = class_path[i];
CHECK(dex_file != NULL);
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 5042c75..d06ec27 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -32,7 +32,8 @@
public:
enum CompilerFilter {
kVerifyNone, // Skip verification and compile nothing except JNI stubs.
- kInterpretOnly, // Compile nothing except JNI stubs.
+ kInterpretOnly, // Verify, and compile only JNI stubs.
+ kVerifyAtRuntime, // Only compile JNI stubs and verify at runtime.
kSpace, // Maximize space savings.
kBalanced, // Try to get the best performance return on compilation investment.
kSpeed, // Maximize runtime performance.
@@ -81,13 +82,23 @@
compiler_filter_ = compiler_filter;
}
+ bool VerifyAtRuntime() const {
+ return compiler_filter_ == CompilerOptions::kVerifyAtRuntime;
+ }
+
bool IsCompilationEnabled() const {
- return ((compiler_filter_ != CompilerOptions::kVerifyNone) &&
- (compiler_filter_ != CompilerOptions::kInterpretOnly));
+ return compiler_filter_ != CompilerOptions::kVerifyNone &&
+ compiler_filter_ != CompilerOptions::kInterpretOnly &&
+ compiler_filter_ != CompilerOptions::kVerifyAtRuntime;
}
bool IsVerificationEnabled() const {
- return (compiler_filter_ != CompilerOptions::kVerifyNone);
+ return compiler_filter_ != CompilerOptions::kVerifyNone &&
+ compiler_filter_ != CompilerOptions::kVerifyAtRuntime;
+ }
+
+ bool NeverVerify() const {
+ return compiler_filter_ == CompilerOptions::kVerifyNone;
}
size_t GetHugeMethodThreshold() const {
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index fd3a912..8e2d175 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -46,11 +46,7 @@
EXPECT_EQ(expected_value, ef->FindDynamicSymbolAddress(symbol_name)); \
} while (false)
-#if defined(ART_USE_OPTIMIZING_COMPILER)
-TEST_F(ElfWriterTest, DISABLED_dlsym) {
-#else
TEST_F(ElfWriterTest, dlsym) {
-#endif
std::string elf_location = GetCoreOatLocation();
std::string elf_filename = GetSystemImageFilename(elf_location.c_str(), kRuntimeISA);
LOG(INFO) << "elf_filename=" << elf_filename;
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index d238b2c..c1555aa 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -133,7 +133,7 @@
return false;
}
std::string error_msg;
- oat_file_ = OatFile::OpenReadable(oat_file.get(), oat_location, &error_msg);
+ oat_file_ = OatFile::OpenReadable(oat_file.get(), oat_location, nullptr, &error_msg);
if (oat_file_ == nullptr) {
PLOG(ERROR) << "Failed to open writable oat file " << oat_filename << " for " << oat_location
<< ": " << error_msg;
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index beb5755..df5d5cc 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -89,7 +89,8 @@
verification_results_.reset(new VerificationResults(compiler_options_.get()));
method_inliner_map_.reset(new DexFileToMethodInlinerMap);
callbacks_.reset(new QuickCompilerCallbacks(verification_results_.get(),
- method_inliner_map_.get()));
+ method_inliner_map_.get(),
+ CompilerCallbacks::CallbackMode::kCompileApp));
compiler_driver_.reset(new CompilerDriver(
compiler_options_.get(), verification_results_.get(), method_inliner_map_.get(),
Compiler::kQuick, instruction_set, instruction_set_features_.get(), false,
@@ -217,20 +218,21 @@
auto* const mapping_table = compiled_method->GetMappingTable();
auto* const vmap_table = compiled_method->GetVmapTable();
auto* const gc_map = compiled_method->GetGcMap();
+ CHECK(gc_map != nullptr) << PrettyMethod(method);
// Write out pre-header stuff.
uint8_t* const mapping_table_ptr = code_cache->AddDataArray(
self, mapping_table->data(), mapping_table->data() + mapping_table->size());
- if (mapping_table == nullptr) {
+ if (mapping_table_ptr == nullptr) {
return false; // Out of data cache.
}
uint8_t* const vmap_table_ptr = code_cache->AddDataArray(
self, vmap_table->data(), vmap_table->data() + vmap_table->size());
- if (vmap_table == nullptr) {
+ if (vmap_table_ptr == nullptr) {
return false; // Out of data cache.
}
uint8_t* const gc_map_ptr = code_cache->AddDataArray(
self, gc_map->data(), gc_map->data() + gc_map->size());
- if (gc_map == nullptr) {
+ if (gc_map_ptr == nullptr) {
return false; // Out of data cache.
}
// Don't touch this until you protect / unprotect the code.
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index f513ea8..70bfb81 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -1510,25 +1510,25 @@
EXPECT_EQ(i9, 9);
EXPECT_EQ(i10, 10);
- jint i11 = bit_cast<jfloat, jint>(f1);
+ jint i11 = bit_cast<jint, jfloat>(f1);
EXPECT_EQ(i11, 11);
- jint i12 = bit_cast<jfloat, jint>(f2);
+ jint i12 = bit_cast<jint, jfloat>(f2);
EXPECT_EQ(i12, 12);
- jint i13 = bit_cast<jfloat, jint>(f3);
+ jint i13 = bit_cast<jint, jfloat>(f3);
EXPECT_EQ(i13, 13);
- jint i14 = bit_cast<jfloat, jint>(f4);
+ jint i14 = bit_cast<jint, jfloat>(f4);
EXPECT_EQ(i14, 14);
- jint i15 = bit_cast<jfloat, jint>(f5);
+ jint i15 = bit_cast<jint, jfloat>(f5);
EXPECT_EQ(i15, 15);
- jint i16 = bit_cast<jfloat, jint>(f6);
+ jint i16 = bit_cast<jint, jfloat>(f6);
EXPECT_EQ(i16, 16);
- jint i17 = bit_cast<jfloat, jint>(f7);
+ jint i17 = bit_cast<jint, jfloat>(f7);
EXPECT_EQ(i17, 17);
- jint i18 = bit_cast<jfloat, jint>(f8);
+ jint i18 = bit_cast<jint, jfloat>(f8);
EXPECT_EQ(i18, 18);
- jint i19 = bit_cast<jfloat, jint>(f9);
+ jint i19 = bit_cast<jint, jfloat>(f9);
EXPECT_EQ(i19, 19);
- jint i20 = bit_cast<jfloat, jint>(f10);
+ jint i20 = bit_cast<jint, jfloat>(f10);
EXPECT_EQ(i20, 20);
}
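
The swapped template arguments read correctly against a Dest-first signature; presumably the underlying bit_cast was reordered to match, along these lines (a sketch; art's version adds further compile-time checks):

    #include <cstring>

    template <typename Dest, typename Source>
    Dest bit_cast(const Source& source) {
      static_assert(sizeof(Dest) == sizeof(Source), "sizes must match");
      Dest dest;
      std::memcpy(&dest, &source, sizeof(dest));
      return dest;
    }
    // bit_cast<jint, jfloat>(f1) now reads: produce a jint from jfloat's bits.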
@@ -1547,16 +1547,16 @@
jint i9 = 9;
jint i10 = 10;
- jfloat f1 = bit_cast<jint, jfloat>(11);
- jfloat f2 = bit_cast<jint, jfloat>(12);
- jfloat f3 = bit_cast<jint, jfloat>(13);
- jfloat f4 = bit_cast<jint, jfloat>(14);
- jfloat f5 = bit_cast<jint, jfloat>(15);
- jfloat f6 = bit_cast<jint, jfloat>(16);
- jfloat f7 = bit_cast<jint, jfloat>(17);
- jfloat f8 = bit_cast<jint, jfloat>(18);
- jfloat f9 = bit_cast<jint, jfloat>(19);
- jfloat f10 = bit_cast<jint, jfloat>(20);
+ jfloat f1 = bit_cast<jfloat, jint>(11);
+ jfloat f2 = bit_cast<jfloat, jint>(12);
+ jfloat f3 = bit_cast<jfloat, jint>(13);
+ jfloat f4 = bit_cast<jfloat, jint>(14);
+ jfloat f5 = bit_cast<jfloat, jint>(15);
+ jfloat f6 = bit_cast<jfloat, jint>(16);
+ jfloat f7 = bit_cast<jfloat, jint>(17);
+ jfloat f8 = bit_cast<jfloat, jint>(18);
+ jfloat f9 = bit_cast<jfloat, jint>(19);
+ jfloat f10 = bit_cast<jfloat, jint>(20);
env_->CallStaticVoidMethod(jklass_, jmethod_, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, f1, f2,
f3, f4, f5, f6, f7, f8, f9, f10);
@@ -1580,25 +1580,25 @@
EXPECT_EQ(i9, 9);
EXPECT_EQ(i10, 10);
- jint i11 = bit_cast<jfloat, jint>(f1);
+ jint i11 = bit_cast<jint, jfloat>(f1);
EXPECT_EQ(i11, 11);
- jint i12 = bit_cast<jfloat, jint>(f2);
+ jint i12 = bit_cast<jint, jfloat>(f2);
EXPECT_EQ(i12, 12);
- jint i13 = bit_cast<jfloat, jint>(f3);
+ jint i13 = bit_cast<jint, jfloat>(f3);
EXPECT_EQ(i13, 13);
- jint i14 = bit_cast<jfloat, jint>(f4);
+ jint i14 = bit_cast<jint, jfloat>(f4);
EXPECT_EQ(i14, 14);
- jint i15 = bit_cast<jfloat, jint>(f5);
+ jint i15 = bit_cast<jint, jfloat>(f5);
EXPECT_EQ(i15, 15);
- jint i16 = bit_cast<jfloat, jint>(f6);
+ jint i16 = bit_cast<jint, jfloat>(f6);
EXPECT_EQ(i16, 16);
- jint i17 = bit_cast<jfloat, jint>(f7);
+ jint i17 = bit_cast<jint, jfloat>(f7);
EXPECT_EQ(i17, 17);
- jint i18 = bit_cast<jfloat, jint>(f8);
+ jint i18 = bit_cast<jint, jfloat>(f8);
EXPECT_EQ(i18, 18);
- jint i19 = bit_cast<jfloat, jint>(f9);
+ jint i19 = bit_cast<jint, jfloat>(f9);
EXPECT_EQ(i19, 19);
- jint i20 = bit_cast<jfloat, jint>(f10);
+ jint i20 = bit_cast<jint, jfloat>(f10);
EXPECT_EQ(i20, 20);
}
@@ -1617,16 +1617,16 @@
jint i9 = 9;
jint i10 = 10;
- jfloat f1 = bit_cast<jint, jfloat>(11);
- jfloat f2 = bit_cast<jint, jfloat>(12);
- jfloat f3 = bit_cast<jint, jfloat>(13);
- jfloat f4 = bit_cast<jint, jfloat>(14);
- jfloat f5 = bit_cast<jint, jfloat>(15);
- jfloat f6 = bit_cast<jint, jfloat>(16);
- jfloat f7 = bit_cast<jint, jfloat>(17);
- jfloat f8 = bit_cast<jint, jfloat>(18);
- jfloat f9 = bit_cast<jint, jfloat>(19);
- jfloat f10 = bit_cast<jint, jfloat>(20);
+ jfloat f1 = bit_cast<jfloat, jint>(11);
+ jfloat f2 = bit_cast<jfloat, jint>(12);
+ jfloat f3 = bit_cast<jfloat, jint>(13);
+ jfloat f4 = bit_cast<jfloat, jint>(14);
+ jfloat f5 = bit_cast<jfloat, jint>(15);
+ jfloat f6 = bit_cast<jfloat, jint>(16);
+ jfloat f7 = bit_cast<jfloat, jint>(17);
+ jfloat f8 = bit_cast<jfloat, jint>(18);
+ jfloat f9 = bit_cast<jfloat, jint>(19);
+ jfloat f10 = bit_cast<jfloat, jint>(20);
env_->CallStaticVoidMethod(jklass_, jmethod_, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, i1, i2, i3,
i4, i5, i6, i7, i8, i9, i10);
@@ -1649,25 +1649,25 @@
EXPECT_EQ(i9, 9);
EXPECT_EQ(i10, 10);
- jint i11 = bit_cast<jfloat, jint>(f1);
+ jint i11 = bit_cast<jint, jfloat>(f1);
EXPECT_EQ(i11, 11);
- jint i12 = bit_cast<jfloat, jint>(f2);
+ jint i12 = bit_cast<jint, jfloat>(f2);
EXPECT_EQ(i12, 12);
- jint i13 = bit_cast<jfloat, jint>(f3);
+ jint i13 = bit_cast<jint, jfloat>(f3);
EXPECT_EQ(i13, 13);
- jint i14 = bit_cast<jfloat, jint>(f4);
+ jint i14 = bit_cast<jint, jfloat>(f4);
EXPECT_EQ(i14, 14);
- jint i15 = bit_cast<jfloat, jint>(f5);
+ jint i15 = bit_cast<jint, jfloat>(f5);
EXPECT_EQ(i15, 15);
- jint i16 = bit_cast<jfloat, jint>(f6);
+ jint i16 = bit_cast<jint, jfloat>(f6);
EXPECT_EQ(i16, 16);
- jint i17 = bit_cast<jfloat, jint>(f7);
+ jint i17 = bit_cast<jint, jfloat>(f7);
EXPECT_EQ(i17, 17);
- jint i18 = bit_cast<jfloat, jint>(f8);
+ jint i18 = bit_cast<jint, jfloat>(f8);
EXPECT_EQ(i18, 18);
- jint i19 = bit_cast<jfloat, jint>(f9);
+ jint i19 = bit_cast<jint, jfloat>(f9);
EXPECT_EQ(i19, 19);
- jint i20 = bit_cast<jfloat, jint>(f10);
+ jint i20 = bit_cast<jint, jfloat>(f10);
EXPECT_EQ(i20, 20);
}
@@ -1686,16 +1686,16 @@
jint i9 = 9;
jint i10 = 10;
- jfloat f1 = bit_cast<jint, jfloat>(11);
- jfloat f2 = bit_cast<jint, jfloat>(12);
- jfloat f3 = bit_cast<jint, jfloat>(13);
- jfloat f4 = bit_cast<jint, jfloat>(14);
- jfloat f5 = bit_cast<jint, jfloat>(15);
- jfloat f6 = bit_cast<jint, jfloat>(16);
- jfloat f7 = bit_cast<jint, jfloat>(17);
- jfloat f8 = bit_cast<jint, jfloat>(18);
- jfloat f9 = bit_cast<jint, jfloat>(19);
- jfloat f10 = bit_cast<jint, jfloat>(20);
+ jfloat f1 = bit_cast<jfloat, jint>(11);
+ jfloat f2 = bit_cast<jfloat, jint>(12);
+ jfloat f3 = bit_cast<jfloat, jint>(13);
+ jfloat f4 = bit_cast<jfloat, jint>(14);
+ jfloat f5 = bit_cast<jfloat, jint>(15);
+ jfloat f6 = bit_cast<jfloat, jint>(16);
+ jfloat f7 = bit_cast<jfloat, jint>(17);
+ jfloat f8 = bit_cast<jfloat, jint>(18);
+ jfloat f9 = bit_cast<jfloat, jint>(19);
+ jfloat f10 = bit_cast<jfloat, jint>(20);
env_->CallStaticVoidMethod(jklass_, jmethod_, i1, f1, i2, f2, i3, f3, i4, f4, i5, f5, i6, f6, i7,
f7, i8, f8, i9, f9, i10, f10);
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 46aed60..afd39e8 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -85,8 +85,6 @@
compiler_options_.reset(new CompilerOptions);
verification_results_.reset(new VerificationResults(compiler_options_.get()));
method_inliner_map_.reset(new DexFileToMethodInlinerMap);
- callbacks_.reset(new QuickCompilerCallbacks(verification_results_.get(),
- method_inliner_map_.get()));
timer_.reset(new CumulativeLogger("Compilation times"));
compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
verification_results_.get(),
@@ -122,7 +120,7 @@
compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
}
std::unique_ptr<OatFile> oat_file(OatFile::Open(tmp.GetFilename(), tmp.GetFilename(), nullptr,
- nullptr, false, &error_msg));
+ nullptr, false, nullptr, &error_msg));
ASSERT_TRUE(oat_file.get() != nullptr) << error_msg;
const OatHeader& oat_header = oat_file->GetOatHeader();
ASSERT_TRUE(oat_header.IsValid());
diff --git a/compiler/optimizing/boolean_simplifier.cc b/compiler/optimizing/boolean_simplifier.cc
new file mode 100644
index 0000000..be432c5
--- /dev/null
+++ b/compiler/optimizing/boolean_simplifier.cc
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "boolean_simplifier.h"
+
+namespace art {
+
+// Returns true if 'block1' and 'block2' are empty, branch to the same single
+// successor, and that successor can only be reached from them.
+static bool BlocksDoMergeTogether(HBasicBlock* block1, HBasicBlock* block2) {
+ if (!block1->IsSingleGoto() || !block2->IsSingleGoto()) return false;
+ HBasicBlock* succ1 = block1->GetSuccessors().Get(0);
+ HBasicBlock* succ2 = block2->GetSuccessors().Get(0);
+ return succ1 == succ2 && succ1->GetPredecessors().Size() == 2u;
+}
+
+// Returns true if the outcome of the branching matches the boolean value of
+// the branching condition.
+static bool PreservesCondition(HInstruction* input_true, HInstruction* input_false) {
+ return input_true->IsIntConstant() && input_true->AsIntConstant()->IsOne()
+ && input_false->IsIntConstant() && input_false->AsIntConstant()->IsZero();
+}
+
+// Returns true if the outcome of the branching is exactly opposite of the
+// boolean value of the branching condition.
+static bool NegatesCondition(HInstruction* input_true, HInstruction* input_false) {
+ return input_true->IsIntConstant() && input_true->AsIntConstant()->IsZero()
+ && input_false->IsIntConstant() && input_false->AsIntConstant()->IsOne();
+}
+
+// Returns an instruction with the opposite boolean value from 'cond'.
+static HInstruction* GetOppositeCondition(HInstruction* cond) {
+ HGraph* graph = cond->GetBlock()->GetGraph();
+ ArenaAllocator* allocator = graph->GetArena();
+
+ if (cond->IsCondition()) {
+ HInstruction* lhs = cond->InputAt(0);
+ HInstruction* rhs = cond->InputAt(1);
+ if (cond->IsEqual()) {
+ return new (allocator) HNotEqual(lhs, rhs);
+ } else if (cond->IsNotEqual()) {
+ return new (allocator) HEqual(lhs, rhs);
+ } else if (cond->IsLessThan()) {
+ return new (allocator) HGreaterThanOrEqual(lhs, rhs);
+ } else if (cond->IsLessThanOrEqual()) {
+ return new (allocator) HGreaterThan(lhs, rhs);
+ } else if (cond->IsGreaterThan()) {
+ return new (allocator) HLessThanOrEqual(lhs, rhs);
+ } else {
+ DCHECK(cond->IsGreaterThanOrEqual());
+ return new (allocator) HLessThan(lhs, rhs);
+ }
+ } else if (cond->IsIntConstant()) {
+ HIntConstant* int_const = cond->AsIntConstant();
+ if (int_const->IsZero()) {
+ return graph->GetIntConstant(1);
+ } else {
+ DCHECK(int_const->IsOne());
+ return graph->GetIntConstant(0);
+ }
+ } else {
+ // General case when 'cond' is another instruction of type boolean.
+ // Negate with 'cond == 0'.
+ return new (allocator) HEqual(cond, graph->GetIntConstant(0));
+ }
+}
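
The negations above follow the usual complement table, which is sound for the integral and boolean operands this pass sees (it would not be for NaN-capable float comparisons):

    //   ==  <->  !=      <  <->  >=      >  <->  <=
    // IntConstant 0/1 flip to 1/0; any other boolean-typed value x becomes (x == 0).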
+
+void HBooleanSimplifier::Run() {
+ // Iterate in post order in the unlikely case that removing one occurrence of
+ // the pattern empties a branch block of another occurrence. Otherwise the
+ // order does not matter.
+ for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
+ if (!block->EndsWithIf()) continue;
+
+ // Find elements of the pattern.
+ HIf* if_instruction = block->GetLastInstruction()->AsIf();
+ HBasicBlock* true_block = if_instruction->IfTrueSuccessor();
+ HBasicBlock* false_block = if_instruction->IfFalseSuccessor();
+ if (!BlocksDoMergeTogether(true_block, false_block)) {
+ continue;
+ }
+ HBasicBlock* merge_block = true_block->GetSuccessors().Get(0);
+ if (!merge_block->HasSinglePhi()) {
+ continue;
+ }
+ HPhi* phi = merge_block->GetFirstPhi()->AsPhi();
+ HInstruction* true_value = phi->InputAt(merge_block->GetPredecessorIndexOf(true_block));
+ HInstruction* false_value = phi->InputAt(merge_block->GetPredecessorIndexOf(false_block));
+
+ // Check if the selection negates/preserves the value of the condition and
+ // if so, generate a suitable replacement instruction.
+ HInstruction* if_condition = if_instruction->InputAt(0);
+ HInstruction* replacement;
+ if (NegatesCondition(true_value, false_value)) {
+ replacement = GetOppositeCondition(if_condition);
+ if (replacement->GetBlock() == nullptr) {
+ block->InsertInstructionBefore(replacement, if_instruction);
+ }
+ } else if (PreservesCondition(true_value, false_value)) {
+ replacement = if_condition;
+ } else {
+ continue;
+ }
+
+ // Replace the selection outcome with the new instruction.
+ phi->ReplaceWith(replacement);
+ merge_block->RemovePhi(phi);
+
+ // Link the start/end blocks and remove empty branches.
+ graph_->MergeEmptyBranches(block, merge_block);
+
+ // Remove the original condition if it is now unused.
+ if (!if_condition->HasUses()) {
+ if_condition->GetBlock()->RemoveInstruction(if_condition);
+ }
+ }
+}
+
+} // namespace art
diff --git a/compiler/optimizing/boolean_simplifier.h b/compiler/optimizing/boolean_simplifier.h
new file mode 100644
index 0000000..a88733e
--- /dev/null
+++ b/compiler/optimizing/boolean_simplifier.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This optimization recognizes a common pattern where a boolean value is
+// either cast to an integer or negated by selecting from zero/one integer
+// constants with an If statement. Because boolean values are internally
+// represented as zero/one, we can safely replace the pattern with a suitable
+// condition instruction.
+
+// Example: Negating a boolean value
+// B1:
+// z1 ParameterValue
+// i2 IntConstant 0
+// i3 IntConstant 1
+// v4 Goto B2
+// B2:
+// z5 NotEquals [ z1 i2 ]
+// v6 If [ z5 ] then B3 else B4
+// B3:
+// v7 Goto B5
+// B4:
+// v8 Goto B5
+// B5:
+// i9 Phi [ i3 i2 ]
+// v10 Return [ i9 ]
+// turns into
+// B1:
+// z1 ParameterValue
+// i2 IntConstant 0
+// v4 Goto B2
+// B2:
+// z11 Equals [ z1 i2 ]
+// v10 Return [ z11 ]
+// B3, B4, B5: removed
+
+// Note: in order to recognize empty blocks, this optimization must be run
+// after the instruction simplifier has removed redundant suspend checks.
+
+#ifndef ART_COMPILER_OPTIMIZING_BOOLEAN_SIMPLIFIER_H_
+#define ART_COMPILER_OPTIMIZING_BOOLEAN_SIMPLIFIER_H_
+
+#include "optimization.h"
+
+namespace art {
+
+class HBooleanSimplifier : public HOptimization {
+ public:
+ explicit HBooleanSimplifier(HGraph* graph)
+ : HOptimization(graph, true, kBooleanSimplifierPassName) {}
+
+ void Run() OVERRIDE;
+
+ static constexpr const char* kBooleanSimplifierPassName = "boolean_simplifier";
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HBooleanSimplifier);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_BOOLEAN_SIMPLIFIER_H_
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index 24fa583..b3653fe 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -52,12 +52,11 @@
HParameterValue(0, Primitive::kPrimNot); // array
HInstruction* parameter2 = new (&allocator)
HParameterValue(0, Primitive::kPrimInt); // i
- HInstruction* constant_1 = new (&allocator) HIntConstant(1);
- HInstruction* constant_0 = new (&allocator) HIntConstant(0);
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
- entry->AddInstruction(constant_1);
- entry->AddInstruction(constant_0);
+
+ HInstruction* constant_1 = graph->GetIntConstant(1);
+ HInstruction* constant_0 = graph->GetIntConstant(0);
HBasicBlock* block1 = new (&allocator) HBasicBlock(graph);
graph->AddBlock(block1);
@@ -158,14 +157,12 @@
HParameterValue(0, Primitive::kPrimNot); // array
HInstruction* parameter2 = new (&allocator)
HParameterValue(0, Primitive::kPrimInt); // i
- HInstruction* constant_1 = new (&allocator) HIntConstant(1);
- HInstruction* constant_0 = new (&allocator) HIntConstant(0);
- HInstruction* constant_max_int = new (&allocator) HIntConstant(INT_MAX);
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
- entry->AddInstruction(constant_1);
- entry->AddInstruction(constant_0);
- entry->AddInstruction(constant_max_int);
+
+ HInstruction* constant_1 = graph->GetIntConstant(1);
+ HInstruction* constant_0 = graph->GetIntConstant(0);
+ HInstruction* constant_max_int = graph->GetIntConstant(INT_MAX);
HBasicBlock* block1 = new (&allocator) HBasicBlock(graph);
graph->AddBlock(block1);
@@ -232,14 +229,12 @@
HParameterValue(0, Primitive::kPrimNot); // array
HInstruction* parameter2 = new (&allocator)
HParameterValue(0, Primitive::kPrimInt); // i
- HInstruction* constant_1 = new (&allocator) HIntConstant(1);
- HInstruction* constant_0 = new (&allocator) HIntConstant(0);
- HInstruction* constant_max_int = new (&allocator) HIntConstant(INT_MAX);
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
- entry->AddInstruction(constant_1);
- entry->AddInstruction(constant_0);
- entry->AddInstruction(constant_max_int);
+
+ HInstruction* constant_1 = graph->GetIntConstant(1);
+ HInstruction* constant_0 = graph->GetIntConstant(0);
+ HInstruction* constant_max_int = graph->GetIntConstant(INT_MAX);
HBasicBlock* block1 = new (&allocator) HBasicBlock(graph);
graph->AddBlock(block1);
@@ -303,15 +298,12 @@
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
- HInstruction* constant_5 = new (&allocator) HIntConstant(5);
- HInstruction* constant_4 = new (&allocator) HIntConstant(4);
- HInstruction* constant_6 = new (&allocator) HIntConstant(6);
- HInstruction* constant_1 = new (&allocator) HIntConstant(1);
entry->AddInstruction(parameter);
- entry->AddInstruction(constant_5);
- entry->AddInstruction(constant_4);
- entry->AddInstruction(constant_6);
- entry->AddInstruction(constant_1);
+
+ HInstruction* constant_5 = graph->GetIntConstant(5);
+ HInstruction* constant_4 = graph->GetIntConstant(4);
+ HInstruction* constant_6 = graph->GetIntConstant(6);
+ HInstruction* constant_1 = graph->GetIntConstant(1);
HBasicBlock* block = new (&allocator) HBasicBlock(graph);
graph->AddBlock(block);
@@ -379,13 +371,11 @@
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimNot);
- HInstruction* constant_initial = new (allocator) HIntConstant(initial);
- HInstruction* constant_increment = new (allocator) HIntConstant(increment);
- HInstruction* constant_10 = new (allocator) HIntConstant(10);
entry->AddInstruction(parameter);
- entry->AddInstruction(constant_initial);
- entry->AddInstruction(constant_increment);
- entry->AddInstruction(constant_10);
+
+ HInstruction* constant_initial = graph->GetIntConstant(initial);
+ HInstruction* constant_increment = graph->GetIntConstant(increment);
+ HInstruction* constant_10 = graph->GetIntConstant(10);
HBasicBlock* block = new (allocator) HBasicBlock(graph);
graph->AddBlock(block);
@@ -518,15 +508,12 @@
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimNot);
- HInstruction* constant_initial = new (allocator) HIntConstant(initial);
- HInstruction* constant_increment = new (allocator) HIntConstant(increment);
- HInstruction* constant_minus_1 = new (allocator) HIntConstant(-1);
- HInstruction* constant_10 = new (allocator) HIntConstant(10);
entry->AddInstruction(parameter);
- entry->AddInstruction(constant_initial);
- entry->AddInstruction(constant_increment);
- entry->AddInstruction(constant_minus_1);
- entry->AddInstruction(constant_10);
+
+ HInstruction* constant_initial = graph->GetIntConstant(initial);
+ HInstruction* constant_increment = graph->GetIntConstant(increment);
+ HInstruction* constant_minus_1 = graph->GetIntConstant(-1);
+ HInstruction* constant_10 = graph->GetIntConstant(10);
HBasicBlock* block = new (allocator) HBasicBlock(graph);
graph->AddBlock(block);
@@ -651,12 +638,10 @@
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* constant_10 = new (allocator) HIntConstant(10);
- HInstruction* constant_initial = new (allocator) HIntConstant(initial);
- HInstruction* constant_increment = new (allocator) HIntConstant(increment);
- entry->AddInstruction(constant_10);
- entry->AddInstruction(constant_initial);
- entry->AddInstruction(constant_increment);
+
+ HInstruction* constant_10 = graph->GetIntConstant(10);
+ HInstruction* constant_initial = graph->GetIntConstant(initial);
+ HInstruction* constant_increment = graph->GetIntConstant(increment);
HBasicBlock* block = new (allocator) HBasicBlock(graph);
graph->AddBlock(block);
@@ -765,15 +750,12 @@
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimNot);
- HInstruction* constant_initial = new (allocator) HIntConstant(initial);
- HInstruction* constant_1 = new (allocator) HIntConstant(1);
- HInstruction* constant_10 = new (allocator) HIntConstant(10);
- HInstruction* constant_minus_1 = new (allocator) HIntConstant(-1);
entry->AddInstruction(parameter);
- entry->AddInstruction(constant_initial);
- entry->AddInstruction(constant_1);
- entry->AddInstruction(constant_10);
- entry->AddInstruction(constant_minus_1);
+
+ HInstruction* constant_initial = graph->GetIntConstant(initial);
+ HInstruction* constant_1 = graph->GetIntConstant(1);
+ HInstruction* constant_10 = graph->GetIntConstant(10);
+ HInstruction* constant_minus_1 = graph->GetIntConstant(-1);
HBasicBlock* block = new (allocator) HBasicBlock(graph);
graph->AddBlock(block);
@@ -893,13 +875,11 @@
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
- HInstruction* constant_0 = new (&allocator) HIntConstant(0);
- HInstruction* constant_minus_1 = new (&allocator) HIntConstant(-1);
- HInstruction* constant_1 = new (&allocator) HIntConstant(1);
entry->AddInstruction(parameter);
- entry->AddInstruction(constant_0);
- entry->AddInstruction(constant_minus_1);
- entry->AddInstruction(constant_1);
+
+ HInstruction* constant_0 = graph->GetIntConstant(0);
+ HInstruction* constant_minus_1 = graph->GetIntConstant(-1);
+ HInstruction* constant_1 = graph->GetIntConstant(1);
HBasicBlock* block = new (&allocator) HBasicBlock(graph);
graph->AddBlock(block);
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 2cac93d..2cdd5af 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -215,7 +215,7 @@
DCHECK(fallthrough_target != nullptr);
PotentiallyAddSuspendCheck(branch_target, dex_pc);
HInstruction* value = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
- T* comparison = new (arena_) T(value, GetIntConstant(0));
+ T* comparison = new (arena_) T(value, graph_->GetIntConstant(0));
current_block_->AddInstruction(comparison);
HInstruction* ifinst = new (arena_) HIf(comparison);
current_block_->AddInstruction(ifinst);
@@ -515,7 +515,7 @@
template<typename T>
void HGraphBuilder::Binop_22s(const Instruction& instruction, bool reverse) {
HInstruction* first = LoadLocal(instruction.VRegB(), Primitive::kPrimInt);
- HInstruction* second = GetIntConstant(instruction.VRegC_22s());
+ HInstruction* second = graph_->GetIntConstant(instruction.VRegC_22s());
if (reverse) {
std::swap(first, second);
}
@@ -526,7 +526,7 @@
template<typename T>
void HGraphBuilder::Binop_22b(const Instruction& instruction, bool reverse) {
HInstruction* first = LoadLocal(instruction.VRegB(), Primitive::kPrimInt);
- HInstruction* second = GetIntConstant(instruction.VRegC_22b());
+ HInstruction* second = graph_->GetIntConstant(instruction.VRegC_22b());
if (reverse) {
std::swap(first, second);
}
@@ -616,11 +616,11 @@
DCHECK((optimized_invoke_type == invoke_type) || (optimized_invoke_type != kDirect)
|| compiler_driver_->GetCompilerOptions().GetCompilePic());
bool is_recursive =
- (target_method.dex_method_index == outer_compilation_unit_->GetDexMethodIndex());
- DCHECK(!is_recursive || (target_method.dex_file == outer_compilation_unit_->GetDexFile()));
+ (target_method.dex_method_index == dex_compilation_unit_->GetDexMethodIndex());
+ DCHECK(!is_recursive || (target_method.dex_file == dex_compilation_unit_->GetDexFile()));
invoke = new (arena_) HInvokeStaticOrDirect(
arena_, number_of_arguments, return_type, dex_pc, target_method.dex_method_index,
- is_recursive, optimized_invoke_type);
+ is_recursive, invoke_type, optimized_invoke_type);
}
size_t start_index = 0;
@@ -704,6 +704,34 @@
return true;
}
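+// Resolves the class of the outer-most method being compiled. When building
+// the graph for an inlined method, this can differ from the class of
+// `dex_compilation_unit_`.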
+mirror::Class* HGraphBuilder::GetOutermostCompilingClass() const {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<2> hs(soa.Self());
+ const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader*>(dex_compilation_unit_->GetClassLoader())));
+ Handle<mirror::DexCache> outer_dex_cache(hs.NewHandle(
+ outer_compilation_unit_->GetClassLinker()->FindDexCache(outer_dex_file)));
+
+ return compiler_driver_->ResolveCompilingMethodsClass(
+ soa, outer_dex_cache, class_loader, outer_compilation_unit_);
+}
+
+bool HGraphBuilder::IsOutermostCompilingClass(uint16_t type_index) const {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<4> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(
+ dex_compilation_unit_->GetClassLinker()->FindDexCache(*dex_compilation_unit_->GetDexFile())));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader*>(dex_compilation_unit_->GetClassLoader())));
+ Handle<mirror::Class> cls(hs.NewHandle(compiler_driver_->ResolveClass(
+ soa, dex_cache, class_loader, type_index, dex_compilation_unit_)));
+ Handle<mirror::Class> compiling_class(hs.NewHandle(GetOutermostCompilingClass()));
+
+ return compiling_class.Get() == cls.Get();
+}
+
bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
uint32_t dex_pc,
bool is_put) {
@@ -711,7 +739,7 @@
uint16_t field_index = instruction.VRegB_21c();
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<4> hs(soa.Self());
+ StackHandleScope<5> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(
dex_compilation_unit_->GetClassLinker()->FindDexCache(*dex_compilation_unit_->GetDexFile())));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
@@ -724,23 +752,36 @@
return false;
}
- Handle<mirror::Class> referrer_class(hs.NewHandle(compiler_driver_->ResolveCompilingMethodsClass(
- soa, dex_cache, class_loader, outer_compilation_unit_)));
+ const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
+ Handle<mirror::DexCache> outer_dex_cache(hs.NewHandle(
+ outer_compilation_unit_->GetClassLinker()->FindDexCache(outer_dex_file)));
+ Handle<mirror::Class> referrer_class(hs.NewHandle(GetOutermostCompilingClass()));
// The index at which the field's class is stored in the DexCache's type array.
uint32_t storage_index;
- std::pair<bool, bool> pair = compiler_driver_->IsFastStaticField(
- dex_cache.Get(), referrer_class.Get(), resolved_field.Get(), field_index, &storage_index);
- bool can_easily_access = is_put ? pair.second : pair.first;
- if (!can_easily_access) {
+ bool is_referrer_class = (referrer_class.Get() == resolved_field->GetDeclaringClass());
+ if (is_referrer_class) {
+ storage_index = referrer_class->GetDexTypeIndex();
+ } else if (outer_dex_cache.Get() != dex_cache.Get()) {
+    // The compiler driver cannot currently handle multiple dex caches. Just bail out.
return false;
+ } else {
+ std::pair<bool, bool> pair = compiler_driver_->IsFastStaticField(
+ outer_dex_cache.Get(),
+ referrer_class.Get(),
+ resolved_field.Get(),
+ field_index,
+ &storage_index);
+ bool can_easily_access = is_put ? pair.second : pair.first;
+ if (!can_easily_access) {
+ return false;
+ }
}
// TODO: find out why this check is needed.
bool is_in_dex_cache = compiler_driver_->CanAssumeTypeIsPresentInDexCache(
*outer_compilation_unit_->GetDexFile(), storage_index);
bool is_initialized = resolved_field->GetDeclaringClass()->IsInitialized() && is_in_dex_cache;
- bool is_referrer_class = (referrer_class.Get() == resolved_field->GetDeclaringClass());
HLoadClass* constant = new (arena_) HLoadClass(storage_index, is_referrer_class, dex_pc);
current_block_->AddInstruction(constant);
@@ -783,9 +824,9 @@
HInstruction* second = nullptr;
if (second_is_constant) {
if (type == Primitive::kPrimInt) {
- second = GetIntConstant(second_vreg_or_constant);
+ second = graph_->GetIntConstant(second_vreg_or_constant);
} else {
- second = GetLongConstant(second_vreg_or_constant);
+ second = graph_->GetLongConstant(second_vreg_or_constant);
}
} else {
second = LoadLocal(second_vreg_or_constant, type);
@@ -849,7 +890,7 @@
bool is_range,
uint32_t* args,
uint32_t register_index) {
- HInstruction* length = GetIntConstant(number_of_vreg_arguments);
+ HInstruction* length = graph_->GetIntConstant(number_of_vreg_arguments);
QuickEntrypointEnum entrypoint = NeedsAccessCheck(type_index)
? kQuickAllocArrayWithAccessCheck
: kQuickAllocArray;
@@ -869,7 +910,7 @@
temps.Add(object);
for (size_t i = 0; i < number_of_vreg_arguments; ++i) {
HInstruction* value = LoadLocal(is_range ? register_index + i : args[i], type);
- HInstruction* index = GetIntConstant(i);
+ HInstruction* index = graph_->GetIntConstant(i);
current_block_->AddInstruction(
new (arena_) HArraySet(object, index, value, type, dex_pc));
}
@@ -883,8 +924,8 @@
Primitive::Type anticipated_type,
uint32_t dex_pc) {
for (uint32_t i = 0; i < element_count; ++i) {
- HInstruction* index = GetIntConstant(i);
- HInstruction* value = GetIntConstant(data[i]);
+ HInstruction* index = graph_->GetIntConstant(i);
+ HInstruction* value = graph_->GetIntConstant(data[i]);
current_block_->AddInstruction(new (arena_) HArraySet(
object, index, value, anticipated_type, dex_pc));
}
@@ -908,7 +949,7 @@
  // This DEX instruction appears to perform the bounds check before doing
  // any of the stores.
- HInstruction* last_index = GetIntConstant(payload->element_count - 1);
+ HInstruction* last_index = graph_->GetIntConstant(payload->element_count - 1);
current_block_->AddInstruction(new (arena_) HBoundsCheck(last_index, length, dex_pc));
switch (payload->element_width) {
@@ -949,8 +990,8 @@
uint32_t element_count,
uint32_t dex_pc) {
for (uint32_t i = 0; i < element_count; ++i) {
- HInstruction* index = GetIntConstant(i);
- HInstruction* value = GetLongConstant(data[i]);
+ HInstruction* index = graph_->GetIntConstant(i);
+ HInstruction* value = graph_->GetLongConstant(data[i]);
current_block_->AddInstruction(new (arena_) HArraySet(
object, index, value, Primitive::kPrimLong, dex_pc));
}
@@ -966,7 +1007,7 @@
// `CanAccessTypeWithoutChecks` will tell whether the method being
// built is trying to access its own class, so that the generated
// code can optimize for this case. However, the optimization does not
- // work for inlining, so we use `IsCompilingClass` instead.
+ // work for inlining, so we use `IsOutermostCompilingClass` instead.
bool dont_use_is_referrers_class;
bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
@@ -976,7 +1017,8 @@
return false;
}
HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
- HLoadClass* cls = new (arena_) HLoadClass(type_index, IsCompilingClass(type_index), dex_pc);
+ HLoadClass* cls = new (arena_) HLoadClass(
+ type_index, IsOutermostCompilingClass(type_index), dex_pc);
current_block_->AddInstruction(cls);
// The class needs a temporary before being used by the type check.
Temporaries temps(graph_);
@@ -1024,8 +1066,6 @@
HInstruction* value = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
uint16_t num_entries = table.GetNumEntries();
- // There should be at least one entry here.
- DCHECK_GT(num_entries, 0U);
for (size_t i = 0; i < num_entries; i++) {
BuildSwitchCaseHelper(instruction, i, i == static_cast<size_t>(num_entries) - 1, table, value,
@@ -1042,7 +1082,7 @@
PotentiallyAddSuspendCheck(case_target, dex_pc);
// The current case's value.
- HInstruction* this_case_value = GetIntConstant(case_value_int);
+ HInstruction* this_case_value = graph_->GetIntConstant(case_value_int);
// Compare value and this_case_value.
HEqual* comparison = new (arena_) HEqual(value, this_case_value);
@@ -1100,28 +1140,28 @@
switch (instruction.Opcode()) {
case Instruction::CONST_4: {
int32_t register_index = instruction.VRegA();
- HIntConstant* constant = GetIntConstant(instruction.VRegB_11n());
+ HIntConstant* constant = graph_->GetIntConstant(instruction.VRegB_11n());
UpdateLocal(register_index, constant);
break;
}
case Instruction::CONST_16: {
int32_t register_index = instruction.VRegA();
- HIntConstant* constant = GetIntConstant(instruction.VRegB_21s());
+ HIntConstant* constant = graph_->GetIntConstant(instruction.VRegB_21s());
UpdateLocal(register_index, constant);
break;
}
case Instruction::CONST: {
int32_t register_index = instruction.VRegA();
- HIntConstant* constant = GetIntConstant(instruction.VRegB_31i());
+ HIntConstant* constant = graph_->GetIntConstant(instruction.VRegB_31i());
UpdateLocal(register_index, constant);
break;
}
case Instruction::CONST_HIGH16: {
int32_t register_index = instruction.VRegA();
- HIntConstant* constant = GetIntConstant(instruction.VRegB_21h() << 16);
+ HIntConstant* constant = graph_->GetIntConstant(instruction.VRegB_21h() << 16);
UpdateLocal(register_index, constant);
break;
}
@@ -1132,7 +1172,7 @@
int64_t value = instruction.VRegB_21s();
value <<= 48;
value >>= 48;
- HLongConstant* constant = GetLongConstant(value);
+ HLongConstant* constant = graph_->GetLongConstant(value);
UpdateLocal(register_index, constant);
break;
}
@@ -1143,14 +1183,14 @@
int64_t value = instruction.VRegB_31i();
value <<= 32;
value >>= 32;
- HLongConstant* constant = GetLongConstant(value);
+ HLongConstant* constant = graph_->GetLongConstant(value);
UpdateLocal(register_index, constant);
break;
}
case Instruction::CONST_WIDE: {
int32_t register_index = instruction.VRegA();
- HLongConstant* constant = GetLongConstant(instruction.VRegB_51l());
+ HLongConstant* constant = graph_->GetLongConstant(instruction.VRegB_51l());
UpdateLocal(register_index, constant);
break;
}
@@ -1158,7 +1198,7 @@
case Instruction::CONST_WIDE_HIGH16: {
int32_t register_index = instruction.VRegA();
int64_t value = static_cast<int64_t>(instruction.VRegB_21h()) << 48;
- HLongConstant* constant = GetLongConstant(value);
+ HLongConstant* constant = graph_->GetLongConstant(value);
UpdateLocal(register_index, constant);
break;
}
@@ -1973,7 +2013,7 @@
// `CanAccessTypeWithoutChecks` will tell whether the method being
// built is trying to access its own class, so that the generated
// code can optimize for this case. However, the optimization does not
- // work for inlining, so we use `IsCompilingClass` instead.
+ // work for inlining, so we use `IsOutermostCompilingClass` instead.
bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
&type_known_final, &type_known_abstract, &dont_use_is_referrers_class);
@@ -1982,7 +2022,7 @@
return false;
}
current_block_->AddInstruction(
- new (arena_) HLoadClass(type_index, IsCompilingClass(type_index), dex_pc));
+ new (arena_) HLoadClass(type_index, IsOutermostCompilingClass(type_index), dex_pc));
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
break;
}
@@ -2060,42 +2100,6 @@
return true;
} // NOLINT(readability/fn_size)
-HIntConstant* HGraphBuilder::GetIntConstant0() {
- if (constant0_ != nullptr) {
- return constant0_;
- }
- constant0_ = new(arena_) HIntConstant(0);
- entry_block_->AddInstruction(constant0_);
- return constant0_;
-}
-
-HIntConstant* HGraphBuilder::GetIntConstant1() {
- if (constant1_ != nullptr) {
- return constant1_;
- }
- constant1_ = new(arena_) HIntConstant(1);
- entry_block_->AddInstruction(constant1_);
- return constant1_;
-}
-
-HIntConstant* HGraphBuilder::GetIntConstant(int32_t constant) {
- switch (constant) {
- case 0: return GetIntConstant0();
- case 1: return GetIntConstant1();
- default: {
- HIntConstant* instruction = new (arena_) HIntConstant(constant);
- entry_block_->AddInstruction(instruction);
- return instruction;
- }
- }
-}
-
-HLongConstant* HGraphBuilder::GetLongConstant(int64_t constant) {
- HLongConstant* instruction = new (arena_) HLongConstant(constant);
- entry_block_->AddInstruction(instruction);
- return instruction;
-}
-
HLocal* HGraphBuilder::GetLocalAt(int register_index) const {
return locals_.Get(register_index);
}
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 96196de..6a0738a 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -47,8 +47,6 @@
exit_block_(nullptr),
current_block_(nullptr),
graph_(graph),
- constant0_(nullptr),
- constant1_(nullptr),
dex_file_(dex_file),
dex_compilation_unit_(dex_compilation_unit),
compiler_driver_(driver),
@@ -67,8 +65,6 @@
exit_block_(nullptr),
current_block_(nullptr),
graph_(graph),
- constant0_(nullptr),
- constant1_(nullptr),
dex_file_(nullptr),
dex_compilation_unit_(nullptr),
compiler_driver_(nullptr),
@@ -100,10 +96,6 @@
void MaybeUpdateCurrentBlock(size_t index);
HBasicBlock* FindBlockStartingAt(int32_t index) const;
- HIntConstant* GetIntConstant0();
- HIntConstant* GetIntConstant1();
- HIntConstant* GetIntConstant(int32_t constant);
- HLongConstant* GetLongConstant(int64_t constant);
void InitializeLocals(uint16_t count);
HLocal* GetLocalAt(int register_index) const;
void UpdateLocal(int register_index, HInstruction* instruction) const;
@@ -231,13 +223,10 @@
void MaybeRecordStat(MethodCompilationStat compilation_stat);
+ mirror::Class* GetOutermostCompilingClass() const;
+
// Returns whether `type_index` points to the outer-most compiling method's class.
- bool IsCompilingClass(uint16_t type_index) const {
- uint32_t referrer_index = outer_compilation_unit_->GetDexMethodIndex();
- const DexFile::MethodId& method_id =
- outer_compilation_unit_->GetDexFile()->GetMethodId(referrer_index);
- return method_id.class_idx_ == type_index;
- }
+ bool IsOutermostCompilingClass(uint16_t type_index) const;
ArenaAllocator* const arena_;
@@ -253,9 +242,6 @@
HBasicBlock* current_block_;
HGraph* const graph_;
- HIntConstant* constant0_;
- HIntConstant* constant1_;
-
// The dex file where the method being compiled is.
const DexFile* const dex_file_;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 561dcb7..bd6e943 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -40,16 +40,6 @@
return mirror::ObjectArray<mirror::Object>::OffsetOfElement(index).SizeValue();
}
-static bool IsSingleGoto(HBasicBlock* block) {
- HLoopInformation* loop_info = block->GetLoopInformation();
- // TODO: Remove the null check b/19084197.
- return (block->GetFirstInstruction() != nullptr)
- && (block->GetFirstInstruction() == block->GetLastInstruction())
- && block->GetLastInstruction()->IsGoto()
- // Back edges generate the suspend check.
- && (loop_info == nullptr || !loop_info->IsBackEdge(block));
-}
-
void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
Initialize();
if (!is_leaf) {
@@ -74,7 +64,7 @@
HBasicBlock* CodeGenerator::GetNextBlockToEmit() const {
for (size_t i = current_block_index_ + 1; i < block_order_->Size(); ++i) {
HBasicBlock* block = block_order_->Get(i);
- if (!IsSingleGoto(block)) {
+ if (!block->IsSingleGoto()) {
return block;
}
}
@@ -82,7 +72,7 @@
}
HBasicBlock* CodeGenerator::FirstNonEmptyBlock(HBasicBlock* block) const {
- while (IsSingleGoto(block)) {
+ while (block->IsSingleGoto()) {
block = block->GetSuccessors().Get(0);
}
return block;
@@ -97,7 +87,7 @@
// Don't generate code for an empty block. Its predecessors will branch to its successor
// directly. Also, the label of that block will not be emitted, so this helps catch
// errors where we reference that label.
- if (IsSingleGoto(block)) continue;
+ if (block->IsSingleGoto()) continue;
Bind(block);
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
@@ -628,7 +618,7 @@
++i, DexRegisterLocation::Kind::kConstant, High32Bits(value));
DCHECK_LT(i, environment_size);
} else if (current->IsDoubleConstant()) {
- int64_t value = bit_cast<double, int64_t>(current->AsDoubleConstant()->GetValue());
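+      // Note: the template arguments are swapped because bit_cast presumably
+      // now takes the destination type first, std::bit_cast-style.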
+ int64_t value = bit_cast<int64_t, double>(current->AsDoubleConstant()->GetValue());
stack_map_stream_.AddDexRegisterEntry(
i, DexRegisterLocation::Kind::kConstant, Low32Bits(value));
stack_map_stream_.AddDexRegisterEntry(
@@ -641,7 +631,7 @@
stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kConstant, 0);
} else {
DCHECK(current->IsFloatConstant()) << current->DebugName();
- int32_t value = bit_cast<float, int32_t>(current->AsFloatConstant()->GetValue());
+ int32_t value = bit_cast<int32_t, float>(current->AsFloatConstant()->GetValue());
stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kConstant, value);
}
break;
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index ecaa6f0..07ca6b1 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -271,7 +271,7 @@
return 0;
} else {
DCHECK(constant->IsFloatConstant());
- return bit_cast<float, int32_t>(constant->AsFloatConstant()->GetValue());
+ return bit_cast<int32_t, float>(constant->AsFloatConstant()->GetValue());
}
}
@@ -281,12 +281,12 @@
} else if (constant->IsNullConstant()) {
return 0;
} else if (constant->IsFloatConstant()) {
- return bit_cast<float, int32_t>(constant->AsFloatConstant()->GetValue());
+ return bit_cast<int32_t, float>(constant->AsFloatConstant()->GetValue());
} else if (constant->IsLongConstant()) {
return constant->AsLongConstant()->GetValue();
} else {
DCHECK(constant->IsDoubleConstant());
- return bit_cast<double, int64_t>(constant->AsDoubleConstant()->GetValue());
+ return bit_cast<int64_t, double>(constant->AsDoubleConstant()->GetValue());
}
}
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 0a069a7..1f95041 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -41,12 +41,6 @@
static constexpr int kCurrentMethodStackOffset = 0;
-static constexpr Register kRuntimeParameterCoreRegisters[] = { R0, R1, R2, R3 };
-static constexpr size_t kRuntimeParameterCoreRegistersLength =
- arraysize(kRuntimeParameterCoreRegisters);
-static constexpr SRegister kRuntimeParameterFpuRegisters[] = { S0, S1, S2, S3 };
-static constexpr size_t kRuntimeParameterFpuRegistersLength =
- arraysize(kRuntimeParameterFpuRegisters);
// We unconditionally allocate R5 to ensure we can do long operations
// with baseline.
static constexpr Register kCoreSavedRegisterForBaseline = R5;
@@ -59,18 +53,6 @@
// S registers. Therefore there is no need to block it.
static constexpr DRegister DTMP = D31;
-class InvokeRuntimeCallingConvention : public CallingConvention<Register, SRegister> {
- public:
- InvokeRuntimeCallingConvention()
- : CallingConvention(kRuntimeParameterCoreRegisters,
- kRuntimeParameterCoreRegistersLength,
- kRuntimeParameterFpuRegisters,
- kRuntimeParameterFpuRegistersLength) {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
-};
-
#define __ reinterpret_cast<ArmAssembler*>(codegen->GetAssembler())->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
@@ -883,7 +865,7 @@
HInstruction* previous = got->GetPrevious();
HLoopInformation* info = block->GetLoopInformation();
- if (info != nullptr && info->IsBackEdge(block) && info->HasSuspendCheck()) {
+ if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
@@ -1388,9 +1370,14 @@
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
+  // The Java language does not allow treating boolean as an integral type, but
+ // our bit representation makes it safe.
+
switch (result_type) {
case Primitive::kPrimByte:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
@@ -1407,6 +1394,8 @@
case Primitive::kPrimShort:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
@@ -1451,6 +1440,8 @@
case Primitive::kPrimLong:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1487,6 +1478,8 @@
case Primitive::kPrimChar:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1503,6 +1496,8 @@
case Primitive::kPrimFloat:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1536,6 +1531,8 @@
case Primitive::kPrimDouble:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1582,6 +1579,8 @@
switch (result_type) {
case Primitive::kPrimByte:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
@@ -1597,6 +1596,8 @@
case Primitive::kPrimShort:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
@@ -1654,6 +1655,8 @@
case Primitive::kPrimLong:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1692,6 +1695,8 @@
case Primitive::kPrimChar:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1707,6 +1712,8 @@
case Primitive::kPrimFloat:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1773,6 +1780,8 @@
case Primitive::kPrimDouble:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -2078,16 +2087,32 @@
}
void LocationsBuilderARM::VisitDiv(HDiv* div) {
- LocationSummary::CallKind call_kind = div->GetResultType() == Primitive::kPrimLong
- ? LocationSummary::kCall
- : LocationSummary::kNoCall;
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ if (div->GetResultType() == Primitive::kPrimLong) {
+ // pLdiv runtime call.
+ call_kind = LocationSummary::kCall;
+ } else if (div->GetResultType() == Primitive::kPrimInt &&
+ !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ // pIdivmod runtime call.
+ call_kind = LocationSummary::kCall;
+ }
+
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
switch (div->GetResultType()) {
case Primitive::kPrimInt: {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ } else {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
+ // we only need the former.
+ locations->SetOut(Location::RegisterLocation(R0));
+ }
break;
}
case Primitive::kPrimLong: {
@@ -2120,9 +2145,18 @@
switch (div->GetResultType()) {
case Primitive::kPrimInt: {
- __ sdiv(out.AsRegister<Register>(),
- first.AsRegister<Register>(),
- second.AsRegister<Register>());
+ if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ __ sdiv(out.AsRegister<Register>(),
+ first.AsRegister<Register>(),
+ second.AsRegister<Register>());
+ } else {
+ InvokeRuntimeCallingConvention calling_convention;
+ DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
+ DCHECK_EQ(R0, out.AsRegister<Register>());
+
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), div, div->GetDexPc(), nullptr);
+ }
break;
}
@@ -2160,17 +2194,32 @@
void LocationsBuilderARM::VisitRem(HRem* rem) {
Primitive::Type type = rem->GetResultType();
- LocationSummary::CallKind call_kind = type == Primitive::kPrimInt
- ? LocationSummary::kNoCall
- : LocationSummary::kCall;
+
+ // Most remainders are implemented in the runtime.
+ LocationSummary::CallKind call_kind = LocationSummary::kCall;
+ if (rem->GetResultType() == Primitive::kPrimInt &&
+ codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+    // We have a hardware divide instruction for int; compute the remainder inline with three instructions.
+ call_kind = LocationSummary::kNoCall;
+ }
+
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
switch (type) {
case Primitive::kPrimInt: {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- locations->AddTemp(Location::RequiresRegister());
+ if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ locations->AddTemp(Location::RequiresRegister());
+ } else {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
+ // we only need the latter.
+ locations->SetOut(Location::RegisterLocation(R1));
+ }
break;
}
case Primitive::kPrimLong: {
@@ -2215,16 +2264,25 @@
Primitive::Type type = rem->GetResultType();
switch (type) {
case Primitive::kPrimInt: {
- Register reg1 = first.AsRegister<Register>();
- Register reg2 = second.AsRegister<Register>();
- Register temp = locations->GetTemp(0).AsRegister<Register>();
+ if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ Register reg1 = first.AsRegister<Register>();
+ Register reg2 = second.AsRegister<Register>();
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
- // temp = reg1 / reg2 (integer division)
- // temp = temp * reg2
- // dest = reg1 - temp
- __ sdiv(temp, reg1, reg2);
- __ mul(temp, temp, reg2);
- __ sub(out.AsRegister<Register>(), reg1, ShifterOperand(temp));
+ // temp = reg1 / reg2 (integer division)
+ // temp = temp * reg2
+ // dest = reg1 - temp
+ __ sdiv(temp, reg1, reg2);
+ __ mul(temp, temp, reg2);
+ __ sub(out.AsRegister<Register>(), reg1, ShifterOperand(temp));
+ } else {
+ InvokeRuntimeCallingConvention calling_convention;
+ DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
+ DCHECK_EQ(R1, out.AsRegister<Register>());
+
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), rem, rem->GetDexPc(), nullptr);
+ }
break;
}
@@ -2299,10 +2357,8 @@
void LocationsBuilderARM::HandleShift(HBinaryOperation* op) {
DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
- LocationSummary::CallKind call_kind = op->GetResultType() == Primitive::kPrimLong
- ? LocationSummary::kCall
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(op, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
switch (op->GetResultType()) {
case Primitive::kPrimInt: {
@@ -2312,12 +2368,10 @@
break;
}
case Primitive::kPrimLong: {
- InvokeRuntimeCallingConvention calling_convention;
- locations->SetInAt(0, Location::RegisterPairLocation(
- calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
- // The runtime helper puts the output in R0,R1.
- locations->SetOut(Location::RegisterPairLocation(R0, R1));
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
break;
}
default:
@@ -2365,24 +2419,56 @@
break;
}
case Primitive::kPrimLong: {
- // TODO: Inline the assembly instead of calling the runtime.
- InvokeRuntimeCallingConvention calling_convention;
- DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
- DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
- DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegister<Register>());
- DCHECK_EQ(R0, out.AsRegisterPairLow<Register>());
- DCHECK_EQ(R1, out.AsRegisterPairHigh<Register>());
+ Register o_h = out.AsRegisterPairHigh<Register>();
+ Register o_l = out.AsRegisterPairLow<Register>();
- int32_t entry_point_offset;
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+
+ Register high = first.AsRegisterPairHigh<Register>();
+ Register low = first.AsRegisterPairLow<Register>();
+
+ Register second_reg = second.AsRegister<Register>();
+
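+      // The 64-bit shift is assembled from 32-bit shifts: the bits that cross
+      // the word boundary are recovered with the complementary amount (32 - n)
+      // and or-ed in, and the PL-predicated instruction overrides the result
+      // when the masked shift amount is >= 32.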
if (op->IsShl()) {
- entry_point_offset = QUICK_ENTRY_POINT(pShlLong);
+ // Shift the high part
+ __ and_(second_reg, second_reg, ShifterOperand(63));
+ __ Lsl(o_h, high, second_reg);
+      // Shift the low part and `or` in what overflowed into the high part
+ __ rsb(temp, second_reg, ShifterOperand(32));
+ __ Lsr(temp, low, temp);
+ __ orr(o_h, o_h, ShifterOperand(temp));
+      // If the shift is >= 32 bits, override the high part
+ __ subs(temp, second_reg, ShifterOperand(32));
+ __ it(PL);
+ __ Lsl(o_h, low, temp, false, PL);
+ // Shift the low part
+ __ Lsl(o_l, low, second_reg);
} else if (op->IsShr()) {
- entry_point_offset = QUICK_ENTRY_POINT(pShrLong);
+ // Shift the low part
+ __ and_(second_reg, second_reg, ShifterOperand(63));
+ __ Lsr(o_l, low, second_reg);
+      // Shift the high part and `or` in what underflowed into the low part
+ __ rsb(temp, second_reg, ShifterOperand(32));
+ __ Lsl(temp, high, temp);
+ __ orr(o_l, o_l, ShifterOperand(temp));
+      // If the shift is >= 32 bits, override the low part
+ __ subs(temp, second_reg, ShifterOperand(32));
+ __ it(PL);
+ __ Asr(o_l, high, temp, false, PL);
+ // Shift the high part
+ __ Asr(o_h, high, second_reg);
} else {
- entry_point_offset = QUICK_ENTRY_POINT(pUshrLong);
+      // Same as Shr, except we use `Lsr`s and not `Asr`s
+ __ and_(second_reg, second_reg, ShifterOperand(63));
+ __ Lsr(o_l, low, second_reg);
+ __ rsb(temp, second_reg, ShifterOperand(32));
+ __ Lsl(temp, high, temp);
+ __ orr(o_l, o_l, ShifterOperand(temp));
+ __ subs(temp, second_reg, ShifterOperand(32));
+ __ it(PL);
+ __ Lsr(o_l, high, temp, false, PL);
+ __ Lsr(o_h, high, second_reg);
}
- __ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
- __ blx(LR);
break;
}
default:
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 57e1d2f..bcdea7a 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -41,6 +41,25 @@
static constexpr Register kArtMethodRegister = R0;
+static constexpr Register kRuntimeParameterCoreRegisters[] = { R0, R1, R2, R3 };
+static constexpr size_t kRuntimeParameterCoreRegistersLength =
+ arraysize(kRuntimeParameterCoreRegisters);
+static constexpr SRegister kRuntimeParameterFpuRegisters[] = { S0, S1, S2, S3 };
+static constexpr size_t kRuntimeParameterFpuRegistersLength =
+ arraysize(kRuntimeParameterFpuRegisters);
+
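+// Note: defined in the header so that it can be shared outside this file,
+// for instance by the intrinsics code generators.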
+class InvokeRuntimeCallingConvention : public CallingConvention<Register, SRegister> {
+ public:
+ InvokeRuntimeCallingConvention()
+ : CallingConvention(kRuntimeParameterCoreRegisters,
+ kRuntimeParameterCoreRegistersLength,
+ kRuntimeParameterFpuRegisters,
+ kRuntimeParameterFpuRegistersLength) {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
+};
+
static constexpr DRegister FromLowSToD(SRegister reg) {
return DCHECK_CONSTEXPR(reg % 2 == 0, , D0)
static_cast<DRegister>(reg / 2);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index aeec5dd..32ada38 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -63,6 +63,7 @@
using helpers::VIXLRegCodeFromART;
using helpers::WRegisterFrom;
using helpers::XRegisterFrom;
+using helpers::ARM64EncodableConstantOrRegister;
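+// ARM64EncodableConstantOrRegister is assumed to keep a constant operand only
+// when it can be encoded as an immediate for the given instruction, requesting
+// a register otherwise.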
static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
static constexpr int kCurrentMethodStackOffset = 0;
@@ -97,29 +98,6 @@
}
}
-static const Register kRuntimeParameterCoreRegisters[] = { x0, x1, x2, x3, x4, x5, x6, x7 };
-static constexpr size_t kRuntimeParameterCoreRegistersLength =
- arraysize(kRuntimeParameterCoreRegisters);
-static const FPRegister kRuntimeParameterFpuRegisters[] = { d0, d1, d2, d3, d4, d5, d6, d7 };
-static constexpr size_t kRuntimeParameterFpuRegistersLength =
- arraysize(kRuntimeParameterCoreRegisters);
-
-class InvokeRuntimeCallingConvention : public CallingConvention<Register, FPRegister> {
- public:
- static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
-
- InvokeRuntimeCallingConvention()
- : CallingConvention(kRuntimeParameterCoreRegisters,
- kRuntimeParameterCoreRegistersLength,
- kRuntimeParameterFpuRegisters,
- kRuntimeParameterFpuRegistersLength) {}
-
- Location GetReturnLocation(Primitive::Type return_type);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
-};
-
Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type return_type) {
return ARM64ReturnLocation(return_type);
}
@@ -1106,7 +1084,7 @@
case Primitive::kPrimInt:
case Primitive::kPrimLong:
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
+ locations->SetInAt(1, ARM64EncodableConstantOrRegister(instr->InputAt(1), instr));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
@@ -1398,7 +1376,7 @@
switch (in_type) {
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(compare->InputAt(1)));
+ locations->SetInAt(1, ARM64EncodableConstantOrRegister(compare->InputAt(1), compare));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
}
@@ -1468,7 +1446,7 @@
void LocationsBuilderARM64::VisitCondition(HCondition* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
if (instruction->NeedsMaterialization()) {
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -1620,7 +1598,7 @@
HInstruction* previous = got->GetPrevious();
HLoopInformation* info = block->GetLoopInformation();
- if (info != nullptr && info->IsBackEdge(block) && info->HasSuspendCheck()) {
+ if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
@@ -2116,7 +2094,7 @@
switch (neg->GetResultType()) {
case Primitive::kPrimInt:
case Primitive::kPrimLong:
- locations->SetInAt(0, Location::RegisterOrConstant(neg->InputAt(0)));
+ locations->SetInAt(0, ARM64EncodableConstantOrRegister(neg->InputAt(0), neg));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index cbb2e5c..2c624d2 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -80,6 +80,31 @@
DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM64);
};
+static const vixl::Register kRuntimeParameterCoreRegisters[] =
+ { vixl::x0, vixl::x1, vixl::x2, vixl::x3, vixl::x4, vixl::x5, vixl::x6, vixl::x7 };
+static constexpr size_t kRuntimeParameterCoreRegistersLength =
+ arraysize(kRuntimeParameterCoreRegisters);
+static const vixl::FPRegister kRuntimeParameterFpuRegisters[] =
+ { vixl::d0, vixl::d1, vixl::d2, vixl::d3, vixl::d4, vixl::d5, vixl::d6, vixl::d7 };
+static constexpr size_t kRuntimeParameterFpuRegistersLength =
+    arraysize(kRuntimeParameterFpuRegisters);
+
+class InvokeRuntimeCallingConvention : public CallingConvention<vixl::Register, vixl::FPRegister> {
+ public:
+ static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
+
+ InvokeRuntimeCallingConvention()
+ : CallingConvention(kRuntimeParameterCoreRegisters,
+ kRuntimeParameterCoreRegistersLength,
+ kRuntimeParameterFpuRegisters,
+ kRuntimeParameterFpuRegistersLength) {}
+
+ Location GetReturnLocation(Primitive::Type return_type);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
+};
+
class InvokeDexCallingConvention : public CallingConvention<vixl::Register, vixl::FPRegister> {
public:
InvokeDexCallingConvention()
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 754dd10..0d5fe49 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -19,6 +19,8 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
+#include "intrinsics.h"
+#include "intrinsics_x86.h"
#include "mirror/array-inl.h"
#include "mirror/art_method.h"
#include "mirror/class.h"
@@ -34,46 +36,14 @@
static constexpr int kCurrentMethodStackOffset = 0;
-static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX, EBX };
-static constexpr size_t kRuntimeParameterCoreRegistersLength =
- arraysize(kRuntimeParameterCoreRegisters);
static constexpr Register kCoreCalleeSaves[] = { EBP, ESI, EDI };
-static constexpr XmmRegister kRuntimeParameterFpuRegisters[] = { XMM0, XMM1, XMM2, XMM3 };
-static constexpr size_t kRuntimeParameterFpuRegistersLength =
- arraysize(kRuntimeParameterFpuRegisters);
static constexpr int kC2ConditionMask = 0x400;
static constexpr int kFakeReturnRegister = Register(8);
-class InvokeRuntimeCallingConvention : public CallingConvention<Register, XmmRegister> {
- public:
- InvokeRuntimeCallingConvention()
- : CallingConvention(kRuntimeParameterCoreRegisters,
- kRuntimeParameterCoreRegistersLength,
- kRuntimeParameterFpuRegisters,
- kRuntimeParameterFpuRegistersLength) {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
-};
-
#define __ reinterpret_cast<X86Assembler*>(codegen->GetAssembler())->
-class SlowPathCodeX86 : public SlowPathCode {
- public:
- SlowPathCodeX86() : entry_label_(), exit_label_() {}
-
- Label* GetEntryLabel() { return &entry_label_; }
- Label* GetExitLabel() { return &exit_label_; }
-
- private:
- Label entry_label_;
- Label exit_label_;
-
- DISALLOW_COPY_AND_ASSIGN(SlowPathCodeX86);
-};
-
class NullCheckSlowPathX86 : public SlowPathCodeX86 {
public:
explicit NullCheckSlowPathX86(HNullCheck* instruction) : instruction_(instruction) {}
@@ -680,7 +650,7 @@
value = constant->AsLongConstant()->GetValue();
} else {
DCHECK(constant->IsDoubleConstant());
- value = bit_cast<double, int64_t>(constant->AsDoubleConstant()->GetValue());
+ value = bit_cast<int64_t, double>(constant->AsDoubleConstant()->GetValue());
}
__ movl(Address(ESP, destination.GetStackIndex()), Immediate(Low32Bits(value)));
__ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)), Immediate(High32Bits(value)));
@@ -792,7 +762,7 @@
HInstruction* previous = got->GetPrevious();
HLoopInformation* info = block->GetLoopInformation();
- if (info != nullptr && info->IsBackEdge(block) && info->HasSuspendCheck()) {
+ if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
@@ -1140,35 +1110,30 @@
}
void LocationsBuilderX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+ IntrinsicLocationsBuilderX86 intrinsic(GetGraph()->GetArena());
+ if (intrinsic.TryDispatch(invoke)) {
+ return;
+ }
+
HandleInvoke(invoke);
}
+static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorX86* codegen) {
+ if (invoke->GetLocations()->Intrinsified()) {
+ IntrinsicCodeGeneratorX86 intrinsic(codegen);
+ intrinsic.Dispatch(invoke);
+ return true;
+ }
+ return false;
+}
+
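+// If the locations builder recognized the invoke as an intrinsic, emit the
+// intrinsic expansion instead of the generic call sequence.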
void InstructionCodeGeneratorX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
-
- // TODO: Implement all kinds of calls:
- // 1) boot -> boot
- // 2) app -> boot
- // 3) app -> app
- //
- // Currently we implement the app -> app logic, which looks up in the resolve cache.
-
- // temp = method;
- codegen_->LoadCurrentMethod(temp);
- if (!invoke->IsRecursive()) {
- // temp = temp->dex_cache_resolved_methods_;
- __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
- // temp = temp[index_in_cache]
- __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())));
- // (temp + offset_of_quick_compiled_code)()
- __ call(Address(
- temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
- } else {
- __ call(codegen_->GetFrameEntryLabel());
+ if (TryGenerateIntrinsicCode(invoke, codegen_)) {
+ return;
}
- DCHECK(!codegen_->IsLeafMethod());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ codegen_->GenerateStaticOrDirectCall(
+ invoke, invoke->GetLocations()->GetTemp(0).AsRegister<Register>());
}
void LocationsBuilderX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
@@ -1370,9 +1335,14 @@
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
+  // The Java language does not allow treating boolean as an integral type, but
+ // our bit representation makes it safe.
+
switch (result_type) {
case Primitive::kPrimByte:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
@@ -1391,6 +1361,8 @@
case Primitive::kPrimShort:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
@@ -1435,6 +1407,8 @@
case Primitive::kPrimLong:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1464,6 +1438,8 @@
case Primitive::kPrimChar:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1480,6 +1456,8 @@
case Primitive::kPrimFloat:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1511,6 +1489,8 @@
case Primitive::kPrimDouble:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1556,6 +1536,8 @@
switch (result_type) {
case Primitive::kPrimByte:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
@@ -1577,6 +1559,8 @@
case Primitive::kPrimShort:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
@@ -1672,6 +1656,8 @@
case Primitive::kPrimLong:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1703,6 +1689,8 @@
case Primitive::kPrimChar:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1726,6 +1714,8 @@
case Primitive::kPrimFloat:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1783,6 +1773,8 @@
case Primitive::kPrimDouble:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -2730,26 +2722,45 @@
Label less, greater, done;
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong: {
+ Register left_low = left.AsRegisterPairLow<Register>();
+ Register left_high = left.AsRegisterPairHigh<Register>();
+ int32_t val_low = 0;
+ int32_t val_high = 0;
+ bool right_is_const = false;
+
+ if (right.IsConstant()) {
+ DCHECK(right.GetConstant()->IsLongConstant());
+ right_is_const = true;
+ int64_t val = right.GetConstant()->AsLongConstant()->GetValue();
+ val_low = Low32Bits(val);
+ val_high = High32Bits(val);
+ }
+
if (right.IsRegisterPair()) {
- __ cmpl(left.AsRegisterPairHigh<Register>(), right.AsRegisterPairHigh<Register>());
+ __ cmpl(left_high, right.AsRegisterPairHigh<Register>());
} else if (right.IsDoubleStackSlot()) {
- __ cmpl(left.AsRegisterPairHigh<Register>(),
- Address(ESP, right.GetHighStackIndex(kX86WordSize)));
+ __ cmpl(left_high, Address(ESP, right.GetHighStackIndex(kX86WordSize)));
} else {
- DCHECK(right.IsConstant()) << right;
- __ cmpl(left.AsRegisterPairHigh<Register>(),
- Immediate(High32Bits(right.GetConstant()->AsLongConstant()->GetValue())));
+ DCHECK(right_is_const) << right;
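+        // `test reg, reg` sets the same flags as `cmp reg, 0` but has a
+        // shorter encoding, so prefer it when comparing against zero.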
+ if (val_high == 0) {
+ __ testl(left_high, left_high);
+ } else {
+ __ cmpl(left_high, Immediate(val_high));
+ }
}
__ j(kLess, &less); // Signed compare.
__ j(kGreater, &greater); // Signed compare.
if (right.IsRegisterPair()) {
- __ cmpl(left.AsRegisterPairLow<Register>(), right.AsRegisterPairLow<Register>());
+ __ cmpl(left_low, right.AsRegisterPairLow<Register>());
} else if (right.IsDoubleStackSlot()) {
- __ cmpl(left.AsRegisterPairLow<Register>(), Address(ESP, right.GetStackIndex()));
+ __ cmpl(left_low, Address(ESP, right.GetStackIndex()));
} else {
- DCHECK(right.IsConstant()) << right;
- __ cmpl(left.AsRegisterPairLow<Register>(),
- Immediate(Low32Bits(right.GetConstant()->AsLongConstant()->GetValue())));
+ DCHECK(right_is_const) << right;
+ if (val_low == 0) {
+ __ testl(left_low, left_low);
+ } else {
+ __ cmpl(left_low, Immediate(val_low));
+ }
}
break;
}
@@ -2817,6 +2828,32 @@
}
+void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
+ Register temp) {
+ // TODO: Implement all kinds of calls:
+ // 1) boot -> boot
+ // 2) app -> boot
+ // 3) app -> app
+ //
+  // Currently we only implement the app -> app logic, which looks the method up in the resolve cache.
+ // temp = method;
+ LoadCurrentMethod(temp);
+ if (!invoke->IsRecursive()) {
+ // temp = temp->dex_cache_resolved_methods_;
+ __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+ // temp = temp[index_in_cache]
+ __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())));
+ // (temp + offset_of_quick_compiled_code)()
+ __ call(Address(
+ temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+ } else {
+ __ call(GetFrameEntryLabel());
+ }
+
+ DCHECK(!IsLeafMethod());
+ RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
void CodeGeneratorX86::MarkGCCard(Register temp, Register card, Register object, Register value) {
Label is_null;
__ testl(value, value);
@@ -3109,7 +3146,7 @@
Location obj = locations->InAt(0);
if (obj.IsRegister()) {
- __ cmpl(obj.AsRegister<Register>(), Immediate(0));
+ __ testl(obj.AsRegister<Register>(), obj.AsRegister<Register>());
} else if (obj.IsStackSlot()) {
__ cmpl(Address(ESP, obj.GetStackIndex()), Immediate(0));
} else {
@@ -3645,14 +3682,21 @@
__ movl(Address(ESP, destination.GetStackIndex()), Immediate(value));
}
} else if (constant->IsFloatConstant()) {
- float value = constant->AsFloatConstant()->GetValue();
- Immediate imm(bit_cast<float, int32_t>(value));
+ float fp_value = constant->AsFloatConstant()->GetValue();
+ int32_t value = bit_cast<int32_t, float>(fp_value);
+ Immediate imm(value);
if (destination.IsFpuRegister()) {
- ScratchRegisterScope ensure_scratch(
- this, kNoRegister, EAX, codegen_->GetNumberOfCoreRegisters());
- Register temp = static_cast<Register>(ensure_scratch.GetRegister());
- __ movl(temp, imm);
- __ movd(destination.AsFpuRegister<XmmRegister>(), temp);
+ XmmRegister dest = destination.AsFpuRegister<XmmRegister>();
+ if (value == 0) {
+        // Easy handling of 0.0: xorps zeroes the register without a constant load.
+ __ xorps(dest, dest);
+ } else {
+ ScratchRegisterScope ensure_scratch(
+ this, kNoRegister, EAX, codegen_->GetNumberOfCoreRegisters());
+ Register temp = static_cast<Register>(ensure_scratch.GetRegister());
+ __ movl(temp, Immediate(value));
+ __ movd(dest, temp);
+ }
} else {
DCHECK(destination.IsStackSlot()) << destination;
__ movl(Address(ESP, destination.GetStackIndex()), imm);
@@ -3673,7 +3717,7 @@
} else {
DCHECK(constant->IsDoubleConstant());
double dbl_value = constant->AsDoubleConstant()->GetValue();
- int64_t value = bit_cast<double, int64_t>(dbl_value);
+ int64_t value = bit_cast<int64_t, double>(dbl_value);
int32_t low_value = Low32Bits(value);
int32_t high_value = High32Bits(value);
Immediate low(low_value);
@@ -4107,18 +4151,38 @@
} else {
DCHECK(second.IsConstant()) << second;
int64_t value = second.GetConstant()->AsLongConstant()->GetValue();
- Immediate low(Low32Bits(value));
- Immediate high(High32Bits(value));
+ int32_t low_value = Low32Bits(value);
+ int32_t high_value = High32Bits(value);
+ Immediate low(low_value);
+ Immediate high(high_value);
+ Register first_low = first.AsRegisterPairLow<Register>();
+ Register first_high = first.AsRegisterPairHigh<Register>();
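+      // Exploit the identities: AND with 0 zeroes (emitted as an xor), AND
+      // with all ones is a no-op, and OR/XOR with 0 are no-ops, so half of
+      // the operation can often be elided.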
if (instruction->IsAnd()) {
- __ andl(first.AsRegisterPairLow<Register>(), low);
- __ andl(first.AsRegisterPairHigh<Register>(), high);
+ if (low_value == 0) {
+ __ xorl(first_low, first_low);
+ } else if (low_value != -1) {
+ __ andl(first_low, low);
+ }
+ if (high_value == 0) {
+ __ xorl(first_high, first_high);
+ } else if (high_value != -1) {
+ __ andl(first_high, high);
+ }
} else if (instruction->IsOr()) {
- __ orl(first.AsRegisterPairLow<Register>(), low);
- __ orl(first.AsRegisterPairHigh<Register>(), high);
+ if (low_value != 0) {
+ __ orl(first_low, low);
+ }
+ if (high_value != 0) {
+ __ orl(first_high, high);
+ }
} else {
DCHECK(instruction->IsXor());
- __ xorl(first.AsRegisterPairLow<Register>(), low);
- __ xorl(first.AsRegisterPairHigh<Register>(), high);
+ if (low_value != 0) {
+ __ xorl(first_low, low);
+ }
+ if (high_value != 0) {
+ __ xorl(first_high, high);
+ }
}
}
}
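
The per-half handling above applies the standard bitwise identities, emitting nothing when a 32-bit half is the identity element of the operation:

    // and: x & 0 -> 0 (xorl)      x & 0xFFFFFFFF -> x (no code)
    // or:  x | 0 -> x (no code)   xor: x ^ 0     -> x (no code)
    // E.g. `long_val & 0x00000000FFFFFFFFL` emits only `xorl first_high, first_high`.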
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index c5763de..6a4d42d 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -39,6 +39,25 @@
static constexpr XmmRegister kParameterFpuRegisters[] = { XMM0, XMM1, XMM2, XMM3 };
static constexpr size_t kParameterFpuRegistersLength = arraysize(kParameterFpuRegisters);
+static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX, EBX };
+static constexpr size_t kRuntimeParameterCoreRegistersLength =
+ arraysize(kRuntimeParameterCoreRegisters);
+static constexpr XmmRegister kRuntimeParameterFpuRegisters[] = { XMM0, XMM1, XMM2, XMM3 };
+static constexpr size_t kRuntimeParameterFpuRegistersLength =
+ arraysize(kRuntimeParameterFpuRegisters);
+
+class InvokeRuntimeCallingConvention : public CallingConvention<Register, XmmRegister> {
+ public:
+ InvokeRuntimeCallingConvention()
+ : CallingConvention(kRuntimeParameterCoreRegisters,
+ kRuntimeParameterCoreRegistersLength,
+ kRuntimeParameterFpuRegisters,
+ kRuntimeParameterFpuRegistersLength) {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
+};
+
class InvokeDexCallingConvention : public CallingConvention<Register, XmmRegister> {
public:
InvokeDexCallingConvention() : CallingConvention(
@@ -228,6 +247,9 @@
// Helper method to move a 64bits value between two locations.
void Move64(Location destination, Location source);
+ // Generate a call to a static or direct method.
+ void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp);
+
// Emit a write barrier.
void MarkGCCard(Register temp, Register card, Register object, Register value);
@@ -261,6 +283,20 @@
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86);
};
+class SlowPathCodeX86 : public SlowPathCode {
+ public:
+ SlowPathCodeX86() : entry_label_(), exit_label_() {}
+
+ Label* GetEntryLabel() { return &entry_label_; }
+ Label* GetExitLabel() { return &exit_label_; }
+
+ private:
+ Label entry_label_;
+ Label exit_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowPathCodeX86);
+};
+
} // namespace x86
} // namespace art
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index dbd7c9e..ef60280 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -39,28 +39,11 @@
static constexpr int kCurrentMethodStackOffset = 0;
-static constexpr Register kRuntimeParameterCoreRegisters[] = { RDI, RSI, RDX };
-static constexpr size_t kRuntimeParameterCoreRegistersLength =
- arraysize(kRuntimeParameterCoreRegisters);
-static constexpr FloatRegister kRuntimeParameterFpuRegisters[] = { XMM0, XMM1 };
-static constexpr size_t kRuntimeParameterFpuRegistersLength =
- arraysize(kRuntimeParameterFpuRegisters);
static constexpr Register kCoreCalleeSaves[] = { RBX, RBP, R12, R13, R14, R15 };
static constexpr FloatRegister kFpuCalleeSaves[] = { XMM12, XMM13, XMM14, XMM15 };
static constexpr int kC2ConditionMask = 0x400;
-class InvokeRuntimeCallingConvention : public CallingConvention<Register, FloatRegister> {
- public:
- InvokeRuntimeCallingConvention()
- : CallingConvention(kRuntimeParameterCoreRegisters,
- kRuntimeParameterCoreRegistersLength,
- kRuntimeParameterFpuRegisters,
- kRuntimeParameterFpuRegistersLength) {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
-};
#define __ reinterpret_cast<X86_64Assembler*>(codegen->GetAssembler())->
@@ -625,7 +608,7 @@
HConstant* constant = source.GetConstant();
int64_t value = constant->AsLongConstant()->GetValue();
if (constant->IsDoubleConstant()) {
- value = bit_cast<double, int64_t>(constant->AsDoubleConstant()->GetValue());
+ value = bit_cast<int64_t, double>(constant->AsDoubleConstant()->GetValue());
} else {
DCHECK(constant->IsLongConstant());
value = constant->AsLongConstant()->GetValue();
@@ -729,7 +712,7 @@
HInstruction* previous = got->GetPrevious();
HLoopInformation* info = block->GetLoopInformation();
- if (info != nullptr && info->IsBackEdge(block) && info->HasSuspendCheck()) {
+ if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
@@ -956,7 +939,7 @@
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrInt32LongConstant(compare->InputAt(1)));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
}
@@ -982,7 +965,18 @@
Primitive::Type type = compare->InputAt(0)->GetType();
switch (type) {
case Primitive::kPrimLong: {
- __ cmpq(left.AsRegister<CpuRegister>(), right.AsRegister<CpuRegister>());
+ CpuRegister left_reg = left.AsRegister<CpuRegister>();
+ if (right.IsConstant()) {
+ int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
+ DCHECK(IsInt<32>(value));
+ if (value == 0) {
+ __ testq(left_reg, left_reg);
+ } else {
+ __ cmpq(left_reg, Immediate(static_cast<int32_t>(value)));
+ }
+ } else {
+ __ cmpq(left_reg, right.AsRegister<CpuRegister>());
+ }
break;
}
case Primitive::kPrimFloat: {
@@ -1398,9 +1392,15 @@
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
DCHECK_NE(result_type, input_type);
+
+ // The Java language does not allow treating boolean as an integral type but
+ // our bit representation makes it safe.
+
switch (result_type) {
case Primitive::kPrimByte:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
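
The new kPrimBoolean cases are sound because ART stores a boolean as 0 or 1 in an integral slot, so the existing integral conversion paths already preserve the value; a minimal sketch of the assumed invariant:

    // Only the bit patterns 0 and 1 can occur for a boolean, so narrowing
    // or widening it through the int paths is exact:
    int8_t BooleanToByte(int32_t boolean_bits) {
      return static_cast<int8_t>(boolean_bits);  // 0 stays 0, 1 stays 1
    }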
@@ -1417,6 +1417,8 @@
case Primitive::kPrimShort:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
@@ -1461,6 +1463,8 @@
case Primitive::kPrimLong:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1494,6 +1498,8 @@
case Primitive::kPrimChar:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1510,6 +1516,8 @@
case Primitive::kPrimFloat:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1539,6 +1547,8 @@
case Primitive::kPrimDouble:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1582,6 +1592,8 @@
switch (result_type) {
case Primitive::kPrimByte:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
@@ -1606,6 +1618,8 @@
case Primitive::kPrimShort:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
@@ -1704,6 +1718,8 @@
case Primitive::kPrimLong:
switch (input_type) {
DCHECK(out.IsRegister());
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1771,6 +1787,8 @@
case Primitive::kPrimChar:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1795,6 +1813,8 @@
case Primitive::kPrimFloat:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1821,6 +1841,8 @@
case Primitive::kPrimDouble:
switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
@@ -1865,17 +1887,7 @@
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
// We can use a leaq or addq if the constant can fit in an immediate.
- HInstruction* rhs = add->InputAt(1);
- bool is_int32_constant = false;
- if (rhs->IsLongConstant()) {
- int64_t value = rhs->AsLongConstant()->GetValue();
- if (static_cast<int32_t>(value) == value) {
- is_int32_constant = true;
- }
- }
- locations->SetInAt(1,
- is_int32_constant ? Location::RegisterOrConstant(rhs) :
- Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrInt32LongConstant(add->InputAt(1)));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
}
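
Location::RegisterOrInt32LongConstant subsumes the hand-rolled check deleted above; a sketch of the presumed helper, not the actual implementation:

    Location RegisterOrInt32LongConstant(HInstruction* instruction) {
      if (instruction->IsLongConstant()) {
        int64_t value = instruction->AsLongConstant()->GetValue();
        if (static_cast<int32_t>(value) == value) {  // encodable as imm32
          return Location::ConstantLocation(instruction->AsConstant());
        }
      }
      return Location::RequiresRegister();
    }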
@@ -1973,7 +1985,7 @@
}
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrInt32LongConstant(sub->InputAt(1)));
locations->SetOut(Location::SameAsFirstInput());
break;
}
@@ -2007,7 +2019,13 @@
break;
}
case Primitive::kPrimLong: {
- __ subq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
+ if (second.IsConstant()) {
+ int64_t value = second.GetConstant()->AsLongConstant()->GetValue();
+ DCHECK(IsInt<32>(value));
+ __ subq(first.AsRegister<CpuRegister>(), Immediate(static_cast<int32_t>(value)));
+ } else {
+ __ subq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
+ }
break;
}
@@ -2038,8 +2056,13 @@
}
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- locations->SetOut(Location::SameAsFirstInput());
+ locations->SetInAt(1, Location::RegisterOrInt32LongConstant(mul->InputAt(1)));
+ if (locations->InAt(1).IsConstant()) {
+ // Can use 3 operand multiply.
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ } else {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
break;
}
case Primitive::kPrimFloat:
@@ -2059,9 +2082,9 @@
LocationSummary* locations = mul->GetLocations();
Location first = locations->InAt(0);
Location second = locations->InAt(1);
- DCHECK(first.Equals(locations->Out()));
switch (mul->GetResultType()) {
case Primitive::kPrimInt: {
+ DCHECK(first.Equals(locations->Out()));
if (second.IsRegister()) {
__ imull(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
} else if (second.IsConstant()) {
@@ -2075,16 +2098,27 @@
break;
}
case Primitive::kPrimLong: {
- __ imulq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
+ if (second.IsConstant()) {
+ int64_t value = second.GetConstant()->AsLongConstant()->GetValue();
+ DCHECK(IsInt<32>(value));
+ __ imulq(locations->Out().AsRegister<CpuRegister>(),
+ first.AsRegister<CpuRegister>(),
+ Immediate(static_cast<int32_t>(value)));
+ } else {
+ DCHECK(first.Equals(locations->Out()));
+ __ imulq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
+ }
break;
}
case Primitive::kPrimFloat: {
+ DCHECK(first.Equals(locations->Out()));
__ mulss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
break;
}
case Primitive::kPrimDouble: {
+ DCHECK(first.Equals(locations->Out()));
__ mulsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
break;
}
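
The constant case can drop SameAsFirstInput() because x86-64 imul has a three-operand immediate form that writes a separate destination without clobbering the source:

    // imulq $imm32, %src, %dst  computes dst = src * imm32, so the output
    // register need not alias input 0, as the imulq(out, first, imm) call
    // above exploits.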
@@ -3320,20 +3354,35 @@
__ movq(Address(CpuRegister(RSP), destination.GetStackIndex()), CpuRegister(TMP));
}
} else if (constant->IsFloatConstant()) {
- Immediate imm(bit_cast<float, int32_t>(constant->AsFloatConstant()->GetValue()));
+ float fp_value = constant->AsFloatConstant()->GetValue();
+ int32_t value = bit_cast<int32_t, float>(fp_value);
+ Immediate imm(value);
if (destination.IsFpuRegister()) {
- __ movl(CpuRegister(TMP), imm);
- __ movd(destination.AsFpuRegister<XmmRegister>(), CpuRegister(TMP));
+ XmmRegister dest = destination.AsFpuRegister<XmmRegister>();
+ if (value == 0) {
+ // Easy handling of 0.0.
+ __ xorps(dest, dest);
+ } else {
+ __ movl(CpuRegister(TMP), imm);
+ __ movd(dest, CpuRegister(TMP));
+ }
} else {
DCHECK(destination.IsStackSlot()) << destination;
__ movl(Address(CpuRegister(RSP), destination.GetStackIndex()), imm);
}
} else {
DCHECK(constant->IsDoubleConstant()) << constant->DebugName();
- Immediate imm(bit_cast<double, int64_t>(constant->AsDoubleConstant()->GetValue()));
+ double fp_value = constant->AsDoubleConstant()->GetValue();
+ int64_t value = bit_cast<int64_t, double>(fp_value);
+ Immediate imm(value);
if (destination.IsFpuRegister()) {
- __ movq(CpuRegister(TMP), imm);
- __ movd(destination.AsFpuRegister<XmmRegister>(), CpuRegister(TMP));
+ XmmRegister dest = destination.AsFpuRegister<XmmRegister>();
+ if (value == 0) {
+ __ xorpd(dest, dest);
+ } else {
+ __ movq(CpuRegister(TMP), imm);
+ __ movd(dest, CpuRegister(TMP));
+ }
} else {
DCHECK(destination.IsDoubleStackSlot()) << destination;
__ movq(CpuRegister(TMP), imm);
@@ -3673,8 +3722,9 @@
if (instruction->GetType() == Primitive::kPrimInt) {
locations->SetInAt(1, Location::Any());
} else {
- // Request a register to avoid loading a 64bits constant.
+ // We can handle 32 bit constants.
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrInt32LongConstant(instruction->InputAt(1)));
}
locations->SetOut(Location::SameAsFirstInput());
}
@@ -3730,13 +3780,34 @@
}
} else {
DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
+ CpuRegister first_reg = first.AsRegister<CpuRegister>();
+ bool second_is_constant = false;
+ int64_t value = 0;
+ if (second.IsConstant()) {
+ second_is_constant = true;
+ value = second.GetConstant()->AsLongConstant()->GetValue();
+ DCHECK(IsInt<32>(value));
+ }
+
if (instruction->IsAnd()) {
- __ andq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
+ if (second_is_constant) {
+ __ andq(first_reg, Immediate(static_cast<int32_t>(value)));
+ } else {
+ __ andq(first_reg, second.AsRegister<CpuRegister>());
+ }
} else if (instruction->IsOr()) {
- __ orq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
+ if (second_is_constant) {
+ __ orq(first_reg, Immediate(static_cast<int32_t>(value)));
+ } else {
+ __ orq(first_reg, second.AsRegister<CpuRegister>());
+ }
} else {
DCHECK(instruction->IsXor());
- __ xorq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
+ if (second_is_constant) {
+ __ xorq(first_reg, Immediate(static_cast<int32_t>(value)));
+ } else {
+ __ xorq(first_reg, second.AsRegister<CpuRegister>());
+ }
}
}
}
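
All of these imm32 paths require IsInt<32> rather than merely "fits in 32 bits" because x86-64 sign-extends immediate operands to 64 bits:

    // andq $-1, %rax keeps all 64 bits set (imm32 0xFFFFFFFF sign-extends),
    // so 0x00000000FFFFFFFFL is not encodable and must stay in a register:
    //   static_cast<int32_t>(0xFFFFFFFFLL) == -1, which != 0xFFFFFFFFLL.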
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 707c999..a380b6a 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -37,6 +37,25 @@
static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
static constexpr size_t kParameterFloatRegistersLength = arraysize(kParameterFloatRegisters);
+static constexpr Register kRuntimeParameterCoreRegisters[] = { RDI, RSI, RDX };
+static constexpr size_t kRuntimeParameterCoreRegistersLength =
+ arraysize(kRuntimeParameterCoreRegisters);
+static constexpr FloatRegister kRuntimeParameterFpuRegisters[] = { XMM0, XMM1 };
+static constexpr size_t kRuntimeParameterFpuRegistersLength =
+ arraysize(kRuntimeParameterFpuRegisters);
+
+class InvokeRuntimeCallingConvention : public CallingConvention<Register, FloatRegister> {
+ public:
+ InvokeRuntimeCallingConvention()
+ : CallingConvention(kRuntimeParameterCoreRegisters,
+ kRuntimeParameterCoreRegistersLength,
+ kRuntimeParameterFpuRegisters,
+ kRuntimeParameterFpuRegistersLength) {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
+};
+
class InvokeDexCallingConvention : public CallingConvention<Register, FloatRegister> {
public:
InvokeDexCallingConvention() : CallingConvention(
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 868fc5b..6053ad5 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -145,6 +145,7 @@
std::function<void(HGraph*)> hook_before_codegen,
bool has_result,
Expected expected) {
+ graph->BuildDominatorTree();
SsaLivenessAnalysis liveness(*graph, codegen);
liveness.Analyze();
@@ -473,10 +474,8 @@
HBasicBlock* first_block = new (&allocator) HBasicBlock(graph);
graph->AddBlock(first_block);
entry->AddSuccessor(first_block);
- HIntConstant* constant0 = new (&allocator) HIntConstant(0);
- entry->AddInstruction(constant0);
- HIntConstant* constant1 = new (&allocator) HIntConstant(1);
- entry->AddInstruction(constant1);
+ HIntConstant* constant0 = graph->GetIntConstant(0);
+ HIntConstant* constant1 = graph->GetIntConstant(1);
HEqual* equal = new (&allocator) HEqual(constant0, constant0);
first_block->AddInstruction(equal);
first_block->AddInstruction(new (&allocator) HIf(equal));
@@ -581,11 +580,9 @@
code_block->AddSuccessor(exit_block);
graph->SetExitBlock(exit_block);
- HIntConstant cst_lhs(lhs[i]);
- code_block->AddInstruction(&cst_lhs);
- HIntConstant cst_rhs(rhs[i]);
- code_block->AddInstruction(&cst_rhs);
- HLessThan cmp_lt(&cst_lhs, &cst_rhs);
+ HIntConstant* cst_lhs = graph->GetIntConstant(lhs[i]);
+ HIntConstant* cst_rhs = graph->GetIntConstant(rhs[i]);
+ HLessThan cmp_lt(cst_lhs, cst_rhs);
code_block->AddInstruction(&cmp_lt);
HReturn ret(&cmp_lt);
code_block->AddInstruction(&ret);
@@ -638,11 +635,9 @@
if_false_block->AddSuccessor(exit_block);
graph->SetExitBlock(exit_block);
- HIntConstant cst_lhs(lhs[i]);
- if_block->AddInstruction(&cst_lhs);
- HIntConstant cst_rhs(rhs[i]);
- if_block->AddInstruction(&cst_rhs);
- HLessThan cmp_lt(&cst_lhs, &cst_rhs);
+ HIntConstant* cst_lhs = graph->GetIntConstant(lhs[i]);
+ HIntConstant* cst_rhs = graph->GetIntConstant(rhs[i]);
+ HLessThan cmp_lt(cst_lhs, cst_rhs);
if_block->AddInstruction(&cmp_lt);
// We insert a temporary to separate the HIf from the HLessThan and force
// the materialization of the condition.
@@ -651,13 +646,11 @@
HIf if_lt(&cmp_lt);
if_block->AddInstruction(&if_lt);
- HIntConstant cst_lt(1);
- if_true_block->AddInstruction(&cst_lt);
- HReturn ret_lt(&cst_lt);
+ HIntConstant* cst_lt = graph->GetIntConstant(1);
+ HReturn ret_lt(cst_lt);
if_true_block->AddInstruction(&ret_lt);
- HIntConstant cst_ge(0);
- if_false_block->AddInstruction(&cst_ge);
- HReturn ret_ge(&cst_ge);
+ HIntConstant* cst_ge = graph->GetIntConstant(0);
+ HReturn ret_ge(cst_ge);
if_false_block->AddInstruction(&ret_ge);
auto hook_before_codegen = [](HGraph* graph_in) {
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 9447d3b..fd8c0c6 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -183,6 +183,40 @@
}
}
+static bool CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* instr) {
+ DCHECK(constant->IsIntConstant() || constant->IsLongConstant() || constant->IsNullConstant());
+
+ // For single uses we let VIXL handle the constant generation since it will
+ // use registers that are not managed by the register allocator (wip0, wip1).
+ if (constant->GetUses().HasOnlyOneUse()) {
+ return true;
+ }
+
+ int64_t value = CodeGenerator::GetInt64ValueOf(constant);
+
+ if (instr->IsAdd() || instr->IsSub() || instr->IsCondition() || instr->IsCompare()) {
+ // Uses aliases of ADD/SUB instructions.
+ return vixl::Assembler::IsImmAddSub(value);
+ } else if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
+ // Uses logical operations.
+ return vixl::Assembler::IsImmLogical(value, vixl::kXRegSize);
+ } else {
+ DCHECK(instr->IsNeg());
+ // Uses mov -immediate.
+ return vixl::Assembler::IsImmMovn(value, vixl::kXRegSize);
+ }
+}
+
+static inline Location ARM64EncodableConstantOrRegister(HInstruction* constant,
+ HInstruction* instr) {
+ if (constant->IsConstant()
+ && CanEncodeConstantAsImmediate(constant->AsConstant(), instr)) {
+ return Location::ConstantLocation(constant->AsConstant());
+ }
+
+ return Location::RequiresRegister();
+}
+
} // namespace helpers
} // namespace arm64
} // namespace art
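
A typical call site for the new helper would look like this in a locations builder; a hedged sketch only, with VisitAdd chosen as an arbitrary example:

    void LocationsBuilderARM64::VisitAdd(HAdd* add) {
      LocationSummary* locations =
          new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
      locations->SetInAt(0, Location::RequiresRegister());
      // Use an immediate only when the ADD/SUB aliases can encode it.
      locations->SetInAt(1, ARM64EncodableConstantOrRegister(add->InputAt(1), add));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
    }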
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index ec0cc3e..b7a92b5 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -55,20 +55,20 @@
if (inst->IsBinaryOperation()) {
// Constant folding: replace `op(a, b)' with a constant at
// compile time if `a' and `b' are both constants.
- HConstant* constant =
- inst->AsBinaryOperation()->TryStaticEvaluation();
+ HConstant* constant = inst->AsBinaryOperation()->TryStaticEvaluation();
if (constant != nullptr) {
- inst->GetBlock()->ReplaceAndRemoveInstructionWith(inst, constant);
+ inst->ReplaceWith(constant);
+ inst->GetBlock()->RemoveInstruction(inst);
} else {
inst->Accept(&simplifier);
}
} else if (inst->IsUnaryOperation()) {
// Constant folding: replace `op(a)' with a constant at compile
// time if `a' is a constant.
- HConstant* constant =
- inst->AsUnaryOperation()->TryStaticEvaluation();
+ HConstant* constant = inst->AsUnaryOperation()->TryStaticEvaluation();
if (constant != nullptr) {
- inst->GetBlock()->ReplaceAndRemoveInstructionWith(inst, constant);
+ inst->ReplaceWith(constant);
+ inst->GetBlock()->RemoveInstruction(inst);
}
} else if (inst->IsDivZeroCheck()) {
// We can safely remove the check if the input is a non-null constant.
@@ -173,9 +173,8 @@
// REM dst, src, src
// with
// CONSTANT 0
- ArenaAllocator* allocator = GetGraph()->GetArena();
- block->ReplaceAndRemoveInstructionWith(instruction,
- HConstant::NewConstant(allocator, type, 0));
+ instruction->ReplaceWith(GetGraph()->GetConstant(type, 0));
+ block->RemoveInstruction(instruction);
}
}
@@ -195,7 +194,6 @@
}
HBasicBlock* block = instruction->GetBlock();
- ArenaAllocator* allocator = GetGraph()->GetArena();
// We assume that GVN has run before, so we only perform a pointer
// comparison. If for some reason the values are equal but the pointers are
@@ -208,8 +206,8 @@
// CONSTANT 0
// Note that we cannot optimise `x - x` to `0` for floating-point. It does
// not work when `x` is an infinity.
- block->ReplaceAndRemoveInstructionWith(instruction,
- HConstant::NewConstant(allocator, type, 0));
+ instruction->ReplaceWith(GetGraph()->GetConstant(type, 0));
+ block->RemoveInstruction(instruction);
}
}
@@ -225,10 +223,8 @@
// CONSTANT 0
Primitive::Type type = instruction->GetType();
HBasicBlock* block = instruction->GetBlock();
- ArenaAllocator* allocator = GetGraph()->GetArena();
-
- block->ReplaceAndRemoveInstructionWith(instruction,
- HConstant::NewConstant(allocator, type, 0));
+ instruction->ReplaceWith(GetGraph()->GetConstant(type, 0));
+ block->RemoveInstruction(instruction);
}
}
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index 6ceccfb..6853d54 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -101,14 +101,16 @@
// Expected difference after constant folding.
diff_t expected_cf_diff = {
{ " 2: IntConstant [5]\n", " 2: IntConstant\n" },
- { " 5: Neg(2) [8]\n", " 12: IntConstant [8]\n" },
+ { " 10: SuspendCheck\n", " 10: SuspendCheck\n"
+ " 12: IntConstant [8]\n" },
+ { " 5: Neg(2) [8]\n", removed },
{ " 8: Return(5)\n", " 8: Return(12)\n" }
};
std::string expected_after_cf = Patch(expected_before, expected_cf_diff);
// Check the value of the computed constant.
auto check_after_cf = [](HGraph* graph) {
- HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction();
+ HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction()->InputAt(0);
ASSERT_TRUE(inst->IsIntConstant());
ASSERT_EQ(inst->AsIntConstant()->GetValue(), -1);
};
@@ -160,14 +162,16 @@
diff_t expected_cf_diff = {
{ " 3: IntConstant [9]\n", " 3: IntConstant\n" },
{ " 5: IntConstant [9]\n", " 5: IntConstant\n" },
- { " 9: Add(3, 5) [12]\n", " 16: IntConstant [12]\n" },
+ { " 14: SuspendCheck\n", " 14: SuspendCheck\n"
+ " 16: IntConstant [12]\n" },
+ { " 9: Add(3, 5) [12]\n", removed },
{ " 12: Return(9)\n", " 12: Return(16)\n" }
};
std::string expected_after_cf = Patch(expected_before, expected_cf_diff);
// Check the value of the computed constant.
auto check_after_cf = [](HGraph* graph) {
- HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction();
+ HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction()->InputAt(0);
ASSERT_TRUE(inst->IsIntConstant());
ASSERT_EQ(inst->AsIntConstant()->GetValue(), 3);
};
@@ -195,8 +199,8 @@
* v0 <- 1 0. const/4 v0, #+1
* v1 <- 2 1. const/4 v1, #+2
* v0 <- v0 + v1 2. add-int/2addr v0, v1
- * v1 <- 3 3. const/4 v1, #+3
- * v2 <- 4 4. const/4 v2, #+4
+ * v1 <- 4 3. const/4 v1, #+4
+ * v2 <- 5 4. const/4 v2, #+5
* v1 <- v1 + v2 5. add-int/2addr v1, v2
* v2 <- v0 + v1 6. add-int v2, v0, v1
* return v2 8. return v2
@@ -206,8 +210,8 @@
Instruction::CONST_4 | 0 << 8 | 1 << 12,
Instruction::CONST_4 | 1 << 8 | 2 << 12,
Instruction::ADD_INT_2ADDR | 0 << 8 | 1 << 12,
- Instruction::CONST_4 | 1 << 8 | 3 << 12,
- Instruction::CONST_4 | 2 << 8 | 4 << 12,
+ Instruction::CONST_4 | 1 << 8 | 4 << 12,
+ Instruction::CONST_4 | 2 << 8 | 5 << 12,
Instruction::ADD_INT_2ADDR | 1 << 8 | 2 << 12,
Instruction::ADD_INT | 2 << 8, 0 | 1 << 8,
Instruction::RETURN | 2 << 8);
@@ -234,24 +238,28 @@
{ " 5: IntConstant [9]\n", " 5: IntConstant\n" },
{ " 11: IntConstant [17]\n", " 11: IntConstant\n" },
{ " 13: IntConstant [17]\n", " 13: IntConstant\n" },
- { " 9: Add(3, 5) [21]\n", " 28: IntConstant\n" },
- { " 17: Add(11, 13) [21]\n", " 29: IntConstant\n" },
- { " 21: Add(9, 17) [24]\n", " 30: IntConstant [24]\n" },
+ { " 26: SuspendCheck\n", " 26: SuspendCheck\n"
+ " 28: IntConstant\n"
+ " 29: IntConstant\n"
+ " 30: IntConstant [24]\n" },
+ { " 9: Add(3, 5) [21]\n", removed },
+ { " 17: Add(11, 13) [21]\n", removed },
+ { " 21: Add(9, 17) [24]\n", removed },
{ " 24: Return(21)\n", " 24: Return(30)\n" }
};
std::string expected_after_cf = Patch(expected_before, expected_cf_diff);
// Check the values of the computed constants.
auto check_after_cf = [](HGraph* graph) {
- HInstruction* inst1 = graph->GetBlock(1)->GetFirstInstruction();
+ HInstruction* inst1 = graph->GetBlock(1)->GetFirstInstruction()->InputAt(0);
ASSERT_TRUE(inst1->IsIntConstant());
- ASSERT_EQ(inst1->AsIntConstant()->GetValue(), 3);
- HInstruction* inst2 = inst1->GetNext();
+ ASSERT_EQ(inst1->AsIntConstant()->GetValue(), 12);
+ HInstruction* inst2 = inst1->GetPrevious();
ASSERT_TRUE(inst2->IsIntConstant());
- ASSERT_EQ(inst2->AsIntConstant()->GetValue(), 7);
- HInstruction* inst3 = inst2->GetNext();
+ ASSERT_EQ(inst2->AsIntConstant()->GetValue(), 9);
+ HInstruction* inst3 = inst2->GetPrevious();
ASSERT_TRUE(inst3->IsIntConstant());
- ASSERT_EQ(inst3->AsIntConstant()->GetValue(), 10);
+ ASSERT_EQ(inst3->AsIntConstant()->GetValue(), 3);
};
// Expected difference after dead code elimination.
@@ -306,14 +314,16 @@
diff_t expected_cf_diff = {
{ " 3: IntConstant [9]\n", " 3: IntConstant\n" },
{ " 5: IntConstant [9]\n", " 5: IntConstant\n" },
- { " 9: Sub(3, 5) [12]\n", " 16: IntConstant [12]\n" },
+ { " 14: SuspendCheck\n", " 14: SuspendCheck\n"
+ " 16: IntConstant [12]\n" },
+ { " 9: Sub(3, 5) [12]\n", removed },
{ " 12: Return(9)\n", " 12: Return(16)\n" }
};
std::string expected_after_cf = Patch(expected_before, expected_cf_diff);
// Check the value of the computed constant.
auto check_after_cf = [](HGraph* graph) {
- HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction();
+ HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction()->InputAt(0);
ASSERT_TRUE(inst->IsIntConstant());
ASSERT_EQ(inst->AsIntConstant()->GetValue(), 1);
};
@@ -368,14 +378,16 @@
diff_t expected_cf_diff = {
{ " 6: LongConstant [12]\n", " 6: LongConstant\n" },
{ " 8: LongConstant [12]\n", " 8: LongConstant\n" },
- { " 12: Add(6, 8) [15]\n", " 19: LongConstant [15]\n" },
+ { " 17: SuspendCheck\n", " 17: SuspendCheck\n"
+ " 19: LongConstant [15]\n" },
+ { " 12: Add(6, 8) [15]\n", removed },
{ " 15: Return(12)\n", " 15: Return(19)\n" }
};
std::string expected_after_cf = Patch(expected_before, expected_cf_diff);
// Check the value of the computed constant.
auto check_after_cf = [](HGraph* graph) {
- HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction();
+ HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction()->InputAt(0);
ASSERT_TRUE(inst->IsLongConstant());
ASSERT_EQ(inst->AsLongConstant()->GetValue(), 3);
};
@@ -431,14 +443,16 @@
diff_t expected_cf_diff = {
{ " 6: LongConstant [12]\n", " 6: LongConstant\n" },
{ " 8: LongConstant [12]\n", " 8: LongConstant\n" },
- { " 12: Sub(6, 8) [15]\n", " 19: LongConstant [15]\n" },
+ { " 17: SuspendCheck\n", " 17: SuspendCheck\n"
+ " 19: LongConstant [15]\n" },
+ { " 12: Sub(6, 8) [15]\n", removed },
{ " 15: Return(12)\n", " 15: Return(19)\n" }
};
std::string expected_after_cf = Patch(expected_before, expected_cf_diff);
// Check the value of the computed constant.
auto check_after_cf = [](HGraph* graph) {
- HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction();
+ HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction()->InputAt(0);
ASSERT_TRUE(inst->IsLongConstant());
ASSERT_EQ(inst->AsLongConstant()->GetValue(), 1);
};
@@ -469,51 +483,51 @@
* 16-bit
* offset
* ------
- * v0 <- 0 0. const/4 v0, #+0
- * v1 <- 1 1. const/4 v1, #+1
+ * v0 <- 1 0. const/4 v0, #+1
+ * v1 <- 2 1. const/4 v1, #+2
* v2 <- v0 + v1 2. add-int v2, v0, v1
* goto L2 4. goto +4
- * L1: v1 <- v0 + 3 5. add-int/lit16 v1, v0, #+3
+ * L1: v1 <- v0 + 5 5. add-int/lit16 v1, v0, #+5
* goto L3 7. goto +4
- * L2: v0 <- v2 + 2 8. add-int/lit16 v0, v2, #+2
+ * L2: v0 <- v2 + 4 8. add-int/lit16 v0, v2, #+4
* goto L1 10. goto +(-5)
- * L3: v2 <- v1 + 4 11. add-int/lit16 v2, v1, #+4
+ * L3: v2 <- v1 + 8 11. add-int/lit16 v2, v1, #+8
* return v2 13. return v2
*/
TEST(ConstantFolding, IntConstantFoldingAndJumps) {
const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
- Instruction::CONST_4 | 0 << 8 | 0 << 12,
- Instruction::CONST_4 | 1 << 8 | 1 << 12,
+ Instruction::CONST_4 | 0 << 8 | 1 << 12,
+ Instruction::CONST_4 | 1 << 8 | 2 << 12,
Instruction::ADD_INT | 2 << 8, 0 | 1 << 8,
Instruction::GOTO | 4 << 8,
- Instruction::ADD_INT_LIT16 | 1 << 8 | 0 << 12, 3,
+ Instruction::ADD_INT_LIT16 | 1 << 8 | 0 << 12, 5,
Instruction::GOTO | 4 << 8,
- Instruction::ADD_INT_LIT16 | 0 << 8 | 2 << 12, 2,
+ Instruction::ADD_INT_LIT16 | 0 << 8 | 2 << 12, 4,
static_cast<uint16_t>(Instruction::GOTO | -5 << 8),
- Instruction::ADD_INT_LIT16 | 2 << 8 | 1 << 12, 4,
+ Instruction::ADD_INT_LIT16 | 2 << 8 | 1 << 12, 8,
Instruction::RETURN | 2 << 8);
std::string expected_before =
"BasicBlock 0, succ: 1\n"
- " 3: IntConstant [9]\n" // v0 <- 0
- " 5: IntConstant [9]\n" // v1 <- 1
- " 13: IntConstant [14]\n" // const 3
- " 18: IntConstant [19]\n" // const 2
- " 24: IntConstant [25]\n" // const 4
+ " 3: IntConstant [9]\n" // v0 <- 1
+ " 5: IntConstant [9]\n" // v1 <- 2
+ " 13: IntConstant [14]\n" // const 5
+ " 18: IntConstant [19]\n" // const 4
+ " 24: IntConstant [25]\n" // const 8
" 30: SuspendCheck\n"
" 31: Goto 1\n"
"BasicBlock 1, pred: 0, succ: 3\n"
- " 9: Add(3, 5) [19]\n" // v2 <- v0 + v1 = 0 + 1 = 1
+ " 9: Add(3, 5) [19]\n" // v2 <- v0 + v1 = 1 + 2 = 3
" 11: Goto 3\n" // goto L2
"BasicBlock 2, pred: 3, succ: 4\n" // L1:
- " 14: Add(19, 13) [25]\n" // v1 <- v0 + 3 = 3 + 3 = 6
+ " 14: Add(19, 13) [25]\n" // v1 <- v0 + 3 = 7 + 5 = 12
" 16: Goto 4\n" // goto L3
"BasicBlock 3, pred: 1, succ: 2\n" // L2:
- " 19: Add(9, 18) [14]\n" // v0 <- v2 + 2 = 1 + 2 = 3
+ " 19: Add(9, 18) [14]\n" // v0 <- v2 + 2 = 3 + 4 = 7
" 21: SuspendCheck\n"
" 22: Goto 2\n" // goto L1
"BasicBlock 4, pred: 2, succ: 5\n" // L3:
- " 25: Add(14, 24) [28]\n" // v2 <- v1 + 4 = 6 + 4 = 10
+ " 25: Add(14, 24) [28]\n" // v2 <- v1 + 4 = 12 + 8 = 20
" 28: Return(25)\n" // return v2
"BasicBlock 5, pred: 4\n"
" 29: Exit\n";
@@ -525,28 +539,33 @@
{ " 13: IntConstant [14]\n", " 13: IntConstant\n" },
{ " 18: IntConstant [19]\n", " 18: IntConstant\n" },
{ " 24: IntConstant [25]\n", " 24: IntConstant\n" },
- { " 9: Add(3, 5) [19]\n", " 32: IntConstant []\n" },
- { " 14: Add(19, 13) [25]\n", " 34: IntConstant\n" },
- { " 19: Add(9, 18) [14]\n", " 33: IntConstant []\n" },
- { " 25: Add(14, 24) [28]\n", " 35: IntConstant [28]\n" },
+ { " 30: SuspendCheck\n", " 30: SuspendCheck\n"
+ " 32: IntConstant []\n"
+ " 33: IntConstant []\n"
+ " 34: IntConstant\n"
+ " 35: IntConstant [28]\n" },
+ { " 9: Add(3, 5) [19]\n", removed },
+ { " 14: Add(19, 13) [25]\n", removed },
+ { " 19: Add(9, 18) [14]\n", removed },
+ { " 25: Add(14, 24) [28]\n", removed },
{ " 28: Return(25)\n", " 28: Return(35)\n"}
};
std::string expected_after_cf = Patch(expected_before, expected_cf_diff);
// Check the values of the computed constants.
auto check_after_cf = [](HGraph* graph) {
- HInstruction* inst1 = graph->GetBlock(1)->GetFirstInstruction();
+ HInstruction* inst1 = graph->GetBlock(4)->GetFirstInstruction()->InputAt(0);
ASSERT_TRUE(inst1->IsIntConstant());
- ASSERT_EQ(inst1->AsIntConstant()->GetValue(), 1);
- HInstruction* inst2 = graph->GetBlock(2)->GetFirstInstruction();
+ ASSERT_EQ(inst1->AsIntConstant()->GetValue(), 20);
+ HInstruction* inst2 = inst1->GetPrevious();
ASSERT_TRUE(inst2->IsIntConstant());
- ASSERT_EQ(inst2->AsIntConstant()->GetValue(), 6);
- HInstruction* inst3 = graph->GetBlock(3)->GetFirstInstruction();
+ ASSERT_EQ(inst2->AsIntConstant()->GetValue(), 12);
+ HInstruction* inst3 = inst2->GetPrevious();
ASSERT_TRUE(inst3->IsIntConstant());
- ASSERT_EQ(inst3->AsIntConstant()->GetValue(), 3);
- HInstruction* inst4 = graph->GetBlock(4)->GetFirstInstruction();
+ ASSERT_EQ(inst3->AsIntConstant()->GetValue(), 7);
+ HInstruction* inst4 = inst3->GetPrevious();
ASSERT_TRUE(inst4->IsIntConstant());
- ASSERT_EQ(inst4->AsIntConstant()->GetValue(), 10);
+ ASSERT_EQ(inst4->AsIntConstant()->GetValue(), 3);
};
// Expected difference after dead code elimination.
@@ -611,25 +630,25 @@
// Expected difference after constant folding.
diff_t expected_cf_diff = {
- { " 3: IntConstant [15, 22, 8]\n", " 3: IntConstant [15, 22]\n" },
+ { " 3: IntConstant [15, 22, 8]\n", " 3: IntConstant [9, 15, 22]\n" },
{ " 5: IntConstant [22, 8]\n", " 5: IntConstant [22]\n" },
- { " 8: GreaterThanOrEqual(3, 5) [9]\n", " 23: IntConstant [9]\n" },
- { " 9: If(8)\n", " 9: If(23)\n" }
+ { " 8: GreaterThanOrEqual(3, 5) [9]\n", removed },
+ { " 9: If(8)\n", " 9: If(3)\n" }
};
std::string expected_after_cf = Patch(expected_before, expected_cf_diff);
// Check the values of the computed constants.
auto check_after_cf = [](HGraph* graph) {
- HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction();
+ HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction()->InputAt(0);
ASSERT_TRUE(inst->IsIntConstant());
ASSERT_EQ(inst->AsIntConstant()->GetValue(), 1);
};
// Expected difference after dead code elimination.
diff_t expected_dce_diff = {
- { " 3: IntConstant [15, 22]\n", " 3: IntConstant [22]\n" },
- { " 22: Phi(3, 5) [15]\n", " 22: Phi(3, 5)\n" },
- { " 15: Add(22, 3)\n", removed }
+ { " 3: IntConstant [9, 15, 22]\n", " 3: IntConstant [9, 22]\n" },
+ { " 22: Phi(3, 5) [15]\n", " 22: Phi(3, 5)\n" },
+ { " 15: Add(22, 3)\n", removed }
};
std::string expected_after_dce = Patch(expected_after_cf, expected_dce_diff);
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 76b9f4f..7c3c2bf 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -80,8 +80,7 @@
}
// Ensure `block` ends with a branch instruction.
- HInstruction* last_inst = block->GetLastInstruction();
- if (last_inst == nullptr || !last_inst->IsControlFlow()) {
+ if (!block->EndsWithControlFlowInstruction()) {
AddError(StringPrintf("Block %d does not end with a branch instruction.",
block->GetBlockId()));
}
@@ -227,13 +226,13 @@
} else {
HLoopInformation* loop_information = loop_header->GetLoopInformation();
HBasicBlock* first_predecessor = loop_header->GetPredecessors().Get(0);
- if (loop_information->IsBackEdge(first_predecessor)) {
+ if (loop_information->IsBackEdge(*first_predecessor)) {
AddError(StringPrintf(
"First predecessor of loop header %d is a back edge.",
id));
}
HBasicBlock* second_predecessor = loop_header->GetPredecessors().Get(1);
- if (!loop_information->IsBackEdge(second_predecessor)) {
+ if (!loop_information->IsBackEdge(*second_predecessor)) {
AddError(StringPrintf(
"Second predecessor of loop header %d is not a back edge.",
id));
@@ -476,4 +475,15 @@
}
}
+void SSAChecker::VisitConstant(HConstant* instruction) {
+ HBasicBlock* block = instruction->GetBlock();
+ if (!block->IsEntryBlock()) {
+ AddError(StringPrintf(
+ "%s %d should be in the entry block but is in block %d.",
+ instruction->DebugName(),
+ instruction->GetId(),
+ block->GetBlockId()));
+ }
+}
+
} // namespace art
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 5ec3003..89fea0a 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -107,6 +107,7 @@
void VisitBinaryOperation(HBinaryOperation* op) OVERRIDE;
void VisitCondition(HCondition* op) OVERRIDE;
void VisitIf(HIf* instruction) OVERRIDE;
+ void VisitConstant(HConstant* instruction) OVERRIDE;
private:
DISALLOW_COPY_AND_ASSIGN(SSAChecker);
diff --git a/compiler/optimizing/graph_test.cc b/compiler/optimizing/graph_test.cc
index 4742e4d..50398b4 100644
--- a/compiler/optimizing/graph_test.cc
+++ b/compiler/optimizing/graph_test.cc
@@ -28,8 +28,7 @@
static HBasicBlock* createIfBlock(HGraph* graph, ArenaAllocator* allocator) {
HBasicBlock* if_block = new (allocator) HBasicBlock(graph);
graph->AddBlock(if_block);
- HInstruction* instr = new (allocator) HIntConstant(4);
- if_block->AddInstruction(instr);
+ HInstruction* instr = graph->GetIntConstant(4);
HInstruction* equal = new (allocator) HEqual(instr, instr);
if_block->AddInstruction(equal);
instr = new (allocator) HIf(equal);
@@ -45,6 +44,12 @@
return block;
}
+static HBasicBlock* createEntryBlock(HGraph* graph, ArenaAllocator* allocator) {
+ HBasicBlock* block = createGotoBlock(graph, allocator);
+ graph->SetEntryBlock(block);
+ return block;
+}
+
static HBasicBlock* createReturnBlock(HGraph* graph, ArenaAllocator* allocator) {
HBasicBlock* block = new (allocator) HBasicBlock(graph);
graph->AddBlock(block);
@@ -69,7 +74,7 @@
ArenaAllocator allocator(&pool);
HGraph* graph = new (&allocator) HGraph(&allocator);
- HBasicBlock* entry_block = createGotoBlock(graph, &allocator);
+ HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
HBasicBlock* if_block = createIfBlock(graph, &allocator);
HBasicBlock* if_true = createGotoBlock(graph, &allocator);
HBasicBlock* return_block = createReturnBlock(graph, &allocator);
@@ -104,7 +109,7 @@
ArenaAllocator allocator(&pool);
HGraph* graph = new (&allocator) HGraph(&allocator);
- HBasicBlock* entry_block = createGotoBlock(graph, &allocator);
+ HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
HBasicBlock* if_block = createIfBlock(graph, &allocator);
HBasicBlock* if_false = createGotoBlock(graph, &allocator);
HBasicBlock* return_block = createReturnBlock(graph, &allocator);
@@ -139,12 +144,11 @@
ArenaAllocator allocator(&pool);
HGraph* graph = new (&allocator) HGraph(&allocator);
- HBasicBlock* entry_block = createGotoBlock(graph, &allocator);
+ HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
HBasicBlock* if_block = createIfBlock(graph, &allocator);
HBasicBlock* return_block = createReturnBlock(graph, &allocator);
HBasicBlock* exit_block = createExitBlock(graph, &allocator);
- graph->SetEntryBlock(entry_block);
entry_block->AddSuccessor(if_block);
if_block->AddSuccessor(if_block);
if_block->AddSuccessor(return_block);
@@ -175,12 +179,11 @@
ArenaAllocator allocator(&pool);
HGraph* graph = new (&allocator) HGraph(&allocator);
- HBasicBlock* entry_block = createGotoBlock(graph, &allocator);
+ HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
HBasicBlock* if_block = createIfBlock(graph, &allocator);
HBasicBlock* return_block = createReturnBlock(graph, &allocator);
HBasicBlock* exit_block = createExitBlock(graph, &allocator);
- graph->SetEntryBlock(entry_block);
entry_block->AddSuccessor(if_block);
if_block->AddSuccessor(return_block);
if_block->AddSuccessor(if_block);
@@ -211,13 +214,12 @@
ArenaAllocator allocator(&pool);
HGraph* graph = new (&allocator) HGraph(&allocator);
- HBasicBlock* entry_block = createGotoBlock(graph, &allocator);
+ HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
HBasicBlock* first_if_block = createIfBlock(graph, &allocator);
HBasicBlock* if_block = createIfBlock(graph, &allocator);
HBasicBlock* loop_block = createGotoBlock(graph, &allocator);
HBasicBlock* return_block = createReturnBlock(graph, &allocator);
- graph->SetEntryBlock(entry_block);
entry_block->AddSuccessor(first_if_block);
first_if_block->AddSuccessor(if_block);
first_if_block->AddSuccessor(loop_block);
@@ -251,13 +253,12 @@
ArenaAllocator allocator(&pool);
HGraph* graph = new (&allocator) HGraph(&allocator);
- HBasicBlock* entry_block = createGotoBlock(graph, &allocator);
+ HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
HBasicBlock* first_if_block = createIfBlock(graph, &allocator);
HBasicBlock* if_block = createIfBlock(graph, &allocator);
HBasicBlock* loop_block = createGotoBlock(graph, &allocator);
HBasicBlock* return_block = createReturnBlock(graph, &allocator);
- graph->SetEntryBlock(entry_block);
entry_block->AddSuccessor(first_if_block);
first_if_block->AddSuccessor(if_block);
first_if_block->AddSuccessor(loop_block);
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index cabfa48..49c0d38 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -149,6 +149,8 @@
codegen_.DumpCoreRegister(output_, location.low());
output_ << " and ";
codegen_.DumpCoreRegister(output_, location.high());
+ } else if (location.IsUnallocated()) {
+ output_ << "<U>";
} else {
DCHECK(location.IsDoubleStackSlot());
output_ << "2x" << location.GetStackIndex() << "(sp)";
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index bd9267c..2c17a67 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -49,8 +49,11 @@
for (HInstruction* instruction = block->GetFirstInstruction(); instruction != nullptr;) {
HInstruction* next = instruction->GetNext();
HInvokeStaticOrDirect* call = instruction->AsInvokeStaticOrDirect();
- if (call != nullptr) {
- if (!TryInline(call, call->GetDexMethodIndex(), call->GetInvokeType())) {
+ // As long as the call is not intrinsified, it is worth trying to inline.
+ if (call != nullptr && call->GetIntrinsic() == Intrinsics::kNone) {
+ // We use the original invoke type to ensure the resolution of the called method
+ // works properly.
+ if (!TryInline(call, call->GetDexMethodIndex(), call->GetOriginalInvokeType())) {
if (kIsDebugBuild) {
std::string callee_name =
PrettyMethod(call->GetDexMethodIndex(), *outer_compilation_unit_.GetDexFile());
@@ -68,64 +71,87 @@
uint32_t method_index,
InvokeType invoke_type) const {
ScopedObjectAccess soa(Thread::Current());
- const DexFile& outer_dex_file = *outer_compilation_unit_.GetDexFile();
- VLOG(compiler) << "Try inlining " << PrettyMethod(method_index, outer_dex_file);
+ const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
+ VLOG(compiler) << "Try inlining " << PrettyMethod(method_index, caller_dex_file);
StackHandleScope<3> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(
- hs.NewHandle(outer_compilation_unit_.GetClassLinker()->FindDexCache(outer_dex_file)));
+ hs.NewHandle(caller_compilation_unit_.GetClassLinker()->FindDexCache(caller_dex_file)));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader*>(outer_compilation_unit_.GetClassLoader())));
+ soa.Decode<mirror::ClassLoader*>(caller_compilation_unit_.GetClassLoader())));
Handle<mirror::ArtMethod> resolved_method(hs.NewHandle(
compiler_driver_->ResolveMethod(
- soa, dex_cache, class_loader, &outer_compilation_unit_, method_index, invoke_type)));
+ soa, dex_cache, class_loader, &caller_compilation_unit_, method_index, invoke_type)));
if (resolved_method.Get() == nullptr) {
- VLOG(compiler) << "Method cannot be resolved " << PrettyMethod(method_index, outer_dex_file);
+ VLOG(compiler) << "Method cannot be resolved " << PrettyMethod(method_index, caller_dex_file);
return false;
}
+ bool can_use_dex_cache = true;
+ const DexFile& outer_dex_file = *outer_compilation_unit_.GetDexFile();
if (resolved_method->GetDexFile()->GetLocation().compare(outer_dex_file.GetLocation()) != 0) {
- VLOG(compiler) << "Did not inline "
- << PrettyMethod(method_index, outer_dex_file)
- << " because it is in a different dex file";
- return false;
+ can_use_dex_cache = false;
}
const DexFile::CodeItem* code_item = resolved_method->GetCodeItem();
if (code_item == nullptr) {
- VLOG(compiler) << "Method " << PrettyMethod(method_index, outer_dex_file)
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
<< " is not inlined because it is native";
return false;
}
if (code_item->insns_size_in_code_units_ > kMaxInlineCodeUnits) {
- VLOG(compiler) << "Method " << PrettyMethod(method_index, outer_dex_file)
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
<< " is too big to inline";
return false;
}
if (code_item->tries_size_ != 0) {
- VLOG(compiler) << "Method " << PrettyMethod(method_index, outer_dex_file)
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
<< " is not inlined because of try block";
return false;
}
if (!resolved_method->GetDeclaringClass()->IsVerified()) {
- VLOG(compiler) << "Method " << PrettyMethod(method_index, outer_dex_file)
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
<< " is not inlined because its class could not be verified";
return false;
}
+ if (resolved_method->ShouldNotInline()) {
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
+ << " was already flagged as non inlineable";
+ return false;
+ }
+
+ if (!TryBuildAndInline(resolved_method, invoke_instruction, method_index, can_use_dex_cache)) {
+ resolved_method->SetShouldNotInline();
+ return false;
+ }
+
+ VLOG(compiler) << "Successfully inlined " << PrettyMethod(method_index, caller_dex_file);
+ MaybeRecordStat(kInlinedInvoke);
+ return true;
+}
+
+bool HInliner::TryBuildAndInline(Handle<mirror::ArtMethod> resolved_method,
+ HInvoke* invoke_instruction,
+ uint32_t method_index,
+ bool can_use_dex_cache) const {
+ ScopedObjectAccess soa(Thread::Current());
+ const DexFile::CodeItem* code_item = resolved_method->GetCodeItem();
+ const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
+
DexCompilationUnit dex_compilation_unit(
nullptr,
- outer_compilation_unit_.GetClassLoader(),
- outer_compilation_unit_.GetClassLinker(),
- outer_dex_file,
+ caller_compilation_unit_.GetClassLoader(),
+ caller_compilation_unit_.GetClassLinker(),
+ *resolved_method->GetDexFile(),
code_item,
resolved_method->GetDeclaringClass()->GetDexClassDefIndex(),
- method_index,
+ resolved_method->GetDexMethodIndex(),
resolved_method->GetAccessFlags(),
nullptr);
@@ -136,25 +162,25 @@
HGraphBuilder builder(callee_graph,
&dex_compilation_unit,
&outer_compilation_unit_,
- &outer_dex_file,
+ resolved_method->GetDexFile(),
compiler_driver_,
&inline_stats);
if (!builder.BuildGraph(*code_item)) {
- VLOG(compiler) << "Method " << PrettyMethod(method_index, outer_dex_file)
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
<< " could not be built, so cannot be inlined";
return false;
}
if (!RegisterAllocator::CanAllocateRegistersFor(*callee_graph,
compiler_driver_->GetInstructionSet())) {
- VLOG(compiler) << "Method " << PrettyMethod(method_index, outer_dex_file)
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
<< " cannot be inlined because of the register allocator";
return false;
}
if (!callee_graph->TryBuildingSsa()) {
- VLOG(compiler) << "Method " << PrettyMethod(method_index, outer_dex_file)
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
<< " could not be transformed to SSA";
return false;
}
@@ -176,8 +202,12 @@
}
if (depth_ + 1 < kDepthLimit) {
- HInliner inliner(
- callee_graph, outer_compilation_unit_, compiler_driver_, stats_, depth_ + 1);
+ HInliner inliner(callee_graph,
+ outer_compilation_unit_,
+ dex_compilation_unit,
+ compiler_driver_,
+ stats_,
+ depth_ + 1);
inliner.Run();
}
@@ -186,7 +216,7 @@
for (; !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
if (block->IsLoopHeader()) {
- VLOG(compiler) << "Method " << PrettyMethod(method_index, outer_dex_file)
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
<< " could not be inlined because it contains a loop";
return false;
}
@@ -200,18 +230,25 @@
}
if (current->CanThrow()) {
- VLOG(compiler) << "Method " << PrettyMethod(method_index, outer_dex_file)
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
<< " could not be inlined because " << current->DebugName()
<< " can throw";
return false;
}
if (current->NeedsEnvironment()) {
- VLOG(compiler) << "Method " << PrettyMethod(method_index, outer_dex_file)
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
<< " could not be inlined because " << current->DebugName()
<< " needs an environment";
return false;
}
+
+ if (!can_use_dex_cache && current->NeedsDexCache()) {
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
+ << " could not be inlined because " << current->DebugName()
+ << " it is in a different dex file and requires access to the dex cache";
+ return false;
+ }
}
}
@@ -225,8 +262,6 @@
// instruction id of the caller, so that new instructions added
// after optimizations get a unique id.
graph_->SetCurrentInstructionId(callee_graph->GetNextInstructionId());
- VLOG(compiler) << "Successfully inlined " << PrettyMethod(method_index, outer_dex_file);
- MaybeRecordStat(kInlinedInvoke);
return true;
}
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 2b08d3d..1dbc7d3 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -32,11 +32,13 @@
public:
HInliner(HGraph* outer_graph,
const DexCompilationUnit& outer_compilation_unit,
+ const DexCompilationUnit& caller_compilation_unit,
CompilerDriver* compiler_driver,
OptimizingCompilerStats* stats,
size_t depth = 0)
: HOptimization(outer_graph, true, kInlinerPassName, stats),
outer_compilation_unit_(outer_compilation_unit),
+ caller_compilation_unit_(caller_compilation_unit),
compiler_driver_(compiler_driver),
depth_(depth) {}
@@ -46,8 +48,13 @@
private:
bool TryInline(HInvoke* invoke_instruction, uint32_t method_index, InvokeType invoke_type) const;
+ bool TryBuildAndInline(Handle<mirror::ArtMethod> resolved_method,
+ HInvoke* invoke_instruction,
+ uint32_t method_index,
+ bool can_use_dex_cache) const;
const DexCompilationUnit& outer_compilation_unit_;
+ const DexCompilationUnit& caller_compilation_unit_;
CompilerDriver* const compiler_driver_;
const size_t depth_;
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 2ef19b9..56ec8a7 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -292,8 +292,7 @@
// MUL dst, src, pow_of_2
// with
// SHL dst, src, log2(pow_of_2)
- HIntConstant* shift = new (allocator) HIntConstant(WhichPowerOf2(factor));
- block->InsertInstructionBefore(shift, instruction);
+ HIntConstant* shift = GetGraph()->GetIntConstant(WhichPowerOf2(factor));
HShl* shl = new(allocator) HShl(type, input_other, shift);
block->ReplaceAndRemoveInstructionWith(instruction, shl);
}
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 36cf856..628a844 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -191,8 +191,10 @@
case kIntrinsicCompareTo:
return Intrinsics::kStringCompareTo;
case kIntrinsicIsEmptyOrLength:
- return ((method.d.data & kIntrinsicFlagIsEmpty) == 0) ?
- Intrinsics::kStringLength : Intrinsics::kStringIsEmpty;
+ // The inliner can handle these two cases - and this is the preferred approach
+ // since after inlining the call is no longer visible (as opposed to waiting
+ // until codegen to handle the intrinsic).
+ return Intrinsics::kNone;
case kIntrinsicIndexOf:
return ((method.d.data & kIntrinsicFlagBase0) == 0) ?
Intrinsics::kStringIndexOfAfter : Intrinsics::kStringIndexOf;
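
Returning kNone here is deliberate: once the inliner substitutes the body, String.isEmpty()/length() reduce to a visible field get that later passes can optimize, whereas a codegen-time intrinsic would keep the call opaque. Schematically, the inlined body is just:

    // What the inliner leaves behind for String.length() (schematic):
    //   HInstanceFieldGet(str, mirror::String::CountOffset(), Primitive::kPrimInt)
    // isEmpty() additionally compares that length against zero.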
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 0c9eb94..33176f0 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -847,6 +847,36 @@
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderARM::VisitStringCompareTo(HInvoke* invoke) {
+ // The inputs plus one temp.
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(Location::RegisterLocation(R0));
+}
+
+void IntrinsicCodeGeneratorARM::VisitStringCompareTo(HInvoke* invoke) {
+ ArmAssembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ // Note that the null check must have been done earlier.
+ DCHECK(!invoke->CanDoImplicitNullCheck());
+
+ Register argument = locations->InAt(1).AsRegister<Register>();
+ __ cmp(argument, ShifterOperand(0));
+ SlowPathCodeARM* slow_path = new (GetAllocator()) IntrinsicSlowPathARM(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ b(slow_path->GetEntryLabel(), EQ);
+
+ __ LoadFromOffset(
+ kLoadWord, LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pStringCompareTo).Int32Value());
+ __ blx(LR);
+ __ Bind(slow_path->GetExitLabel());
+}
+
// Unimplemented intrinsics.
#define UNIMPLEMENTED_INTRINSIC(Name) \
@@ -873,9 +903,6 @@
UNIMPLEMENTED_INTRINSIC(MathRoundFloat) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(UnsafeCASLong) // High register pressure.
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
-UNIMPLEMENTED_INTRINSIC(StringCompareTo)
-UNIMPLEMENTED_INTRINSIC(StringIsEmpty) // Might not want to do these two anyways, inlining should
-UNIMPLEMENTED_INTRINSIC(StringLength) // be good enough here.
UNIMPLEMENTED_INTRINSIC(StringIndexOf)
UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
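
Both CompareTo stubs share one pattern: an explicit null check on the argument that branches to the generic slow path, then a call through the pStringCompareTo quick entrypoint. The signature assumed here (the symbol is per-arch assembly and may differ):

    // extern "C" int32_t art_quick_string_compareto(mirror::String* self,
    //                                               mirror::String* rhs);
    // `self` is null-checked at the call site; `rhs` by the branch above.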
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 19b04ae..72d303c 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -40,6 +40,7 @@
using helpers::DRegisterFrom;
using helpers::FPRegisterFrom;
using helpers::HeapOperand;
+using helpers::LocationFrom;
using helpers::RegisterFrom;
using helpers::SRegisterFrom;
using helpers::WRegisterFrom;
@@ -990,6 +991,36 @@
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderARM64::VisitStringCompareTo(HInvoke* invoke) {
+ // The inputs plus one temp.
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
+}
+
+void IntrinsicCodeGeneratorARM64::VisitStringCompareTo(HInvoke* invoke) {
+ vixl::MacroAssembler* masm = GetVIXLAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ // Note that the null check must have been done earlier.
+ DCHECK(!invoke->CanDoImplicitNullCheck());
+
+ Register argument = WRegisterFrom(locations->InAt(1));
+ __ Cmp(argument, 0);
+ SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ B(eq, slow_path->GetEntryLabel());
+
+ __ Ldr(
+ lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pStringCompareTo).Int32Value()));
+ __ Blr(lr);
+ __ Bind(slow_path->GetExitLabel());
+}
+
// Unimplemented intrinsics.
#define UNIMPLEMENTED_INTRINSIC(Name) \
@@ -999,9 +1030,6 @@
}
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
-UNIMPLEMENTED_INTRINSIC(StringCompareTo)
-UNIMPLEMENTED_INTRINSIC(StringIsEmpty) // Might not want to do these two anyways, inlining should
-UNIMPLEMENTED_INTRINSIC(StringLength) // be good enough here.
UNIMPLEMENTED_INTRINSIC(StringIndexOf)
UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
diff --git a/compiler/optimizing/intrinsics_list.h b/compiler/optimizing/intrinsics_list.h
index 9cc77c6..10f6e1d 100644
--- a/compiler/optimizing/intrinsics_list.h
+++ b/compiler/optimizing/intrinsics_list.h
@@ -60,10 +60,8 @@
V(MemoryPokeShortNative, kStatic) \
V(StringCharAt, kDirect) \
V(StringCompareTo, kDirect) \
- V(StringIsEmpty, kDirect) \
V(StringIndexOf, kDirect) \
V(StringIndexOfAfter, kDirect) \
- V(StringLength, kDirect) \
V(UnsafeCASInt, kDirect) \
V(UnsafeCASLong, kDirect) \
V(UnsafeCASObject, kDirect) \
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
new file mode 100644
index 0000000..384737f
--- /dev/null
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -0,0 +1,1208 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "intrinsics_x86.h"
+
+#include "code_generator_x86.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "intrinsics.h"
+#include "mirror/array-inl.h"
+#include "mirror/art_method.h"
+#include "mirror/string.h"
+#include "thread.h"
+#include "utils/x86/assembler_x86.h"
+#include "utils/x86/constants_x86.h"
+
+namespace art {
+
+namespace x86 {
+
+static constexpr int kDoubleNaNHigh = 0x7FF80000;
+static constexpr int kDoubleNaNLow = 0x00000000;
+static constexpr int kFloatNaN = 0x7FC00000;
+
+X86Assembler* IntrinsicCodeGeneratorX86::GetAssembler() {
+ return reinterpret_cast<X86Assembler*>(codegen_->GetAssembler());
+}
+
+ArenaAllocator* IntrinsicCodeGeneratorX86::GetAllocator() {
+ return codegen_->GetGraph()->GetArena();
+}
+
+bool IntrinsicLocationsBuilderX86::TryDispatch(HInvoke* invoke) {
+ Dispatch(invoke);
+ LocationSummary* res = invoke->GetLocations();
+ return res != nullptr && res->Intrinsified();
+}
+
+#define __ reinterpret_cast<X86Assembler*>(codegen->GetAssembler())->
+
+// TODO: target as memory.
+static void MoveFromReturnRegister(Location target,
+ Primitive::Type type,
+ CodeGeneratorX86* codegen) {
+ if (!target.IsValid()) {
+ DCHECK(type == Primitive::kPrimVoid);
+ return;
+ }
+
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ Register target_reg = target.AsRegister<Register>();
+ if (target_reg != EAX) {
+ __ movl(target_reg, EAX);
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ Register target_reg_lo = target.AsRegisterPairLow<Register>();
+ Register target_reg_hi = target.AsRegisterPairHigh<Register>();
+ if (target_reg_lo != EAX) {
+ __ movl(target_reg_lo, EAX);
+ }
+ if (target_reg_hi != EDX) {
+ __ movl(target_reg_hi, EDX);
+ }
+ break;
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected void type for valid location " << target;
+ UNREACHABLE();
+
+ case Primitive::kPrimDouble: {
+ XmmRegister target_reg = target.AsFpuRegister<XmmRegister>();
+ if (target_reg != XMM0) {
+ __ movsd(target_reg, XMM0);
+ }
+ break;
+ }
+ case Primitive::kPrimFloat: {
+ XmmRegister target_reg = target.AsFpuRegister<XmmRegister>();
+ if (target_reg != XMM0) {
+ __ movss(target_reg, XMM0);
+ }
+ break;
+ }
+ }
+}
+
+static void MoveArguments(HInvoke* invoke, ArenaAllocator* arena, CodeGeneratorX86* codegen) {
+ if (invoke->InputCount() == 0) {
+ return;
+ }
+
+ LocationSummary* locations = invoke->GetLocations();
+ InvokeDexCallingConventionVisitor calling_convention_visitor;
+
+ // We're moving potentially two or more locations to locations that could overlap, so we need
+ // a parallel move resolver.
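+  // (The resolver computes a safe ordering and breaks cycles between
+  // overlapping locations with a swap or a temporary.)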
+ HParallelMove parallel_move(arena);
+
+ for (size_t i = 0; i < invoke->InputCount(); i++) {
+ HInstruction* input = invoke->InputAt(i);
+ Location cc_loc = calling_convention_visitor.GetNextLocation(input->GetType());
+ Location actual_loc = locations->InAt(i);
+
+ parallel_move.AddMove(actual_loc, cc_loc, nullptr);
+ }
+
+  codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+}
+
+// Slow-path for fallback (calling the managed code to handle the intrinsic) in an intrinsified
+// call. This will copy the arguments into the positions for a regular call.
+//
+// Note: The actual parameters are required to be in the locations given by the invoke's location
+// summary. If an intrinsic modifies those locations before a slowpath call, they must be
+// restored!
+class IntrinsicSlowPathX86 : public SlowPathCodeX86 {
+ public:
+ explicit IntrinsicSlowPathX86(HInvoke* invoke, Register temp)
+ : invoke_(invoke) {
+ // The temporary register has to be EAX for x86 invokes.
+ DCHECK_EQ(temp, EAX);
+ }
+
+ void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+ CodeGeneratorX86* codegen = down_cast<CodeGeneratorX86*>(codegen_in);
+ __ Bind(GetEntryLabel());
+
+ SaveLiveRegisters(codegen, invoke_->GetLocations());
+
+ MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
+
+ if (invoke_->IsInvokeStaticOrDirect()) {
+ codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), EAX);
+ } else {
+ UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
+ UNREACHABLE();
+ }
+
+ // Copy the result back to the expected output.
+ Location out = invoke_->GetLocations()->Out();
+ if (out.IsValid()) {
+ DCHECK(out.IsRegister()); // TODO: Replace this when we support output in memory.
+ DCHECK(!invoke_->GetLocations()->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+ MoveFromReturnRegister(out, invoke_->GetType(), codegen);
+ }
+
+ RestoreLiveRegisters(codegen, invoke_->GetLocations());
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ // The instruction where this slow path is happening.
+ HInvoke* const invoke_;
+
+ DISALLOW_COPY_AND_ASSIGN(IntrinsicSlowPathX86);
+};
+
+#undef __
+#define __ assembler->
+
+static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke, bool is64bit) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
+ if (is64bit) {
+ locations->AddTemp(Location::RequiresFpuRegister());
+ }
+}
+
+static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke, bool is64bit) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ if (is64bit) {
+ locations->AddTemp(Location::RequiresFpuRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ }
+}
+
+static void MoveFPToInt(LocationSummary* locations, bool is64bit, X86Assembler* assembler) {
+ Location input = locations->InAt(0);
+ Location output = locations->Out();
+ if (is64bit) {
+ // Need to use the temporary.
+ XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ __ movsd(temp, input.AsFpuRegister<XmmRegister>());
+ __ movd(output.AsRegisterPairLow<Register>(), temp);
+ __ psrlq(temp, Immediate(32));
+ __ movd(output.AsRegisterPairHigh<Register>(), temp);
+ } else {
+ __ movd(output.AsRegister<Register>(), input.AsFpuRegister<XmmRegister>());
+ }
+}
+
+static void MoveIntToFP(LocationSummary* locations, bool is64bit, X86Assembler* assembler) {
+ Location input = locations->InAt(0);
+ Location output = locations->Out();
+ if (is64bit) {
+ // Need to use the temporary.
+ XmmRegister temp1 = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ XmmRegister temp2 = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+ __ movd(temp1, input.AsRegisterPairLow<Register>());
+ __ movd(temp2, input.AsRegisterPairHigh<Register>());
+ __ punpckldq(temp1, temp2);
+ __ movsd(output.AsFpuRegister<XmmRegister>(), temp1);
+ } else {
+ __ movd(output.AsFpuRegister<XmmRegister>(), input.AsRegister<Register>());
+ }
+}
+
+void IntrinsicLocationsBuilderX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
+ CreateFPToIntLocations(arena_, invoke, true);
+}
+void IntrinsicLocationsBuilderX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
+ CreateIntToFPLocations(arena_, invoke, true);
+}
+
+void IntrinsicCodeGeneratorX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
+ MoveFPToInt(invoke->GetLocations(), true, GetAssembler());
+}
+void IntrinsicCodeGeneratorX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
+ MoveIntToFP(invoke->GetLocations(), true, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
+ CreateFPToIntLocations(arena_, invoke, false);
+}
+void IntrinsicLocationsBuilderX86::VisitFloatIntBitsToFloat(HInvoke* invoke) {
+ CreateIntToFPLocations(arena_, invoke, false);
+}
+
+void IntrinsicCodeGeneratorX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
+ MoveFPToInt(invoke->GetLocations(), false, GetAssembler());
+}
+void IntrinsicCodeGeneratorX86::VisitFloatIntBitsToFloat(HInvoke* invoke) {
+ MoveIntToFP(invoke->GetLocations(), false, GetAssembler());
+}
+
+static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+}
+
+static void CreateLongToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+static void CreateLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+static void GenReverseBytes(LocationSummary* locations,
+ Primitive::Type size,
+ X86Assembler* assembler) {
+ Register out = locations->Out().AsRegister<Register>();
+
+ switch (size) {
+ case Primitive::kPrimShort:
+ // TODO: Can be done with an xchg of 8b registers. This is straight from Quick.
+ __ bswapl(out);
+ __ sarl(out, Immediate(16));
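+      // bswapl reverses all four bytes, leaving the short's two bytes swapped
+      // in the upper half; the arithmetic shift brings them back down with
+      // correct sign extension.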
+ break;
+ case Primitive::kPrimInt:
+ __ bswapl(out);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected size for reverse-bytes: " << size;
+ UNREACHABLE();
+ }
+}
+
+void IntrinsicLocationsBuilderX86::VisitIntegerReverseBytes(HInvoke* invoke) {
+ CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitIntegerReverseBytes(HInvoke* invoke) {
+ GenReverseBytes(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderX86::VisitShortReverseBytes(HInvoke* invoke) {
+ CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitShortReverseBytes(HInvoke* invoke) {
+ GenReverseBytes(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
+}
+
+
+// TODO: Consider Quick's way of doing Double abs through integer operations, as the immediate we
+// need is 64b.
+
+static void CreateFloatToFloat(ArenaAllocator* arena, HInvoke* invoke) {
+ // TODO: Enable memory operations when the assembler supports them.
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ // TODO: Allow x86 to work with memory. This requires assembler support, see below.
+ // locations->SetInAt(0, Location::Any()); // X86 can work on memory directly.
+ locations->SetOut(Location::SameAsFirstInput());
+}
+
+static void MathAbsFP(LocationSummary* locations, bool is64bit, X86Assembler* assembler) {
+ Location output = locations->Out();
+
+ if (output.IsFpuRegister()) {
+ // Create the right constant on an aligned stack.
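+    // Both paths below move ESP down by 16 bytes in total, so the frame's
+    // 16-byte stack alignment is preserved for andps/andpd, which require an
+    // aligned memory operand.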
+ if (is64bit) {
+ __ subl(ESP, Immediate(8));
+ __ pushl(Immediate(0x7FFFFFFF));
+ __ pushl(Immediate(0xFFFFFFFF));
+ __ andpd(output.AsFpuRegister<XmmRegister>(), Address(ESP, 0));
+ } else {
+ __ subl(ESP, Immediate(12));
+ __ pushl(Immediate(0x7FFFFFFF));
+ __ andps(output.AsFpuRegister<XmmRegister>(), Address(ESP, 0));
+ }
+ __ addl(ESP, Immediate(16));
+ } else {
+    // TODO: update when assembler support is available.
+ UNIMPLEMENTED(FATAL) << "Needs assembler support.";
+// Once assembler support is available, in-memory operations look like this:
+// if (is64bit) {
+// DCHECK(output.IsDoubleStackSlot());
+//      __ andl(Address(Register(ESP), output.GetHighStackIndex(kX86WordSize)),
+// Immediate(0x7FFFFFFF));
+// } else {
+// DCHECK(output.IsStackSlot());
+// // Can use and with a literal directly.
+//      __ andl(Address(Register(ESP), output.GetStackIndex()), Immediate(0x7FFFFFFF));
+// }
+ }
+}
+
+void IntrinsicLocationsBuilderX86::VisitMathAbsDouble(HInvoke* invoke) {
+ CreateFloatToFloat(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMathAbsDouble(HInvoke* invoke) {
+ MathAbsFP(invoke->GetLocations(), true, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderX86::VisitMathAbsFloat(HInvoke* invoke) {
+ CreateFloatToFloat(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMathAbsFloat(HInvoke* invoke) {
+ MathAbsFP(invoke->GetLocations(), false, GetAssembler());
+}
+
+static void CreateAbsIntLocation(ArenaAllocator* arena, HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RegisterLocation(EAX));
+ locations->SetOut(Location::SameAsFirstInput());
+ locations->AddTemp(Location::RegisterLocation(EDX));
+}
+
+static void GenAbsInteger(LocationSummary* locations, X86Assembler* assembler) {
+ Location output = locations->Out();
+ Register out = output.AsRegister<Register>();
+ DCHECK_EQ(out, EAX);
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+ DCHECK_EQ(temp, EDX);
+
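+  // Branchless abs: after cdq, EDX is all ones for a negative EAX and zero
+  // otherwise, so (EAX ^ EDX) - EDX negates negatives and leaves
+  // non-negative values unchanged.
+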
+ // Sign extend EAX into EDX.
+ __ cdq();
+
+ // XOR EAX with sign.
+ __ xorl(EAX, EDX);
+
+ // Subtract out sign to correct.
+ __ subl(EAX, EDX);
+
+ // The result is in EAX.
+}
+
+static void CreateAbsLongLocation(ArenaAllocator* arena, HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+static void GenAbsLong(LocationSummary* locations, X86Assembler* assembler) {
+ Location input = locations->InAt(0);
+ Register input_lo = input.AsRegisterPairLow<Register>();
+ Register input_hi = input.AsRegisterPairHigh<Register>();
+ Location output = locations->Out();
+ Register output_lo = output.AsRegisterPairLow<Register>();
+ Register output_hi = output.AsRegisterPairHigh<Register>();
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+
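+  // 64-bit variant of the branchless abs: replicate the sign of the high
+  // word, XOR it into both halves, then subtract it with borrow propagation.
+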
+ // Compute the sign into the temporary.
+ __ movl(temp, input_hi);
+ __ sarl(temp, Immediate(31));
+
+ // Store the sign into the output.
+ __ movl(output_lo, temp);
+ __ movl(output_hi, temp);
+
+ // XOR the input to the output.
+ __ xorl(output_lo, input_lo);
+ __ xorl(output_hi, input_hi);
+
+ // Subtract the sign.
+ __ subl(output_lo, temp);
+ __ sbbl(output_hi, temp);
+}
+
+void IntrinsicLocationsBuilderX86::VisitMathAbsInt(HInvoke* invoke) {
+ CreateAbsIntLocation(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMathAbsInt(HInvoke* invoke) {
+ GenAbsInteger(invoke->GetLocations(), GetAssembler());
+}
+
+void IntrinsicLocationsBuilderX86::VisitMathAbsLong(HInvoke* invoke) {
+ CreateAbsLongLocation(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMathAbsLong(HInvoke* invoke) {
+ GenAbsLong(invoke->GetLocations(), GetAssembler());
+}
+
+static void GenMinMaxFP(LocationSummary* locations, bool is_min, bool is_double,
+ X86Assembler* assembler) {
+ Location op1_loc = locations->InAt(0);
+ Location op2_loc = locations->InAt(1);
+ Location out_loc = locations->Out();
+ XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
+
+ // Shortcut for same input locations.
+ if (op1_loc.Equals(op2_loc)) {
+ DCHECK(out_loc.Equals(op1_loc));
+ return;
+ }
+
+ // (out := op1)
+ // out <=? op2
+ // if Nan jmp Nan_label
+ // if out is min jmp done
+ // if op2 is min jmp op2_label
+ // handle -0/+0
+ // jmp done
+ // Nan_label:
+ // out := NaN
+ // op2_label:
+ // out := op2
+ // done:
+ //
+ // This removes one jmp, but needs to copy one input (op1) to out.
+ //
+ // TODO: This is straight from Quick (except literal pool). Make NaN an out-of-line slowpath?
+
+ XmmRegister op2 = op2_loc.AsFpuRegister<XmmRegister>();
+
+ Label nan, done, op2_label;
+ if (is_double) {
+ __ ucomisd(out, op2);
+ } else {
+ __ ucomiss(out, op2);
+ }
+
+ __ j(Condition::kParityEven, &nan);
+
+ __ j(is_min ? Condition::kAbove : Condition::kBelow, &op2_label);
+ __ j(is_min ? Condition::kBelow : Condition::kAbove, &done);
+
+ // Handle 0.0/-0.0.
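+  // ucomis* reports +0.0 and -0.0 as equal, yet min must return -0.0 and max
+  // must return +0.0. OR-ing the raw bit patterns sets the sign bit (giving
+  // -0.0); AND-ing clears it (giving +0.0).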
+ if (is_min) {
+ if (is_double) {
+ __ orpd(out, op2);
+ } else {
+ __ orps(out, op2);
+ }
+ } else {
+ if (is_double) {
+ __ andpd(out, op2);
+ } else {
+ __ andps(out, op2);
+ }
+ }
+ __ jmp(&done);
+
+ // NaN handling.
+ __ Bind(&nan);
+ if (is_double) {
+ __ pushl(Immediate(kDoubleNaNHigh));
+ __ pushl(Immediate(kDoubleNaNLow));
+ __ movsd(out, Address(ESP, 0));
+ __ addl(ESP, Immediate(8));
+ } else {
+ __ pushl(Immediate(kFloatNaN));
+ __ movss(out, Address(ESP, 0));
+ __ addl(ESP, Immediate(4));
+ }
+ __ jmp(&done);
+
+ // out := op2;
+ __ Bind(&op2_label);
+ if (is_double) {
+ __ movsd(out, op2);
+ } else {
+ __ movss(out, op2);
+ }
+
+ // Done.
+ __ Bind(&done);
+}
+
+static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ // The following is sub-optimal, but all we can do for now. It would be fine to also accept
+ // the second input to be the output (we can simply swap inputs).
+ locations->SetOut(Location::SameAsFirstInput());
+}
+
+void IntrinsicLocationsBuilderX86::VisitMathMinDoubleDouble(HInvoke* invoke) {
+ CreateFPFPToFPLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMathMinDoubleDouble(HInvoke* invoke) {
+ GenMinMaxFP(invoke->GetLocations(), true, true, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderX86::VisitMathMinFloatFloat(HInvoke* invoke) {
+ CreateFPFPToFPLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMathMinFloatFloat(HInvoke* invoke) {
+ GenMinMaxFP(invoke->GetLocations(), true, false, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderX86::VisitMathMaxDoubleDouble(HInvoke* invoke) {
+ CreateFPFPToFPLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMathMaxDoubleDouble(HInvoke* invoke) {
+ GenMinMaxFP(invoke->GetLocations(), false, true, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderX86::VisitMathMaxFloatFloat(HInvoke* invoke) {
+ CreateFPFPToFPLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMathMaxFloatFloat(HInvoke* invoke) {
+ GenMinMaxFP(invoke->GetLocations(), false, false, GetAssembler());
+}
+
+static void GenMinMax(LocationSummary* locations, bool is_min, bool is_long,
+ X86Assembler* assembler) {
+ Location op1_loc = locations->InAt(0);
+ Location op2_loc = locations->InAt(1);
+
+ // Shortcut for same input locations.
+ if (op1_loc.Equals(op2_loc)) {
+ // Can return immediately, as op1_loc == out_loc.
+ // Note: if we ever support separate registers, e.g., output into memory, we need to check for
+ // a copy here.
+ DCHECK(locations->Out().Equals(op1_loc));
+ return;
+ }
+
+ if (is_long) {
+ // Need to perform a subtract to get the sign right.
+ // op1 is already in the same location as the output.
+ Location output = locations->Out();
+ Register output_lo = output.AsRegisterPairLow<Register>();
+ Register output_hi = output.AsRegisterPairHigh<Register>();
+
+ Register op2_lo = op2_loc.AsRegisterPairLow<Register>();
+ Register op2_hi = op2_loc.AsRegisterPairHigh<Register>();
+
+ // Spare register to compute the subtraction to set condition code.
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+
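+    // x86-32 has no 64-bit compare, so the flags are produced by a two-step
+    // subtract (subl, then sbbl with the borrow); the numeric result is
+    // discarded into temp and only the condition codes are kept.
+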
+    // Subtract off op2_lo.
+ __ movl(temp, output_lo);
+ __ subl(temp, op2_lo);
+
+    // Now use the same temp and the borrow to finish the subtraction of op2_hi.
+ __ movl(temp, output_hi);
+ __ sbbl(temp, op2_hi);
+
+ // Now the condition code is correct.
+ Condition cond = is_min ? Condition::kGreaterEqual : Condition::kLess;
+ __ cmovl(cond, output_lo, op2_lo);
+ __ cmovl(cond, output_hi, op2_hi);
+ } else {
+ Register out = locations->Out().AsRegister<Register>();
+ Register op2 = op2_loc.AsRegister<Register>();
+
+ // (out := op1)
+ // out <=? op2
+ // if out is min jmp done
+ // out := op2
+ // done:
+
+ __ cmpl(out, op2);
+ Condition cond = is_min ? Condition::kGreater : Condition::kLess;
+ __ cmovl(cond, out, op2);
+ }
+}
+
+static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+}
+
+static void CreateLongLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ // Register to use to perform a long subtract to set cc.
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicLocationsBuilderX86::VisitMathMinIntInt(HInvoke* invoke) {
+ CreateIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMathMinIntInt(HInvoke* invoke) {
+ GenMinMax(invoke->GetLocations(), true, false, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderX86::VisitMathMinLongLong(HInvoke* invoke) {
+ CreateLongLongToLongLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMathMinLongLong(HInvoke* invoke) {
+ GenMinMax(invoke->GetLocations(), true, true, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderX86::VisitMathMaxIntInt(HInvoke* invoke) {
+ CreateIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMathMaxIntInt(HInvoke* invoke) {
+ GenMinMax(invoke->GetLocations(), false, false, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderX86::VisitMathMaxLongLong(HInvoke* invoke) {
+ CreateLongLongToLongLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMathMaxLongLong(HInvoke* invoke) {
+ GenMinMax(invoke->GetLocations(), false, true, GetAssembler());
+}
+
+static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+}
+
+void IntrinsicLocationsBuilderX86::VisitMathSqrt(HInvoke* invoke) {
+ CreateFPToFPLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMathSqrt(HInvoke* invoke) {
+ LocationSummary* locations = invoke->GetLocations();
+ XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
+ XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
+
+ GetAssembler()->sqrtsd(out, in);
+}
+
+void IntrinsicLocationsBuilderX86::VisitStringCharAt(HInvoke* invoke) {
+ // The inputs plus one temp.
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ // Needs to be EAX for the invoke.
+ locations->AddTemp(Location::RegisterLocation(EAX));
+}
+
+void IntrinsicCodeGeneratorX86::VisitStringCharAt(HInvoke* invoke) {
+ LocationSummary* locations = invoke->GetLocations();
+
+ // Location of reference to data array
+ const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
+ // Location of count
+ const int32_t count_offset = mirror::String::CountOffset().Int32Value();
+ // Starting offset within data array
+ const int32_t offset_offset = mirror::String::OffsetOffset().Int32Value();
+  // Start of char data within array_
+ const int32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
+
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Register idx = locations->InAt(1).AsRegister<Register>();
+ Register out = locations->Out().AsRegister<Register>();
+ Location temp_loc = locations->GetTemp(0);
+ Register temp = temp_loc.AsRegister<Register>();
+
+ // TODO: Maybe we can support range check elimination. Overall, though, I think it's not worth
+ // the cost.
+  // TODO: For simplicity, the index parameter is requested in a register, so unlike Quick
+  // we do not optimize the code for constants (which would save a register).
+
+ SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke, temp);
+ codegen_->AddSlowPath(slow_path);
+
+ X86Assembler* assembler = GetAssembler();
+
+ __ cmpl(idx, Address(obj, count_offset));
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ __ j(kAboveEqual, slow_path->GetEntryLabel());
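+  // (kAboveEqual is an unsigned compare, so a negative index is also sent to
+  // the slow path by this single branch.)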
+
+ // Get the actual element.
+ __ movl(temp, idx); // temp := idx.
+ __ addl(temp, Address(obj, offset_offset)); // temp := offset + idx.
+  __ movl(out, Address(obj, value_offset));   // out := obj.array.
+ // out = out[2*temp].
+ __ movzxw(out, Address(out, temp, ScaleFactor::TIMES_2, data_offset));
+
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void IntrinsicLocationsBuilderX86::VisitStringCompareTo(HInvoke* invoke) {
+ // The inputs plus one temp.
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(Location::RegisterLocation(EAX));
+ // Needs to be EAX for the invoke.
+ locations->AddTemp(Location::RegisterLocation(EAX));
+}
+
+void IntrinsicCodeGeneratorX86::VisitStringCompareTo(HInvoke* invoke) {
+ X86Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ // Note that the null check must have been done earlier.
+ DCHECK(!invoke->CanDoImplicitNullCheck());
+
+ Register argument = locations->InAt(1).AsRegister<Register>();
+ __ testl(argument, argument);
+ SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(
+ invoke, locations->GetTemp(0).AsRegister<Register>());
+ codegen_->AddSlowPath(slow_path);
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pStringCompareTo)));
+ __ Bind(slow_path->GetExitLabel());
+}
+
+static void GenPeek(LocationSummary* locations, Primitive::Type size, X86Assembler* assembler) {
+ Register address = locations->InAt(0).AsRegisterPairLow<Register>();
+ Location out_loc = locations->Out();
+ // x86 allows unaligned access. We do not have to check the input or use specific instructions
+ // to avoid a SIGBUS.
+ switch (size) {
+ case Primitive::kPrimByte:
+ __ movsxb(out_loc.AsRegister<Register>(), Address(address, 0));
+ break;
+ case Primitive::kPrimShort:
+ __ movsxw(out_loc.AsRegister<Register>(), Address(address, 0));
+ break;
+ case Primitive::kPrimInt:
+ __ movl(out_loc.AsRegister<Register>(), Address(address, 0));
+ break;
+ case Primitive::kPrimLong:
+ __ movl(out_loc.AsRegisterPairLow<Register>(), Address(address, 0));
+ __ movl(out_loc.AsRegisterPairHigh<Register>(), Address(address, 4));
+ break;
+ default:
+ LOG(FATAL) << "Type not recognized for peek: " << size;
+ UNREACHABLE();
+ }
+}
+
+void IntrinsicLocationsBuilderX86::VisitMemoryPeekByte(HInvoke* invoke) {
+ CreateLongToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMemoryPeekByte(HInvoke* invoke) {
+ GenPeek(invoke->GetLocations(), Primitive::kPrimByte, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderX86::VisitMemoryPeekIntNative(HInvoke* invoke) {
+ CreateLongToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMemoryPeekIntNative(HInvoke* invoke) {
+ GenPeek(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderX86::VisitMemoryPeekLongNative(HInvoke* invoke) {
+ CreateLongToLongLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMemoryPeekLongNative(HInvoke* invoke) {
+ GenPeek(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderX86::VisitMemoryPeekShortNative(HInvoke* invoke) {
+ CreateLongToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMemoryPeekShortNative(HInvoke* invoke) {
+ GenPeek(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
+}
+
+static void CreateLongIntToVoidLocations(ArenaAllocator* arena, Primitive::Type size,
+ HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+  HInstruction* value = invoke->InputAt(1);
+ if (size == Primitive::kPrimByte) {
+ locations->SetInAt(1, Location::ByteRegisterOrConstant(EDX, value));
+ } else {
+ locations->SetInAt(1, Location::RegisterOrConstant(value));
+ }
+}
+
+static void GenPoke(LocationSummary* locations, Primitive::Type size, X86Assembler* assembler) {
+ Register address = locations->InAt(0).AsRegisterPairLow<Register>();
+ Location value_loc = locations->InAt(1);
+ // x86 allows unaligned access. We do not have to check the input or use specific instructions
+ // to avoid a SIGBUS.
+ switch (size) {
+ case Primitive::kPrimByte:
+ if (value_loc.IsConstant()) {
+ __ movb(Address(address, 0),
+ Immediate(value_loc.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ __ movb(Address(address, 0), value_loc.AsRegister<ByteRegister>());
+ }
+ break;
+ case Primitive::kPrimShort:
+ if (value_loc.IsConstant()) {
+ __ movw(Address(address, 0),
+ Immediate(value_loc.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ __ movw(Address(address, 0), value_loc.AsRegister<Register>());
+ }
+ break;
+ case Primitive::kPrimInt:
+ if (value_loc.IsConstant()) {
+ __ movl(Address(address, 0),
+ Immediate(value_loc.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ __ movl(Address(address, 0), value_loc.AsRegister<Register>());
+ }
+ break;
+ case Primitive::kPrimLong:
+ if (value_loc.IsConstant()) {
+ int64_t value = value_loc.GetConstant()->AsLongConstant()->GetValue();
+ __ movl(Address(address, 0), Immediate(Low32Bits(value)));
+ __ movl(Address(address, 4), Immediate(High32Bits(value)));
+ } else {
+ __ movl(Address(address, 0), value_loc.AsRegisterPairLow<Register>());
+ __ movl(Address(address, 4), value_loc.AsRegisterPairHigh<Register>());
+ }
+ break;
+ default:
+ LOG(FATAL) << "Type not recognized for poke: " << size;
+ UNREACHABLE();
+ }
+}
+
+void IntrinsicLocationsBuilderX86::VisitMemoryPokeByte(HInvoke* invoke) {
+ CreateLongIntToVoidLocations(arena_, Primitive::kPrimByte, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMemoryPokeByte(HInvoke* invoke) {
+ GenPoke(invoke->GetLocations(), Primitive::kPrimByte, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderX86::VisitMemoryPokeIntNative(HInvoke* invoke) {
+ CreateLongIntToVoidLocations(arena_, Primitive::kPrimInt, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMemoryPokeIntNative(HInvoke* invoke) {
+ GenPoke(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderX86::VisitMemoryPokeLongNative(HInvoke* invoke) {
+ CreateLongIntToVoidLocations(arena_, Primitive::kPrimLong, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMemoryPokeLongNative(HInvoke* invoke) {
+ GenPoke(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderX86::VisitMemoryPokeShortNative(HInvoke* invoke) {
+ CreateLongIntToVoidLocations(arena_, Primitive::kPrimShort, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMemoryPokeShortNative(HInvoke* invoke) {
+ GenPoke(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderX86::VisitThreadCurrentThread(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorX86::VisitThreadCurrentThread(HInvoke* invoke) {
+ Register out = invoke->GetLocations()->Out().AsRegister<Register>();
+ GetAssembler()->fs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86WordSize>()));
+}
+
+static void GenUnsafeGet(LocationSummary* locations, Primitive::Type type,
+ bool is_volatile, X86Assembler* assembler) {
+ Register base = locations->InAt(1).AsRegister<Register>();
+ Register offset = locations->InAt(2).AsRegisterPairLow<Register>();
+ Location output = locations->Out();
+
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ __ movl(output.AsRegister<Register>(), Address(base, offset, ScaleFactor::TIMES_1, 0));
+ break;
+
+ case Primitive::kPrimLong: {
+ Register output_lo = output.AsRegisterPairLow<Register>();
+ Register output_hi = output.AsRegisterPairHigh<Register>();
+ if (is_volatile) {
+ // Need to use a XMM to read atomically.
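+        // Two separate 32-bit loads could observe a torn value from a
+        // concurrent write; a single 8-byte SSE load of the aligned field
+        // is atomic.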
+ XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ __ movsd(temp, Address(base, offset, ScaleFactor::TIMES_1, 0));
+ __ movd(output_lo, temp);
+ __ psrlq(temp, Immediate(32));
+ __ movd(output_hi, temp);
+ } else {
+ __ movl(output_lo, Address(base, offset, ScaleFactor::TIMES_1, 0));
+ __ movl(output_hi, Address(base, offset, ScaleFactor::TIMES_1, 4));
+ }
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unsupported op size " << type;
+ UNREACHABLE();
+ }
+}
+
+static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke,
+ bool is_long, bool is_volatile) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RequiresRegister());
+ if (is_long) {
+ if (is_volatile) {
+ // Need to use XMM to read volatile.
+ locations->AddTemp(Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
+ } else {
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ }
+ } else {
+ locations->SetOut(Location::RequiresRegister());
+ }
+}
+
+void IntrinsicLocationsBuilderX86::VisitUnsafeGet(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke, false, false);
+}
+void IntrinsicLocationsBuilderX86::VisitUnsafeGetVolatile(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke, false, true);
+}
+void IntrinsicLocationsBuilderX86::VisitUnsafeGetLong(HInvoke* invoke) {
+  CreateIntIntIntToIntLocations(arena_, invoke, true, false);
+}
+void IntrinsicLocationsBuilderX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke, true, true);
+}
+void IntrinsicLocationsBuilderX86::VisitUnsafeGetObject(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke, false, false);
+}
+void IntrinsicLocationsBuilderX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke, false, true);
+}
+
+
+void IntrinsicCodeGeneratorX86::VisitUnsafeGet(HInvoke* invoke) {
+ GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimInt, false, GetAssembler());
+}
+void IntrinsicCodeGeneratorX86::VisitUnsafeGetVolatile(HInvoke* invoke) {
+ GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimInt, true, GetAssembler());
+}
+void IntrinsicCodeGeneratorX86::VisitUnsafeGetLong(HInvoke* invoke) {
+ GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimLong, false, GetAssembler());
+}
+void IntrinsicCodeGeneratorX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
+ GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimLong, true, GetAssembler());
+}
+void IntrinsicCodeGeneratorX86::VisitUnsafeGetObject(HInvoke* invoke) {
+ GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimNot, false, GetAssembler());
+}
+void IntrinsicCodeGeneratorX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
+ GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimNot, true, GetAssembler());
+}
+
+
+static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena,
+ Primitive::Type type,
+ HInvoke* invoke,
+ bool is_volatile) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(3, Location::RequiresRegister());
+ if (type == Primitive::kPrimNot) {
+ // Need temp registers for card-marking.
+ locations->AddTemp(Location::RequiresRegister());
+ // Ensure the value is in a byte register.
+ locations->AddTemp(Location::RegisterLocation(ECX));
+ } else if (type == Primitive::kPrimLong && is_volatile) {
+ locations->AddTemp(Location::RequiresFpuRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ }
+}
+
+void IntrinsicLocationsBuilderX86::VisitUnsafePut(HInvoke* invoke) {
+ CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimInt, invoke, false);
+}
+void IntrinsicLocationsBuilderX86::VisitUnsafePutOrdered(HInvoke* invoke) {
+ CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimInt, invoke, false);
+}
+void IntrinsicLocationsBuilderX86::VisitUnsafePutVolatile(HInvoke* invoke) {
+ CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimInt, invoke, true);
+}
+void IntrinsicLocationsBuilderX86::VisitUnsafePutObject(HInvoke* invoke) {
+ CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimNot, invoke, false);
+}
+void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
+ CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimNot, invoke, false);
+}
+void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
+ CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimNot, invoke, true);
+}
+void IntrinsicLocationsBuilderX86::VisitUnsafePutLong(HInvoke* invoke) {
+ CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimLong, invoke, false);
+}
+void IntrinsicLocationsBuilderX86::VisitUnsafePutLongOrdered(HInvoke* invoke) {
+ CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimLong, invoke, false);
+}
+void IntrinsicLocationsBuilderX86::VisitUnsafePutLongVolatile(HInvoke* invoke) {
+ CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimLong, invoke, true);
+}
+
+// Ordered writes need no extra code here: they only require an AnyStore
+// barrier, which the x86 memory model already provides.
+static void GenUnsafePut(LocationSummary* locations,
+ Primitive::Type type,
+ bool is_volatile,
+ CodeGeneratorX86* codegen) {
+ X86Assembler* assembler = reinterpret_cast<X86Assembler*>(codegen->GetAssembler());
+ Register base = locations->InAt(1).AsRegister<Register>();
+ Register offset = locations->InAt(2).AsRegisterPairLow<Register>();
+ Location value_loc = locations->InAt(3);
+
+ if (type == Primitive::kPrimLong) {
+ Register value_lo = value_loc.AsRegisterPairLow<Register>();
+ Register value_hi = value_loc.AsRegisterPairHigh<Register>();
+ if (is_volatile) {
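+      // Mirror of the volatile get: assemble the 64-bit pattern in an XMM
+      // register and store it with a single atomic 8-byte access.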
+ XmmRegister temp1 = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ XmmRegister temp2 = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+ __ movd(temp1, value_lo);
+ __ movd(temp2, value_hi);
+ __ punpckldq(temp1, temp2);
+ __ movsd(Address(base, offset, ScaleFactor::TIMES_1, 0), temp1);
+ } else {
+ __ movl(Address(base, offset, ScaleFactor::TIMES_1, 0), value_lo);
+ __ movl(Address(base, offset, ScaleFactor::TIMES_1, 4), value_hi);
+ }
+ } else {
+ __ movl(Address(base, offset, ScaleFactor::TIMES_1, 0), value_loc.AsRegister<Register>());
+ }
+
+ if (is_volatile) {
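+    // x86 orders everything except StoreLoad; a volatile store needs that
+    // last barrier too, hence the mfence after the store.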
+ __ mfence();
+ }
+
+ if (type == Primitive::kPrimNot) {
+ codegen->MarkGCCard(locations->GetTemp(0).AsRegister<Register>(),
+ locations->GetTemp(1).AsRegister<Register>(),
+ base,
+ value_loc.AsRegister<Register>());
+ }
+}
+
+void IntrinsicCodeGeneratorX86::VisitUnsafePut(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, false, codegen_);
+}
+void IntrinsicCodeGeneratorX86::VisitUnsafePutOrdered(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, false, codegen_);
+}
+void IntrinsicCodeGeneratorX86::VisitUnsafePutVolatile(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, true, codegen_);
+}
+void IntrinsicCodeGeneratorX86::VisitUnsafePutObject(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, false, codegen_);
+}
+void IntrinsicCodeGeneratorX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, false, codegen_);
+}
+void IntrinsicCodeGeneratorX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, true, codegen_);
+}
+void IntrinsicCodeGeneratorX86::VisitUnsafePutLong(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, false, codegen_);
+}
+void IntrinsicCodeGeneratorX86::VisitUnsafePutLongOrdered(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, false, codegen_);
+}
+void IntrinsicCodeGeneratorX86::VisitUnsafePutLongVolatile(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, true, codegen_);
+}
+
+// Unimplemented intrinsics.
+
+#define UNIMPLEMENTED_INTRINSIC(Name) \
+void IntrinsicLocationsBuilderX86::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
+} \
+void IntrinsicCodeGeneratorX86::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
+}
+
+UNIMPLEMENTED_INTRINSIC(IntegerReverse)
+UNIMPLEMENTED_INTRINSIC(LongReverse)
+UNIMPLEMENTED_INTRINSIC(LongReverseBytes)
+UNIMPLEMENTED_INTRINSIC(MathFloor)
+UNIMPLEMENTED_INTRINSIC(MathCeil)
+UNIMPLEMENTED_INTRINSIC(MathRint)
+UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
+UNIMPLEMENTED_INTRINSIC(MathRoundFloat)
+UNIMPLEMENTED_INTRINSIC(StringIndexOf)
+UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
+UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
+UNIMPLEMENTED_INTRINSIC(UnsafeCASInt)
+UNIMPLEMENTED_INTRINSIC(UnsafeCASLong)
+UNIMPLEMENTED_INTRINSIC(UnsafeCASObject)
+UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
+
+} // namespace x86
+} // namespace art
diff --git a/compiler/optimizing/intrinsics_x86.h b/compiler/optimizing/intrinsics_x86.h
new file mode 100644
index 0000000..e1e8260
--- /dev/null
+++ b/compiler/optimizing/intrinsics_x86.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
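+    // punpckldq interleaves the low dwords, so temp1 now holds hi:lo, the
+    // complete raw 64-bit pattern of the double.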
+
+#ifndef ART_COMPILER_OPTIMIZING_INTRINSICS_X86_H_
+#define ART_COMPILER_OPTIMIZING_INTRINSICS_X86_H_
+
+#include "intrinsics.h"
+
+namespace art {
+
+class ArenaAllocator;
+class HInvokeStaticOrDirect;
+class HInvokeVirtual;
+
+namespace x86 {
+
+class CodeGeneratorX86;
+class X86Assembler;
+
+class IntrinsicLocationsBuilderX86 FINAL : public IntrinsicVisitor {
+ public:
+ explicit IntrinsicLocationsBuilderX86(ArenaAllocator* arena) : arena_(arena) {}
+
+ // Define visitor methods.
+
+#define OPTIMIZING_INTRINSICS(Name, IsStatic) \
+ void Visit ## Name(HInvoke* invoke) OVERRIDE;
+#include "intrinsics_list.h"
+INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+
+ // Check whether an invoke is an intrinsic, and if so, create a location summary. Returns whether
+ // a corresponding LocationSummary with the intrinsified_ flag set was generated and attached to
+ // the invoke.
+ bool TryDispatch(HInvoke* invoke);
+
+ private:
+ ArenaAllocator* arena_;
+
+ DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86);
+};
+
+class IntrinsicCodeGeneratorX86 FINAL : public IntrinsicVisitor {
+ public:
+ explicit IntrinsicCodeGeneratorX86(CodeGeneratorX86* codegen) : codegen_(codegen) {}
+
+ // Define visitor methods.
+
+#define OPTIMIZING_INTRINSICS(Name, IsStatic) \
+ void Visit ## Name(HInvoke* invoke) OVERRIDE;
+#include "intrinsics_list.h"
+INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+
+ private:
+ X86Assembler* GetAssembler();
+
+ ArenaAllocator* GetAllocator();
+
+ CodeGeneratorX86* codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorX86);
+};
+
+} // namespace x86
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_INTRINSICS_X86_H_
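For orientation, a rough sketch of how the two visitor classes above pair up in
the optimizing compiler (the driver shown is hypothetical; TryDispatch and
Dispatch are the real entry points): the locations builder runs before register
allocation to attach an intrinsified LocationSummary, and the code generator
visitor emits the specialized code afterwards.

    // Hypothetical driver, assuming graph, invoke and codegen already exist.
    IntrinsicLocationsBuilderX86 locations_builder(graph->GetArena());
    if (locations_builder.TryDispatch(invoke)) {
      // ... register allocation runs in between ...
      IntrinsicCodeGeneratorX86 intrinsic_codegen(codegen);
      intrinsic_codegen.Dispatch(invoke);  // Emits the intrinsified code.
    }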
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 2064b18..736cea8 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -667,6 +667,34 @@
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderX86_64::VisitStringCompareTo(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(Location::RegisterLocation(RAX));
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitStringCompareTo(HInvoke* invoke) {
+ X86_64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ // Note that the null check must have been done earlier.
+ DCHECK(!invoke->CanDoImplicitNullCheck());
+
+ CpuRegister argument = locations->InAt(1).AsRegister<CpuRegister>();
+ __ testl(argument, argument);
+ SlowPathCodeX86_64* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+ __ gs()->call(Address::Absolute(
+ QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pStringCompareTo), true));
+ __ Bind(slow_path->GetExitLabel());
+}
+
static void GenPeek(LocationSummary* locations, Primitive::Type size, X86_64Assembler* assembler) {
CpuRegister address = locations->InAt(0).AsRegister<CpuRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>(); // == address, here for clarity.
@@ -986,9 +1014,6 @@
UNIMPLEMENTED_INTRINSIC(MathRint)
UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(MathRoundFloat)
-UNIMPLEMENTED_INTRINSIC(StringIsEmpty) // Might not want to do these two anyways, inlining should
-UNIMPLEMENTED_INTRINSIC(StringLength) // be good enough here.
-UNIMPLEMENTED_INTRINSIC(StringCompareTo)
UNIMPLEMENTED_INTRINSIC(StringIndexOf)
UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc
index 4ac1fe8..a1ae670 100644
--- a/compiler/optimizing/locations.cc
+++ b/compiler/optimizing/locations.cc
@@ -56,6 +56,19 @@
: Location::RequiresRegister();
}
+Location Location::RegisterOrInt32LongConstant(HInstruction* instruction) {
+ if (!instruction->IsConstant() || !instruction->AsConstant()->IsLongConstant()) {
+ return Location::RequiresRegister();
+ }
+
+ // Does the long constant fit in a 32 bit int?
+ int64_t value = instruction->AsConstant()->AsLongConstant()->GetValue();
+
+ return IsInt<32>(value)
+ ? Location::ConstantLocation(instruction->AsConstant())
+ : Location::RequiresRegister();
+}
+
Location Location::ByteRegisterOrConstant(int reg, HInstruction* instruction) {
return instruction->IsConstant()
? Location::ConstantLocation(instruction->AsConstant())
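RegisterOrInt32LongConstant exists because x86-64 instructions accept at most a
sign-extended 32-bit immediate, so a long constant can only be folded into the
instruction when its value survives a round trip through int32. A self-contained
restatement of the IsInt<32> test (illustrative, not ART code):

    #include <cstdint>

    // True iff value fits a signed 32-bit immediate, i.e. sign-extending its
    // low 32 bits reproduces the original value.
    bool FitsInInt32Immediate(int64_t value) {
      return value == static_cast<int64_t>(static_cast<int32_t>(value));
    }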
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 566c0da..de876be 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -345,6 +345,7 @@
}
static Location RegisterOrConstant(HInstruction* instruction);
+ static Location RegisterOrInt32LongConstant(HInstruction* instruction);
static Location ByteRegisterOrConstant(int reg, HInstruction* instruction);
// The location of the first input to the instruction will be
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index a90ebce..dca612e 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -185,7 +185,7 @@
if (successor->IsLoopHeader()) {
// If we split at a back edge boundary, make the new block the back edge.
HLoopInformation* info = successor->GetLoopInformation();
- if (info->IsBackEdge(block)) {
+ if (info->IsBackEdge(*block)) {
info->RemoveBackEdge(block);
info->AddBackEdge(new_block);
}
@@ -287,19 +287,72 @@
return true;
}
+void HGraph::InsertConstant(HConstant* constant) {
+ // New constants are inserted before the final control-flow instruction
+ // of the graph, or at its end if called from the graph builder.
+ if (entry_block_->EndsWithControlFlowInstruction()) {
+ entry_block_->InsertInstructionBefore(constant, entry_block_->GetLastInstruction());
+ } else {
+ entry_block_->AddInstruction(constant);
+ }
+}
+
HNullConstant* HGraph::GetNullConstant() {
if (cached_null_constant_ == nullptr) {
cached_null_constant_ = new (arena_) HNullConstant();
- entry_block_->InsertInstructionBefore(cached_null_constant_,
- entry_block_->GetLastInstruction());
+ InsertConstant(cached_null_constant_);
}
return cached_null_constant_;
}
+template <class InstructionType, typename ValueType>
+InstructionType* HGraph::CreateConstant(ValueType value,
+ ArenaSafeMap<ValueType, InstructionType*>* cache) {
+ // Try to find an existing constant of the given value.
+ InstructionType* constant = nullptr;
+ auto cached_constant = cache->find(value);
+ if (cached_constant != cache->end()) {
+ constant = cached_constant->second;
+ }
+
+ // If not found or previously deleted, create and cache a new instruction.
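+  // (An instruction removed from the graph has its block cleared, so a null
+  // block identifies a stale cache entry.)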
+ if (constant == nullptr || constant->GetBlock() == nullptr) {
+ constant = new (arena_) InstructionType(value);
+ cache->Overwrite(value, constant);
+ InsertConstant(constant);
+ }
+ return constant;
+}
+
+HConstant* HGraph::GetConstant(Primitive::Type type, int64_t value) {
+ switch (type) {
+ case Primitive::Type::kPrimBoolean:
+ DCHECK(IsUint<1>(value));
+ FALLTHROUGH_INTENDED;
+ case Primitive::Type::kPrimByte:
+ case Primitive::Type::kPrimChar:
+ case Primitive::Type::kPrimShort:
+ case Primitive::Type::kPrimInt:
+ DCHECK(IsInt(Primitive::ComponentSize(type) * kBitsPerByte, value));
+ return GetIntConstant(static_cast<int32_t>(value));
+
+ case Primitive::Type::kPrimLong:
+ return GetLongConstant(value);
+
+ default:
+ LOG(FATAL) << "Unsupported constant type";
+ UNREACHABLE();
+ }
+}
+
void HLoopInformation::Add(HBasicBlock* block) {
blocks_.SetBit(block->GetBlockId());
}
+void HLoopInformation::Remove(HBasicBlock* block) {
+ blocks_.ClearBit(block->GetBlockId());
+}
+
void HLoopInformation::PopulateRecursive(HBasicBlock* block) {
if (blocks_.IsBitSet(block->GetBlockId())) {
return;
@@ -621,7 +674,10 @@
void HGraphVisitor::VisitInsertionOrder() {
const GrowableArray<HBasicBlock*>& blocks = graph_->GetBlocks();
for (size_t i = 0 ; i < blocks.Size(); i++) {
- VisitBasicBlock(blocks.Get(i));
+ HBasicBlock* block = blocks.Get(i);
+ if (block != nullptr) {
+ VisitBasicBlock(block);
+ }
}
}
@@ -643,7 +699,7 @@
HConstant* HUnaryOperation::TryStaticEvaluation() const {
if (GetInput()->IsIntConstant()) {
int32_t value = Evaluate(GetInput()->AsIntConstant()->GetValue());
- return new(GetBlock()->GetGraph()->GetArena()) HIntConstant(value);
+ return GetBlock()->GetGraph()->GetIntConstant(value);
} else if (GetInput()->IsLongConstant()) {
// TODO: Implement static evaluation of long unary operations.
//
@@ -659,15 +715,15 @@
if (GetLeft()->IsIntConstant() && GetRight()->IsIntConstant()) {
int32_t value = Evaluate(GetLeft()->AsIntConstant()->GetValue(),
GetRight()->AsIntConstant()->GetValue());
- return new(GetBlock()->GetGraph()->GetArena()) HIntConstant(value);
+ return GetBlock()->GetGraph()->GetIntConstant(value);
} else if (GetLeft()->IsLongConstant() && GetRight()->IsLongConstant()) {
int64_t value = Evaluate(GetLeft()->AsLongConstant()->GetValue(),
GetRight()->AsLongConstant()->GetValue());
if (GetResultType() == Primitive::kPrimLong) {
- return new(GetBlock()->GetGraph()->GetArena()) HLongConstant(value);
+ return GetBlock()->GetGraph()->GetLongConstant(value);
} else {
DCHECK_EQ(GetResultType(), Primitive::kPrimInt);
- return new(GetBlock()->GetGraph()->GetArena()) HIntConstant(value);
+ return GetBlock()->GetGraph()->GetIntConstant(static_cast<int32_t>(value));
}
}
return nullptr;
@@ -700,16 +756,6 @@
return this == if_->GetPreviousDisregardingMoves();
}
-HConstant* HConstant::NewConstant(ArenaAllocator* allocator, Primitive::Type type, int64_t val) {
- if (type == Primitive::kPrimInt) {
- DCHECK(IsInt<32>(val));
- return new (allocator) HIntConstant(val);
- } else {
- DCHECK_EQ(type, Primitive::kPrimLong);
- return new (allocator) HLongConstant(val);
- }
-}
-
bool HInstruction::Equals(HInstruction* other) const {
if (!InstructionTypeEquals(other)) return false;
DCHECK_EQ(GetKind(), other->GetKind());
@@ -788,6 +834,29 @@
return new_block;
}
+bool HBasicBlock::IsSingleGoto() const {
+ HLoopInformation* loop_info = GetLoopInformation();
+ // TODO: Remove the null check b/19084197.
+ return GetFirstInstruction() != nullptr
+ && GetPhis().IsEmpty()
+ && GetFirstInstruction() == GetLastInstruction()
+ && GetLastInstruction()->IsGoto()
+ // Back edges generate the suspend check.
+ && (loop_info == nullptr || !loop_info->IsBackEdge(*this));
+}
+
+bool HBasicBlock::EndsWithControlFlowInstruction() const {
+ return !GetInstructions().IsEmpty() && GetLastInstruction()->IsControlFlow();
+}
+
+bool HBasicBlock::EndsWithIf() const {
+ return !GetInstructions().IsEmpty() && GetLastInstruction()->IsIf();
+}
+
+bool HBasicBlock::HasSinglePhi() const {
+ return !GetPhis().IsEmpty() && GetFirstPhi()->GetNext() == nullptr;
+}
+
void HInstructionList::SetBlockOfInstructions(HBasicBlock* block) const {
for (HInstruction* current = first_instruction_;
current != nullptr;
@@ -811,14 +880,35 @@
}
void HInstructionList::Add(const HInstructionList& instruction_list) {
- DCHECK(!IsEmpty());
- AddAfter(last_instruction_, instruction_list);
+ if (IsEmpty()) {
+ first_instruction_ = instruction_list.first_instruction_;
+ last_instruction_ = instruction_list.last_instruction_;
+ } else {
+ AddAfter(last_instruction_, instruction_list);
+ }
+}
+
+void HBasicBlock::DisconnectFromAll() {
+ DCHECK(dominated_blocks_.IsEmpty()) << "Unimplemented scenario";
+
+ for (size_t i = 0, e = predecessors_.Size(); i < e; ++i) {
+ predecessors_.Get(i)->successors_.Delete(this);
+ }
+ for (size_t i = 0, e = successors_.Size(); i < e; ++i) {
+ successors_.Get(i)->predecessors_.Delete(this);
+ }
+ dominator_->dominated_blocks_.Delete(this);
+
+ predecessors_.Reset();
+ successors_.Reset();
+ dominator_ = nullptr;
+ graph_ = nullptr;
}
void HBasicBlock::MergeWith(HBasicBlock* other) {
DCHECK(successors_.IsEmpty()) << "Unimplemented block merge scenario";
- DCHECK(dominated_blocks_.IsEmpty()) << "Unimplemented block merge scenario";
- DCHECK(other->GetDominator()->IsEntryBlock() && other->GetGraph() != graph_)
+ DCHECK(dominated_blocks_.IsEmpty()
+ || (dominated_blocks_.Size() == 1 && dominated_blocks_.Get(0) == other))
<< "Unimplemented block merge scenario";
DCHECK(other->GetPhis().IsEmpty());
@@ -1006,7 +1096,7 @@
if (info != nullptr) {
info->Add(to);
to->SetLoopInformation(info);
- if (info->IsBackEdge(at)) {
+ if (info->IsBackEdge(*at)) {
// Only `at` can become a back edge, as the inlined blocks
// are predecessors of `at`.
DCHECK_EQ(1u, info->NumberOfBackEdges());
@@ -1020,6 +1110,53 @@
invoke->GetBlock()->RemoveInstruction(invoke);
}
+void HGraph::MergeEmptyBranches(HBasicBlock* start_block, HBasicBlock* end_block) {
+ // Find the two branches of an If.
+ DCHECK_EQ(start_block->GetSuccessors().Size(), 2u);
+ HBasicBlock* left_branch = start_block->GetSuccessors().Get(0);
+ HBasicBlock* right_branch = start_block->GetSuccessors().Get(1);
+
+ // Make sure this is a diamond control-flow path.
+ DCHECK_EQ(left_branch->GetSuccessors().Get(0), end_block);
+ DCHECK_EQ(right_branch->GetSuccessors().Get(0), end_block);
+ DCHECK_EQ(end_block->GetPredecessors().Size(), 2u);
+ DCHECK_EQ(start_block, end_block->GetDominator());
+
+ // Disconnect the branches and merge the two blocks. This will move
+ // all instructions from 'end_block' to 'start_block'.
+ DCHECK(left_branch->IsSingleGoto());
+ DCHECK(right_branch->IsSingleGoto());
+ left_branch->DisconnectFromAll();
+ right_branch->DisconnectFromAll();
+ start_block->RemoveInstruction(start_block->GetLastInstruction());
+ start_block->MergeWith(end_block);
+
+ // Delete the now redundant blocks from the graph.
+ blocks_.Put(left_branch->GetBlockId(), nullptr);
+ blocks_.Put(right_branch->GetBlockId(), nullptr);
+ blocks_.Put(end_block->GetBlockId(), nullptr);
+
+ // Update reverse post order.
+ reverse_post_order_.Delete(left_branch);
+ reverse_post_order_.Delete(right_branch);
+ reverse_post_order_.Delete(end_block);
+
+ // Update loops which contain the code.
+ for (HLoopInformationOutwardIterator it(*start_block); !it.Done(); it.Advance()) {
+ HLoopInformation* loop_info = it.Current();
+ DCHECK(loop_info->Contains(*left_branch));
+ DCHECK(loop_info->Contains(*right_branch));
+ DCHECK(loop_info->Contains(*end_block));
+ loop_info->Remove(left_branch);
+ loop_info->Remove(right_branch);
+ loop_info->Remove(end_block);
+ if (loop_info->IsBackEdge(*end_block)) {
+ loop_info->RemoveBackEdge(end_block);
+ loop_info->AddBackEdge(start_block);
+ }
+ }
+}
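An illustrative sketch (editorial, not from the change) of the diamond shape this helper expects and the result it produces:

// Before:                                  After:
//
//        start_block  (ends with HIf)        start_block
//         /         \                        (If removed; end_block's
//   left_branch   right_branch                instructions appended)
//  (IsSingleGoto) (IsSingleGoto)
//         \         /
//          end_block
//
// The three absorbed blocks are nulled out in `blocks_` rather than erased,
// which is why VisitInsertionOrder() above now skips null entries.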
+
std::ostream& operator<<(std::ostream& os, const ReferenceTypeInfo& rhs) {
ScopedObjectAccess soa(Thread::Current());
os << "["
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index a35fa1d..21ed350 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_NODES_H_
#define ART_COMPILER_OPTIMIZING_NODES_H_
+#include "base/arena_containers.h"
#include "base/arena_object.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "handle.h"
@@ -33,16 +34,20 @@
class GraphChecker;
class HBasicBlock;
+class HDoubleConstant;
class HEnvironment;
+class HFloatConstant;
+class HGraphVisitor;
class HInstruction;
class HIntConstant;
class HInvoke;
-class HGraphVisitor;
+class HLongConstant;
class HNullConstant;
class HPhi;
class HSuspendCheck;
class LiveInterval;
class LocationSummary;
+class SsaBuilder;
static const int kDefaultNumberOfBlocks = 8;
static const int kDefaultNumberOfSuccessors = 2;
@@ -115,7 +120,10 @@
temporaries_vreg_slots_(0),
has_array_accesses_(false),
debuggable_(debuggable),
- current_instruction_id_(start_instruction_id) {}
+ current_instruction_id_(start_instruction_id),
+ cached_null_constant_(nullptr),
+ cached_int_constants_(std::less<int32_t>(), arena->Adapter()),
+ cached_long_constants_(std::less<int64_t>(), arena->Adapter()) {}
ArenaAllocator* GetArena() const { return arena_; }
const GrowableArray<HBasicBlock*>& GetBlocks() const { return blocks_; }
@@ -154,6 +162,8 @@
// Inline this graph in `outer_graph`, replacing the given `invoke` instruction.
void InlineInto(HGraph* outer_graph, HInvoke* invoke);
+ void MergeEmptyBranches(HBasicBlock* start_block, HBasicBlock* end_block);
+
void SplitCriticalEdge(HBasicBlock* block, HBasicBlock* successor);
void SimplifyLoop(HBasicBlock* header);
@@ -216,7 +226,17 @@
bool IsDebuggable() const { return debuggable_; }
+ // Returns a constant of the given type and value. If it does not exist
+ // already, it is created and inserted into the graph. Only integral types
+ // are currently supported.
+ HConstant* GetConstant(Primitive::Type type, int64_t value);
HNullConstant* GetNullConstant();
+ HIntConstant* GetIntConstant(int32_t value) {
+ return CreateConstant(value, &cached_int_constants_);
+ }
+ HLongConstant* GetLongConstant(int64_t value) {
+ return CreateConstant(value, &cached_long_constants_);
+ }
private:
HBasicBlock* FindCommonDominator(HBasicBlock* first, HBasicBlock* second) const;
@@ -230,6 +250,10 @@
void RemoveInstructionsAsUsersFromDeadBlocks(const ArenaBitVector& visited) const;
void RemoveDeadBlocks(const ArenaBitVector& visited) const;
+ template <class InstType, typename ValueType>
+ InstType* CreateConstant(ValueType value, ArenaSafeMap<ValueType, InstType*>* cache);
+ void InsertConstant(HConstant* instruction);
+
ArenaAllocator* const arena_;
// List of blocks in insertion order.
@@ -264,8 +288,10 @@
// The current id to assign to a newly added instruction. See HInstruction.id_.
int32_t current_instruction_id_;
- // Cached null constant that might be created when building SSA form.
+ // Cached common constants often needed by optimization passes.
HNullConstant* cached_null_constant_;
+ ArenaSafeMap<int32_t, HIntConstant*> cached_int_constants_;
+ ArenaSafeMap<int64_t, HLongConstant*> cached_long_constants_;
ART_FRIEND_TEST(GraphTest, IfSuccessorSimpleJoinBlock1);
DISALLOW_COPY_AND_ASSIGN(HGraph);
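The matching definition of CreateConstant is not shown in this section; a plausible sketch, consistent with the declarations above but an assumption rather than the committed nodes.cc code:

template <class InstType, typename ValueType>
InstType* HGraph::CreateConstant(ValueType value,
                                 ArenaSafeMap<ValueType, InstType*>* cache) {
  // Each distinct value maps to exactly one cached instruction.
  auto it = cache->find(value);
  if (it != cache->end()) {
    return it->second;
  }
  // HGraph is a friend of the constant classes, so the now-private
  // constructors remain reachable from here.
  InstType* constant = new (arena_) InstType(value);
  cache->Put(value, constant);
  InsertConstant(constant);  // Place it in the graph (e.g. the entry block).
  return constant;
}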
@@ -300,9 +326,9 @@
back_edges_.Delete(back_edge);
}
- bool IsBackEdge(HBasicBlock* block) {
+ bool IsBackEdge(const HBasicBlock& block) const {
for (size_t i = 0, e = back_edges_.Size(); i < e; ++i) {
- if (back_edges_.Get(i) == block) return true;
+ if (back_edges_.Get(i) == &block) return true;
}
return false;
}
@@ -336,6 +362,7 @@
const ArenaBitVector& GetBlocks() const { return blocks_; }
void Add(HBasicBlock* block);
+ void Remove(HBasicBlock* block);
private:
// Internal recursive implementation of `Populate`.
@@ -391,6 +418,8 @@
return graph_->GetExitBlock() == this;
}
+ bool IsSingleGoto() const;
+
void AddBackEdge(HBasicBlock* back_edge) {
if (loop_information_ == nullptr) {
loop_information_ = new (graph_->GetArena()) HLoopInformation(this, graph_);
@@ -512,8 +541,16 @@
// of `this` are moved to `other`.
// Note that this method does not update the graph, reverse post order, loop
// information, nor make sure the blocks are consistent (for example ending
+ // with a control flow instruction).
void ReplaceWith(HBasicBlock* other);
+ // Disconnects `this` from all its predecessors, successors and the dominator.
+ // It assumes that `this` does not dominate any blocks.
+ // Note that this method does not update the graph, reverse post order, loop
+ // information, nor make sure the blocks are consistent (for example ending
+ // with a control flow instruction).
+ void DisconnectFromAll();
+
void AddInstruction(HInstruction* instruction);
void InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor);
// Replace instruction `initial` with `replacement` within this block.
@@ -582,6 +619,10 @@
bool IsCatchBlock() const { return is_catch_block_; }
void SetIsCatchBlock() { is_catch_block_ = true; }
+ bool EndsWithControlFlowInstruction() const;
+ bool EndsWithIf() const;
+ bool HasSinglePhi() const;
+
private:
HGraph* graph_;
GrowableArray<HBasicBlock*> predecessors_;
@@ -604,6 +645,31 @@
DISALLOW_COPY_AND_ASSIGN(HBasicBlock);
};
+// Iterates over the LoopInformation of all loops which contain 'block'
+// from the innermost to the outermost.
+class HLoopInformationOutwardIterator : public ValueObject {
+ public:
+ explicit HLoopInformationOutwardIterator(const HBasicBlock& block)
+ : current_(block.GetLoopInformation()) {}
+
+ bool Done() const { return current_ == nullptr; }
+
+ void Advance() {
+ DCHECK(!Done());
+ current_ = current_->GetHeader()->GetDominator()->GetLoopInformation();
+ }
+
+ HLoopInformation* Current() const {
+ DCHECK(!Done());
+ return current_;
+ }
+
+ private:
+ HLoopInformation* current_;
+
+ DISALLOW_COPY_AND_ASSIGN(HLoopInformationOutwardIterator);
+};
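Illustrative use of the iterator, mirroring the loop bookkeeping in HGraph::MergeEmptyBranches above (`block` is any HBasicBlock nested in one or more loops):

for (HLoopInformationOutwardIterator it(*block); !it.Done(); it.Advance()) {
  it.Current()->Remove(block);  // Drop `block` from every enclosing loop.
}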
+
#define FOR_EACH_CONCRETE_INSTRUCTION(M) \
M(Add, BinaryOperation) \
M(And, BinaryOperation) \
@@ -1200,6 +1266,8 @@
return NeedsEnvironment() || IsLoadClass() || IsLoadString();
}
+ virtual bool NeedsDexCache() const { return false; }
+
protected:
virtual const HUserRecord<HInstruction*> InputRecordAt(size_t i) const = 0;
virtual void SetRawInputRecordAt(size_t index, const HUserRecord<HInstruction*>& input) = 0;
@@ -1858,8 +1926,6 @@
virtual bool IsZero() const { return false; }
virtual bool IsOne() const { return false; }
- static HConstant* NewConstant(ArenaAllocator* allocator, Primitive::Type type, int64_t val);
-
DECLARE_INSTRUCTION(Constant);
private:
@@ -1868,70 +1934,80 @@
class HFloatConstant : public HConstant {
public:
- explicit HFloatConstant(float value) : HConstant(Primitive::kPrimFloat), value_(value) {}
-
float GetValue() const { return value_; }
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- return bit_cast<float, int32_t>(other->AsFloatConstant()->value_) ==
- bit_cast<float, int32_t>(value_);
+ return bit_cast<uint32_t, float>(other->AsFloatConstant()->value_) ==
+ bit_cast<uint32_t, float>(value_);
}
size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
bool IsMinusOne() const OVERRIDE {
- return bit_cast<uint32_t>(AsFloatConstant()->GetValue()) == bit_cast<uint32_t>((-1.0f));
+ return bit_cast<uint32_t, float>(AsFloatConstant()->GetValue()) ==
+ bit_cast<uint32_t, float>((-1.0f));
}
bool IsZero() const OVERRIDE {
return AsFloatConstant()->GetValue() == 0.0f;
}
bool IsOne() const OVERRIDE {
- return bit_cast<uint32_t>(AsFloatConstant()->GetValue()) == bit_cast<uint32_t>(1.0f);
+ return bit_cast<uint32_t, float>(AsFloatConstant()->GetValue()) ==
+ bit_cast<uint32_t, float>(1.0f);
}
DECLARE_INSTRUCTION(FloatConstant);
private:
+ explicit HFloatConstant(float value) : HConstant(Primitive::kPrimFloat), value_(value) {}
+
const float value_;
+ // Only the SsaBuilder can currently create floating-point constants. If we
+ // ever need to create them later in the pipeline, we will have to handle them
+ // the same way as integral constants.
+ friend class SsaBuilder;
DISALLOW_COPY_AND_ASSIGN(HFloatConstant);
};
class HDoubleConstant : public HConstant {
public:
- explicit HDoubleConstant(double value) : HConstant(Primitive::kPrimDouble), value_(value) {}
-
double GetValue() const { return value_; }
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- return bit_cast<double, int64_t>(other->AsDoubleConstant()->value_) ==
- bit_cast<double, int64_t>(value_);
+ return bit_cast<uint64_t, double>(other->AsDoubleConstant()->value_) ==
+ bit_cast<uint64_t, double>(value_);
}
size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
bool IsMinusOne() const OVERRIDE {
- return bit_cast<uint64_t>(AsDoubleConstant()->GetValue()) == bit_cast<uint64_t>((-1.0));
+ return bit_cast<uint64_t, double>(AsDoubleConstant()->GetValue()) ==
+ bit_cast<uint64_t, double>((-1.0));
}
bool IsZero() const OVERRIDE {
return AsDoubleConstant()->GetValue() == 0.0;
}
bool IsOne() const OVERRIDE {
- return bit_cast<uint64_t>(AsDoubleConstant()->GetValue()) == bit_cast<uint64_t>(1.0);
+ return bit_cast<uint64_t, double>(AsDoubleConstant()->GetValue()) ==
+ bit_cast<uint64_t, double>(1.0);
}
DECLARE_INSTRUCTION(DoubleConstant);
private:
+ explicit HDoubleConstant(double value) : HConstant(Primitive::kPrimDouble), value_(value) {}
+
const double value_;
+ // Only the SsaBuilder can currently create floating-point constants. If we
+ // ever need to create them later in the pipeline, we will have to handle them
+ // the same way as integral constants.
+ friend class SsaBuilder;
DISALLOW_COPY_AND_ASSIGN(HDoubleConstant);
};
class HNullConstant : public HConstant {
public:
- HNullConstant() : HConstant(Primitive::kPrimNot) {}
-
bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -1943,6 +2019,9 @@
DECLARE_INSTRUCTION(NullConstant);
private:
+ HNullConstant() : HConstant(Primitive::kPrimNot) {}
+
+ friend class HGraph;
DISALLOW_COPY_AND_ASSIGN(HNullConstant);
};
@@ -1950,8 +2029,6 @@
// synthesized (for example with the if-eqz instruction).
class HIntConstant : public HConstant {
public:
- explicit HIntConstant(int32_t value) : HConstant(Primitive::kPrimInt), value_(value) {}
-
int32_t GetValue() const { return value_; }
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
@@ -1972,15 +2049,18 @@
DECLARE_INSTRUCTION(IntConstant);
private:
+ explicit HIntConstant(int32_t value) : HConstant(Primitive::kPrimInt), value_(value) {}
+
const int32_t value_;
+ friend class HGraph;
+ ART_FRIEND_TEST(GraphTest, InsertInstructionBefore);
+ ART_FRIEND_TEST(ParallelMoveTest, ConstantLast);
DISALLOW_COPY_AND_ASSIGN(HIntConstant);
};
class HLongConstant : public HConstant {
public:
- explicit HLongConstant(int64_t value) : HConstant(Primitive::kPrimLong), value_(value) {}
-
int64_t GetValue() const { return value_; }
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
@@ -1996,8 +2076,11 @@
DECLARE_INSTRUCTION(LongConstant);
private:
+ explicit HLongConstant(int64_t value) : HConstant(Primitive::kPrimLong), value_(value) {}
+
const int64_t value_;
+ friend class HGraph;
DISALLOW_COPY_AND_ASSIGN(HLongConstant);
};
@@ -2077,8 +2160,10 @@
uint32_t dex_pc,
uint32_t dex_method_index,
bool is_recursive,
+ InvokeType original_invoke_type,
InvokeType invoke_type)
: HInvoke(arena, number_of_arguments, return_type, dex_pc, dex_method_index),
+ original_invoke_type_(original_invoke_type),
invoke_type_(invoke_type),
is_recursive_(is_recursive) {}
@@ -2088,12 +2173,15 @@
return false;
}
+ InvokeType GetOriginalInvokeType() const { return original_invoke_type_; }
InvokeType GetInvokeType() const { return invoke_type_; }
bool IsRecursive() const { return is_recursive_; }
+ bool NeedsDexCache() const OVERRIDE { return !IsRecursive(); }
DECLARE_INSTRUCTION(InvokeStaticOrDirect);
private:
+ const InvokeType original_invoke_type_;
const InvokeType invoke_type_;
const bool is_recursive_;
@@ -2972,6 +3060,8 @@
return loaded_class_rti_.IsExact();
}
+ bool NeedsDexCache() const OVERRIDE { return !is_referrers_class_; }
+
DECLARE_INSTRUCTION(LoadClass);
private:
@@ -3007,6 +3097,7 @@
// TODO: Can we deopt or debug when we resolve a string?
bool NeedsEnvironment() const OVERRIDE { return false; }
+ bool NeedsDexCache() const OVERRIDE { return true; }
DECLARE_INSTRUCTION(LoadString);
@@ -3024,7 +3115,7 @@
class HClinitCheck : public HExpression<1> {
public:
explicit HClinitCheck(HLoadClass* constant, uint32_t dex_pc)
- : HExpression(Primitive::kPrimNot, SideEffects::All()),
+ : HExpression(Primitive::kPrimNot, SideEffects::ChangesSomething()),
dex_pc_(dex_pc) {
SetRawInputAt(0, constant);
}
@@ -3459,7 +3550,10 @@
class HReversePostOrderIterator : public ValueObject {
public:
- explicit HReversePostOrderIterator(const HGraph& graph) : graph_(graph), index_(0) {}
+ explicit HReversePostOrderIterator(const HGraph& graph) : graph_(graph), index_(0) {
+ // Check that reverse post order of the graph has been built.
+ DCHECK(!graph.GetReversePostOrder().IsEmpty());
+ }
bool Done() const { return index_ == graph_.GetReversePostOrder().Size(); }
HBasicBlock* Current() const { return graph_.GetReversePostOrder().Get(index_); }
@@ -3475,7 +3569,10 @@
class HPostOrderIterator : public ValueObject {
public:
explicit HPostOrderIterator(const HGraph& graph)
- : graph_(graph), index_(graph_.GetReversePostOrder().Size()) {}
+ : graph_(graph), index_(graph_.GetReversePostOrder().Size()) {
+ // Check that reverse post order of the graph has been built.
+ DCHECK(!graph.GetReversePostOrder().IsEmpty());
+ }
bool Done() const { return index_ == 0; }
HBasicBlock* Current() const { return graph_.GetReversePostOrder().Get(index_ - 1); }
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index b70f925..b2f9c65 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -22,6 +22,7 @@
#include "base/arena_allocator.h"
#include "base/dumpable.h"
#include "base/timing_logger.h"
+#include "boolean_simplifier.h"
#include "bounds_check_elimination.h"
#include "builder.h"
#include "code_generator.h"
@@ -313,8 +314,9 @@
HDeadCodeElimination dce(graph);
HConstantFolding fold1(graph);
InstructionSimplifier simplify1(graph, stats);
+ HBooleanSimplifier boolean_not(graph);
- HInliner inliner(graph, dex_compilation_unit, driver, stats);
+ HInliner inliner(graph, dex_compilation_unit, dex_compilation_unit, driver, stats);
HConstantFolding fold2(graph);
SideEffectsAnalysis side_effects(graph);
@@ -331,6 +333,9 @@
&dce,
&fold1,
&simplify1,
+ // BooleanSimplifier depends on the InstructionSimplifier removing redundant
+ // suspend checks to recognize empty blocks.
+ &boolean_not,
&inliner,
&fold2,
&side_effects,
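For context, the kind of pattern the new pass rewrites, sketched as an assumption from the pass and variable names (the pass itself is not shown in this section):

// Source such as
//   boolean not(boolean cond) { return cond ? false : true; }
// compiles to a diamond whose arms hold only the constants 0 and 1. Once
// InstructionSimplifier has removed the redundant suspend checks, both arms
// satisfy IsSingleGoto(), the diamond can be recognized as empty, and the
// Phi selecting between the constants can be replaced by a (negated) use of
// `cond` itself.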
@@ -462,13 +467,22 @@
return nullptr;
}
+ // Implementation of the space filter: do not compile a code item whose size in
+ // code units is bigger than 256.
+ static constexpr size_t kSpaceFilterOptimizingThreshold = 256;
+ const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
+ if ((compiler_options.GetCompilerFilter() == CompilerOptions::kSpace)
+ && (code_item->insns_size_in_code_units_ > kSpaceFilterOptimizingThreshold)) {
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledSpaceFilter);
+ return nullptr;
+ }
+
DexCompilationUnit dex_compilation_unit(
nullptr, class_loader, art::Runtime::Current()->GetClassLinker(), dex_file, code_item,
class_def_idx, method_idx, access_flags,
compiler_driver->GetVerifiedMethod(&dex_file, method_idx));
- ArenaPool pool;
- ArenaAllocator arena(&pool);
+ ArenaAllocator arena(Runtime::Current()->GetArenaPool());
HGraph* graph = new (&arena) HGraph(
&arena, compiler_driver->GetCompilerOptions().GetDebuggable());
@@ -569,8 +583,13 @@
if (method != nullptr) {
return method;
}
- return delegate_->Compile(code_item, access_flags, invoke_type, class_def_idx, method_idx,
- class_loader, dex_file);
+ method = delegate_->Compile(code_item, access_flags, invoke_type, class_def_idx, method_idx,
+ class_loader, dex_file);
+
+ if (method != nullptr) {
+ compilation_stats_.RecordStat(MethodCompilationStat::kCompiledQuick);
+ }
+ return method;
}
Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 3ebf0f8..b97a667 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -28,6 +28,7 @@
kAttemptCompilation = 0,
kCompiledBaseline,
kCompiledOptimized,
+ kCompiledQuick,
kInlinedInvoke,
kNotCompiledUnsupportedIsa,
kNotCompiledPathological,
@@ -38,6 +39,7 @@
kNotCompiledUnresolvedMethod,
kNotCompiledUnresolvedField,
kNotCompiledNonSequentialRegPair,
+ kNotCompiledSpaceFilter,
kNotOptimizedTryCatch,
kNotOptimizedDisabled,
kNotCompiledCantAccesType,
@@ -64,16 +66,22 @@
compile_stats_[kCompiledBaseline] * 100 / compile_stats_[kAttemptCompilation];
size_t optimized_percent =
compile_stats_[kCompiledOptimized] * 100 / compile_stats_[kAttemptCompilation];
+ size_t quick_percent =
+ compile_stats_[kCompiledQuick] * 100 / compile_stats_[kAttemptCompilation];
std::ostringstream oss;
- oss << "Attempted compilation of " << compile_stats_[kAttemptCompilation] << " methods: "
- << unoptimized_percent << "% (" << compile_stats_[kCompiledBaseline] << ") unoptimized, "
- << optimized_percent << "% (" << compile_stats_[kCompiledOptimized] << ") optimized.";
+ oss << "Attempted compilation of " << compile_stats_[kAttemptCompilation] << " methods: ";
+
+ oss << unoptimized_percent << "% (" << compile_stats_[kCompiledBaseline] << ") unoptimized, ";
+ oss << optimized_percent << "% (" << compile_stats_[kCompiledOptimized] << ") optimized, ";
+ oss << quick_percent << "% (" << compile_stats_[kCompiledQuick] << ") quick.";
+
+ LOG(INFO) << oss.str();
+
for (int i = 0; i < kLastStat; i++) {
if (compile_stats_[i] != 0) {
- oss << "\n" << PrintMethodCompilationStat(i) << ": " << compile_stats_[i];
+ VLOG(compiler) << PrintMethodCompilationStat(i) << ": " << compile_stats_[i];
}
}
- LOG(INFO) << oss.str();
}
}
@@ -83,6 +91,7 @@
case kAttemptCompilation : return "kAttemptCompilation";
case kCompiledBaseline : return "kCompiledBaseline";
case kCompiledOptimized : return "kCompiledOptimized";
+ case kCompiledQuick : return "kCompiledQuick";
case kInlinedInvoke : return "kInlinedInvoke";
case kNotCompiledUnsupportedIsa : return "kNotCompiledUnsupportedIsa";
case kNotCompiledPathological : return "kNotCompiledPathological";
@@ -96,6 +105,7 @@
case kNotOptimizedDisabled : return "kNotOptimizedDisabled";
case kNotOptimizedTryCatch : return "kNotOptimizedTryCatch";
case kNotCompiledCantAccesType : return "kNotCompiledCantAccesType";
+ case kNotCompiledSpaceFilter : return "kNotCompiledSpaceFilter";
case kNotOptimizedRegisterAllocator : return "kNotOptimizedRegisterAllocator";
case kNotCompiledUnhandledInstruction : return "kNotCompiledUnhandledInstruction";
case kRemovedCheckedCast: return "kRemovedCheckedCast";
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index cecc210..cf38bd3 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -213,7 +213,7 @@
LiveInterval* interval =
LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimInt);
temp_intervals_.Add(interval);
- interval->AddRange(position, position + 1);
+ interval->AddTempUse(instruction, i);
unhandled_core_intervals_.Add(interval);
break;
}
@@ -222,7 +222,7 @@
LiveInterval* interval =
LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimDouble);
temp_intervals_.Add(interval);
- interval->AddRange(position, position + 1);
+ interval->AddTempUse(instruction, i);
if (codegen_->NeedsTwoRegisters(Primitive::kPrimDouble)) {
interval->AddHighInterval(true);
LiveInterval* high = interval->GetHighInterval();
@@ -851,6 +851,23 @@
return false;
}
+bool RegisterAllocator::PotentiallyRemoveOtherHalf(LiveInterval* interval,
+ GrowableArray<LiveInterval*>* intervals,
+ size_t index) {
+ if (interval->IsLowInterval()) {
+ DCHECK_EQ(intervals->Get(index), interval->GetHighInterval());
+ intervals->DeleteAt(index);
+ return true;
+ } else if (interval->IsHighInterval()) {
+ DCHECK_GT(index, 0u);
+ DCHECK_EQ(intervals->Get(index - 1), interval->GetLowInterval());
+ intervals->DeleteAt(index - 1);
+ return true;
+ } else {
+ return false;
+ }
+}
+
// Find the register that is used the last, and spill the interval
// that holds it. If the first use of `current` is after that register
// we spill `current` instead.
@@ -974,33 +991,17 @@
if (active->GetRegister() == reg) {
DCHECK(!active->IsFixed());
LiveInterval* split = Split(active, current->GetStart());
- active_.DeleteAt(i);
if (split != active) {
handled_.Add(active);
}
+ active_.DeleteAt(i);
+ PotentiallyRemoveOtherHalf(active, &active_, i);
AddSorted(unhandled_, split);
-
- if (active->IsLowInterval() || active->IsHighInterval()) {
- LiveInterval* other_half = active->IsLowInterval()
- ? active->GetHighInterval()
- : active->GetLowInterval();
- // We also need to remove the other half from the list of actives.
- bool found = false;
- for (size_t j = 0; j < active_.Size(); ++j) {
- if (active_.Get(j) == other_half) {
- found = true;
- active_.DeleteAt(j);
- handled_.Add(other_half);
- break;
- }
- }
- DCHECK(found);
- }
break;
}
}
- for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
+ for (size_t i = 0; i < inactive_.Size(); ++i) {
LiveInterval* inactive = inactive_.Get(i);
if (inactive->GetRegister() == reg) {
if (!current->IsSplit() && !inactive->IsFixed()) {
@@ -1024,29 +1025,14 @@
// If it's inactive, it must start before the current interval.
DCHECK_NE(split, inactive);
inactive_.DeleteAt(i);
+ if (PotentiallyRemoveOtherHalf(inactive, &inactive_, i) && inactive->IsHighInterval()) {
+ // We have removed an entry prior to `inactive`. So we need to decrement.
+ --i;
+ }
+ // Decrement because we have removed `inactive` from the list.
--i;
- --e;
handled_.Add(inactive);
AddSorted(unhandled_, split);
-
- if (inactive->IsLowInterval() || inactive->IsHighInterval()) {
- LiveInterval* other_half = inactive->IsLowInterval()
- ? inactive->GetHighInterval()
- : inactive->GetLowInterval();
-
- // We also need to remove the other half from the list of inactives.
- bool found = false;
- for (size_t j = 0; j < inactive_.Size(); ++j) {
- if (inactive_.Get(j) == other_half) {
- found = true;
- inactive_.DeleteAt(j);
- --e;
- handled_.Add(other_half);
- break;
- }
- }
- DCHECK(found);
- }
}
}
}
@@ -1695,8 +1681,6 @@
}
// Assign temp locations.
- HInstruction* current = nullptr;
- size_t temp_index = 0;
for (size_t i = 0; i < temp_intervals_.Size(); ++i) {
LiveInterval* temp = temp_intervals_.Get(i);
if (temp->IsHighInterval()) {
@@ -1704,25 +1688,20 @@
continue;
}
HInstruction* at = liveness_.GetTempUser(temp);
- if (at != current) {
- temp_index = 0;
- current = at;
- }
+ size_t temp_index = liveness_.GetTempIndex(temp);
LocationSummary* locations = at->GetLocations();
switch (temp->GetType()) {
case Primitive::kPrimInt:
- locations->SetTempAt(
- temp_index++, Location::RegisterLocation(temp->GetRegister()));
+ locations->SetTempAt(temp_index, Location::RegisterLocation(temp->GetRegister()));
break;
case Primitive::kPrimDouble:
if (codegen_->NeedsTwoRegisters(Primitive::kPrimDouble)) {
Location location = Location::FpuRegisterPairLocation(
temp->GetRegister(), temp->GetHighInterval()->GetRegister());
- locations->SetTempAt(temp_index++, location);
+ locations->SetTempAt(temp_index, location);
} else {
- locations->SetTempAt(
- temp_index++, Location::FpuRegisterLocation(temp->GetRegister()));
+ locations->SetTempAt(temp_index, Location::FpuRegisterLocation(temp->GetRegister()));
}
break;
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index fcc6112..717be75 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -144,6 +144,13 @@
size_t first_register_use,
size_t* next_use);
+ // If `interval` has another half, remove it from the list of `intervals`.
+ // `index` holds the index at which `interval` is in `intervals`.
+ // Returns whether there is another half.
+ bool PotentiallyRemoveOtherHalf(LiveInterval* interval,
+ GrowableArray<LiveInterval*>* intervals,
+ size_t index);
+
ArenaAllocator* const allocator_;
CodeGenerator* const codegen_;
const SsaLivenessAnalysis& liveness_;
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index b757a3b..7c3a035 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -596,6 +596,8 @@
graph->AddBlock(exit);
block->AddSuccessor(exit);
exit->AddInstruction(new (allocator) HExit());
+
+ graph->BuildDominatorTree();
return graph;
}
@@ -642,11 +644,10 @@
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimInt);
- HInstruction* constant1 = new (allocator) HIntConstant(0);
- HInstruction* constant2 = new (allocator) HIntConstant(0);
entry->AddInstruction(parameter);
- entry->AddInstruction(constant1);
- entry->AddInstruction(constant2);
+
+ HInstruction* constant1 = graph->GetIntConstant(1);
+ HInstruction* constant2 = graph->GetIntConstant(2);
HBasicBlock* block = new (allocator) HBasicBlock(graph);
graph->AddBlock(block);
@@ -658,6 +659,8 @@
block->AddInstruction(*second_sub);
block->AddInstruction(new (allocator) HExit());
+
+ graph->BuildDominatorTree();
return graph;
}
@@ -719,6 +722,8 @@
block->AddInstruction(*div);
block->AddInstruction(new (allocator) HExit());
+
+ graph->BuildDominatorTree();
return graph;
}
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index ba11e90..fcc4e69 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -353,18 +353,18 @@
* is used for floating point operations. We create a floating-point equivalent
* constant to make the operations correctly typed.
*/
-static HFloatConstant* GetFloatEquivalent(HIntConstant* constant) {
+HFloatConstant* SsaBuilder::GetFloatEquivalent(HIntConstant* constant) {
// We place the floating point constant next to this constant.
HFloatConstant* result = constant->GetNext()->AsFloatConstant();
if (result == nullptr) {
HGraph* graph = constant->GetBlock()->GetGraph();
ArenaAllocator* allocator = graph->GetArena();
- result = new (allocator) HFloatConstant(bit_cast<int32_t, float>(constant->GetValue()));
+ result = new (allocator) HFloatConstant(bit_cast<float, int32_t>(constant->GetValue()));
constant->GetBlock()->InsertInstructionBefore(result, constant->GetNext());
} else {
// If there is already a constant with the expected type, we know it is
// the floating point equivalent of this constant.
- DCHECK_EQ((bit_cast<float, int32_t>(result->GetValue())), constant->GetValue());
+ DCHECK_EQ((bit_cast<int32_t, float>(result->GetValue())), constant->GetValue());
}
return result;
}
@@ -375,18 +375,18 @@
* is used for floating point operations. We create a floating-point equivalent
* constant to make the operations correctly typed.
*/
-static HDoubleConstant* GetDoubleEquivalent(HLongConstant* constant) {
+HDoubleConstant* SsaBuilder::GetDoubleEquivalent(HLongConstant* constant) {
// We place the floating point constant next to this constant.
HDoubleConstant* result = constant->GetNext()->AsDoubleConstant();
if (result == nullptr) {
HGraph* graph = constant->GetBlock()->GetGraph();
ArenaAllocator* allocator = graph->GetArena();
- result = new (allocator) HDoubleConstant(bit_cast<int64_t, double>(constant->GetValue()));
+ result = new (allocator) HDoubleConstant(bit_cast<double, int64_t>(constant->GetValue()));
constant->GetBlock()->InsertInstructionBefore(result, constant->GetNext());
} else {
// If there is already a constant with the expected type, we know it is
// the floating point equivalent of this constant.
- DCHECK_EQ((bit_cast<double, int64_t>(result->GetValue())), constant->GetValue());
+ DCHECK_EQ((bit_cast<int64_t, double>(result->GetValue())), constant->GetValue());
}
return result;
}
@@ -398,7 +398,7 @@
* floating point registers and core registers), we need to create a copy of the
* phi with a floating point / reference type.
*/
-static HPhi* GetFloatDoubleOrReferenceEquivalentOfPhi(HPhi* phi, Primitive::Type type) {
+HPhi* SsaBuilder::GetFloatDoubleOrReferenceEquivalentOfPhi(HPhi* phi, Primitive::Type type) {
// We place the floating point /reference phi next to this phi.
HInstruction* next = phi->GetNext();
if (next != nullptr
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 24dc449..569b3e2 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -85,6 +85,10 @@
static constexpr const char* kSsaBuilderPassName = "ssa_builder";
private:
+ static HFloatConstant* GetFloatEquivalent(HIntConstant* constant);
+ static HDoubleConstant* GetDoubleEquivalent(HLongConstant* constant);
+ static HPhi* GetFloatDoubleOrReferenceEquivalentOfPhi(HPhi* phi, Primitive::Type type);
+
// Locals for the current block being visited.
HEnvironment* current_locals_;
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index c0d6f42..0f3973e 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -71,8 +71,8 @@
// for it.
GrowableArray<uint32_t> forward_predecessors(graph_.GetArena(), graph_.GetBlocks().Size());
forward_predecessors.SetSize(graph_.GetBlocks().Size());
- for (size_t i = 0, e = graph_.GetBlocks().Size(); i < e; ++i) {
- HBasicBlock* block = graph_.GetBlocks().Get(i);
+ for (HReversePostOrderIterator it(graph_); !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
size_t number_of_forward_predecessors = block->GetPredecessors().Size();
if (block->IsLoopHeader()) {
// We rely on having simplified the CFG.
@@ -318,6 +318,8 @@
int LiveInterval::FindFirstRegisterHint(size_t* free_until) const {
DCHECK(!IsHighInterval());
+ if (IsTemp()) return kNoRegister;
+
if (GetParent() == this && defined_by_ != nullptr) {
// This is the first interval for the instruction. Try to find
// a register based on its definition.
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index b57029d..bc78dc2 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -180,6 +180,15 @@
// This interval is the result of a split.
bool IsSplit() const { return parent_ != this; }
+ void AddTempUse(HInstruction* instruction, size_t temp_index) {
+ DCHECK(IsTemp());
+ DCHECK(first_use_ == nullptr) << "A temporary can only have one user";
+ size_t position = instruction->GetLifetimePosition();
+ first_use_ = new (allocator_) UsePosition(
+ instruction, temp_index, /* is_environment */ false, position, first_use_);
+ AddRange(position, position + 1);
+ }
+
void AddUse(HInstruction* instruction, size_t input_index, bool is_environment) {
// Set the use within the instruction.
size_t position = instruction->GetLifetimePosition() + 1;
@@ -856,7 +865,15 @@
HInstruction* GetTempUser(LiveInterval* temp) const {
// A temporary shares the same lifetime start as the instruction that requires it.
DCHECK(temp->IsTemp());
- return GetInstructionFromPosition(temp->GetStart() / 2);
+ HInstruction* user = GetInstructionFromPosition(temp->GetStart() / 2);
+ DCHECK_EQ(user, temp->GetFirstUse()->GetUser());
+ return user;
+ }
+
+ size_t GetTempIndex(LiveInterval* temp) const {
+ // We use the input index to store the index of the temporary in the user's temporary list.
+ DCHECK(temp->IsTemp());
+ return temp->GetFirstUse()->GetInputIndex();
}
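A sketch of the round trip the two accessors enable, assuming an `allocator`, an `instruction` whose lifetime position is set, and a computed `liveness` analysis (all hypothetical names):

LiveInterval* temp = LiveInterval::MakeTempInterval(allocator, Primitive::kPrimInt);
temp->AddTempUse(instruction, /* temp_index */ 1);
// The single use position now records both the user and the slot in the
// user's temporary list.
DCHECK_EQ(instruction, liveness.GetTempUser(temp));
DCHECK_EQ(1u, liveness.GetTempIndex(temp));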
size_t GetMaxLifetimePosition() const {
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index e1a5afe..a73c8d7 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -17,7 +17,8 @@
#ifndef ART_COMPILER_OPTIMIZING_STACK_MAP_STREAM_H_
#define ART_COMPILER_OPTIMIZING_STACK_MAP_STREAM_H_
-#include "base/bit_vector.h"
+#include "base/arena_containers.h"
+#include "base/bit_vector-inl.h"
#include "base/value_object.h"
#include "memory_region.h"
#include "nodes.h"
@@ -26,6 +27,32 @@
namespace art {
+// Helper to build art::StackMapStream::LocationCatalogEntriesIndices.
+class LocationCatalogEntriesIndicesEmptyFn {
+ public:
+ void MakeEmpty(std::pair<DexRegisterLocation, size_t>& item) const {
+ item.first = DexRegisterLocation::None();
+ }
+ bool IsEmpty(const std::pair<DexRegisterLocation, size_t>& item) const {
+ return item.first == DexRegisterLocation::None();
+ }
+};
+
+// Hash function for art::StackMapStream::LocationCatalogEntriesIndices.
+// This hash function does not create collisions.
+class DexRegisterLocationHashFn {
+ public:
+ size_t operator()(DexRegisterLocation key) const {
+ // Concatenate `key`'s fields to create a 64-bit value to be hashed.
+ int64_t kind_and_value =
+ (static_cast<int64_t>(key.kind_) << 32) | static_cast<int64_t>(key.value_);
+ return inner_hash_fn_(kind_and_value);
+ }
+ private:
+ std::hash<int64_t> inner_hash_fn_;
+};
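For a non-negative value the packed quantity is a plain concatenation; for example (illustrative):

// (kInStack, value 8) hashes inner_hash_fn_ applied to
//   kind_and_value = (static_cast<int64_t>(kInStack) << 32) | 8,
// i.e. the location kind in the high word and the value in the low word.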
+
+
/**
* Collects and builds stack maps for a method. All the stack maps
* for a method are placed in a CodeInfo object.
@@ -35,10 +62,15 @@
explicit StackMapStream(ArenaAllocator* allocator)
: allocator_(allocator),
stack_maps_(allocator, 10),
+ location_catalog_entries_(allocator, 4),
dex_register_locations_(allocator, 10 * 4),
inline_infos_(allocator, 2),
stack_mask_max_(-1),
- number_of_stack_maps_with_inline_info_(0) {}
+ dex_pc_max_(0),
+ native_pc_offset_max_(0),
+ register_mask_max_(0),
+ number_of_stack_maps_with_inline_info_(0),
+ dex_map_hash_to_stack_map_indices_(std::less<uint32_t>(), allocator->Adapter()) {}
// Compute bytes needed to encode a mask with the given maximum element.
static uint32_t StackMaskEncodingSize(int max_element) {
@@ -57,6 +89,7 @@
size_t dex_register_locations_start_index;
size_t inline_infos_start_index;
BitVector* live_dex_registers_mask;
+ uint32_t dex_register_map_hash;
};
struct InlineInfoEntry {
@@ -78,6 +111,7 @@
entry.inlining_depth = inlining_depth;
entry.dex_register_locations_start_index = dex_register_locations_.Size();
entry.inline_infos_start_index = inline_infos_.Size();
+ entry.dex_register_map_hash = 0;
if (num_dex_registers != 0) {
entry.live_dex_registers_mask =
new (allocator_) ArenaBitVector(allocator_, num_dex_registers, true);
@@ -92,6 +126,10 @@
if (inlining_depth > 0) {
number_of_stack_maps_with_inline_info_++;
}
+
+ dex_pc_max_ = std::max(dex_pc_max_, dex_pc);
+ native_pc_offset_max_ = std::max(native_pc_offset_max_, native_pc_offset);
+ register_mask_max_ = std::max(register_mask_max_, register_mask);
}
void AddInlineInfoEntry(uint32_t method_index) {
@@ -100,8 +138,9 @@
inline_infos_.Add(entry);
}
- size_t ComputeNeededSize() const {
+ size_t ComputeNeededSize() {
size_t size = CodeInfo::kFixedSize
+ + ComputeDexRegisterLocationCatalogSize()
+ ComputeStackMapsSize()
+ ComputeDexRegisterMapsSize()
+ ComputeInlineInfoSize();
@@ -113,33 +152,60 @@
return StackMaskEncodingSize(stack_mask_max_);
}
- size_t ComputeStackMapsSize() const {
- return stack_maps_.Size() * StackMap::ComputeStackMapSize(ComputeStackMaskSize());
+ size_t ComputeStackMapsSize() {
+ return stack_maps_.Size() * StackMap::ComputeStackMapSize(
+ ComputeStackMaskSize(),
+ ComputeInlineInfoSize(),
+ ComputeDexRegisterMapsSize(),
+ dex_pc_max_,
+ native_pc_offset_max_,
+ register_mask_max_);
}
- // Compute the size of the Dex register map of `entry`.
- size_t ComputeDexRegisterMapSize(const StackMapEntry& entry) const {
- size_t size = DexRegisterMap::kFixedSize;
- // Add the bit mask for the dex register liveness.
- size += DexRegisterMap::LiveBitMaskSize(entry.num_dex_registers);
- for (size_t dex_register_number = 0, index_in_dex_register_locations = 0;
- dex_register_number < entry.num_dex_registers;
- ++dex_register_number) {
- if (entry.live_dex_registers_mask->IsBitSet(dex_register_number)) {
- DexRegisterLocation dex_register_location = dex_register_locations_.Get(
- entry.dex_register_locations_start_index + index_in_dex_register_locations);
- size += DexRegisterMap::EntrySize(dex_register_location);
- index_in_dex_register_locations++;
- }
+ // Compute the size of the Dex register location catalog (one per method).
+ size_t ComputeDexRegisterLocationCatalogSize() const {
+ size_t size = DexRegisterLocationCatalog::kFixedSize;
+ for (size_t location_catalog_entry_index = 0;
+ location_catalog_entry_index < location_catalog_entries_.Size();
+ ++location_catalog_entry_index) {
+ DexRegisterLocation dex_register_location =
+ location_catalog_entries_.Get(location_catalog_entry_index);
+ size += DexRegisterLocationCatalog::EntrySize(dex_register_location);
}
return size;
}
+ size_t ComputeDexRegisterMapSize(const StackMapEntry& entry) const {
+ // Size of the map in bytes.
+ size_t size = DexRegisterMap::kFixedSize;
+ // Add the live bit mask for the Dex register liveness.
+ size += DexRegisterMap::GetLiveBitMaskSize(entry.num_dex_registers);
+ // Compute the size of the set of live Dex register entries.
+ size_t number_of_live_dex_registers = 0;
+ for (size_t dex_register_number = 0;
+ dex_register_number < entry.num_dex_registers;
+ ++dex_register_number) {
+ if (entry.live_dex_registers_mask->IsBitSet(dex_register_number)) {
+ ++number_of_live_dex_registers;
+ }
+ }
+ size_t map_entries_size_in_bits =
+ DexRegisterMap::SingleEntrySizeInBits(location_catalog_entries_.Size())
+ * number_of_live_dex_registers;
+ size_t map_entries_size_in_bytes =
+ RoundUp(map_entries_size_in_bits, kBitsPerByte) / kBitsPerByte;
+ size += map_entries_size_in_bytes;
+ return size;
+ }
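A worked instance of this computation, matching the updated Test1 expectations near the end of this change:

// Two Dex registers, both live, with a two-entry location catalog:
//   live bit mask:  GetLiveBitMaskSize(2)          -> 1 byte
//   entry indices:  2 live registers * 2 bits each -> 4 bits -> 1 byte
// Total map size: 1 + 1 = 2 bytes (implying DexRegisterMap::kFixedSize is
// zero).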
+
// Compute the size of all the Dex register maps.
- size_t ComputeDexRegisterMapsSize() const {
+ size_t ComputeDexRegisterMapsSize() {
size_t size = 0;
for (size_t i = 0; i < stack_maps_.Size(); ++i) {
- size += ComputeDexRegisterMapSize(stack_maps_.Get(i));
+ if (FindEntryWithTheSameDexMap(i) == kNoSameDexMapFound) {
+ // Entries with the same dex map will have the same offset.
+ size += ComputeDexRegisterMapSize(stack_maps_.Get(i));
+ }
}
return size;
}
@@ -151,11 +217,19 @@
+ (number_of_stack_maps_with_inline_info_ * InlineInfo::kFixedSize);
}
- size_t ComputeDexRegisterMapsStart() const {
- return CodeInfo::kFixedSize + ComputeStackMapsSize();
+ size_t ComputeDexRegisterLocationCatalogStart() const {
+ return CodeInfo::kFixedSize;
}
- size_t ComputeInlineInfoStart() const {
+ size_t ComputeStackMapsStart() const {
+ return ComputeDexRegisterLocationCatalogStart() + ComputeDexRegisterLocationCatalogSize();
+ }
+
+ size_t ComputeDexRegisterMapsStart() {
+ return ComputeStackMapsStart() + ComputeStackMapsSize();
+ }
+
+ size_t ComputeInlineInfoStart() {
return ComputeDexRegisterMapsStart() + ComputeDexRegisterMapsSize();
}
@@ -165,19 +239,44 @@
code_info.SetOverallSize(region.size());
size_t stack_mask_size = ComputeStackMaskSize();
- uint8_t* memory_start = region.start();
+
+ size_t dex_register_map_size = ComputeDexRegisterMapsSize();
+ size_t inline_info_size = ComputeInlineInfoSize();
MemoryRegion dex_register_locations_region = region.Subregion(
ComputeDexRegisterMapsStart(),
- ComputeDexRegisterMapsSize());
+ dex_register_map_size);
MemoryRegion inline_infos_region = region.Subregion(
ComputeInlineInfoStart(),
- ComputeInlineInfoSize());
+ inline_info_size);
+ code_info.SetEncoding(inline_info_size,
+ dex_register_map_size,
+ dex_pc_max_,
+ native_pc_offset_max_,
+ register_mask_max_);
code_info.SetNumberOfStackMaps(stack_maps_.Size());
code_info.SetStackMaskSize(stack_mask_size);
- DCHECK_EQ(code_info.StackMapsSize(), ComputeStackMapsSize());
+ DCHECK_EQ(code_info.GetStackMapsSize(), ComputeStackMapsSize());
+
+ // Set the Dex register location catalog.
+ code_info.SetNumberOfDexRegisterLocationCatalogEntries(
+ location_catalog_entries_.Size());
+ MemoryRegion dex_register_location_catalog_region = region.Subregion(
+ ComputeDexRegisterLocationCatalogStart(),
+ ComputeDexRegisterLocationCatalogSize());
+ DexRegisterLocationCatalog dex_register_location_catalog(dex_register_location_catalog_region);
+ // Offset in `dex_register_location_catalog` where to store the next
+ // register location.
+ size_t location_catalog_offset = DexRegisterLocationCatalog::kFixedSize;
+ for (size_t i = 0, e = location_catalog_entries_.Size(); i < e; ++i) {
+ DexRegisterLocation dex_register_location = location_catalog_entries_.Get(i);
+ dex_register_location_catalog.SetRegisterInfo(location_catalog_offset, dex_register_location);
+ location_catalog_offset += DexRegisterLocationCatalog::EntrySize(dex_register_location);
+ }
+ // Ensure we reached the end of the Dex register location catalog.
+ DCHECK_EQ(location_catalog_offset, dex_register_location_catalog_region.size());
uintptr_t next_dex_register_map_offset = 0;
uintptr_t next_inline_info_offset = 0;
@@ -185,44 +284,54 @@
StackMap stack_map = code_info.GetStackMapAt(i);
StackMapEntry entry = stack_maps_.Get(i);
- stack_map.SetDexPc(entry.dex_pc);
- stack_map.SetNativePcOffset(entry.native_pc_offset);
- stack_map.SetRegisterMask(entry.register_mask);
+ stack_map.SetDexPc(code_info, entry.dex_pc);
+ stack_map.SetNativePcOffset(code_info, entry.native_pc_offset);
+ stack_map.SetRegisterMask(code_info, entry.register_mask);
if (entry.sp_mask != nullptr) {
- stack_map.SetStackMask(*entry.sp_mask);
+ stack_map.SetStackMask(code_info, *entry.sp_mask);
}
- if (entry.num_dex_registers != 0) {
- // Set the Dex register map.
- MemoryRegion register_region =
- dex_register_locations_region.Subregion(
- next_dex_register_map_offset,
- ComputeDexRegisterMapSize(entry));
- next_dex_register_map_offset += register_region.size();
- DexRegisterMap dex_register_map(register_region);
- stack_map.SetDexRegisterMapOffset(register_region.start() - memory_start);
+ if (entry.num_dex_registers == 0) {
+ // No dex map available.
+ stack_map.SetDexRegisterMapOffset(code_info, StackMap::kNoDexRegisterMap);
+ } else {
+ // Search for an entry with the same dex map.
+ size_t entry_with_same_map = FindEntryWithTheSameDexMap(i);
+ if (entry_with_same_map != kNoSameDexMapFound) {
+ // If we have a hit reuse the offset.
+ stack_map.SetDexRegisterMapOffset(code_info,
+ code_info.GetStackMapAt(entry_with_same_map).GetDexRegisterMapOffset(code_info));
+ } else {
+ // New dex registers maps should be added to the stack map.
+ MemoryRegion register_region =
+ dex_register_locations_region.Subregion(
+ next_dex_register_map_offset,
+ ComputeDexRegisterMapSize(entry));
+ next_dex_register_map_offset += register_region.size();
+ DexRegisterMap dex_register_map(register_region);
+ stack_map.SetDexRegisterMapOffset(
+ code_info, register_region.start() - dex_register_locations_region.start());
- // Offset in `dex_register_map` where to store the next register entry.
- size_t offset = DexRegisterMap::kFixedSize;
- dex_register_map.SetLiveBitMask(offset,
- entry.num_dex_registers,
- *entry.live_dex_registers_mask);
- offset += DexRegisterMap::LiveBitMaskSize(entry.num_dex_registers);
- for (size_t dex_register_number = 0, index_in_dex_register_locations = 0;
- dex_register_number < entry.num_dex_registers;
- ++dex_register_number) {
- if (entry.live_dex_registers_mask->IsBitSet(dex_register_number)) {
- DexRegisterLocation dex_register_location = dex_register_locations_.Get(
- entry.dex_register_locations_start_index + index_in_dex_register_locations);
- dex_register_map.SetRegisterInfo(offset, dex_register_location);
- offset += DexRegisterMap::EntrySize(dex_register_location);
- ++index_in_dex_register_locations;
+ // Set the live bit mask.
+ dex_register_map.SetLiveBitMask(entry.num_dex_registers, *entry.live_dex_registers_mask);
+
+ // Set the dex register location mapping data.
+ for (size_t dex_register_number = 0, index_in_dex_register_locations = 0;
+ dex_register_number < entry.num_dex_registers;
+ ++dex_register_number) {
+ if (entry.live_dex_registers_mask->IsBitSet(dex_register_number)) {
+ size_t location_catalog_entry_index =
+ dex_register_locations_.Get(entry.dex_register_locations_start_index
+ + index_in_dex_register_locations);
+ dex_register_map.SetLocationCatalogEntryIndex(
+ index_in_dex_register_locations,
+ location_catalog_entry_index,
+ entry.num_dex_registers,
+ location_catalog_entries_.Size());
+ ++index_in_dex_register_locations;
+ }
}
}
- // Ensure we reached the end of the Dex registers region.
- DCHECK_EQ(offset, register_region.size());
- } else {
- stack_map.SetDexRegisterMapOffset(StackMap::kNoDexRegisterMap);
}
// Set the inlining info.
@@ -233,7 +342,9 @@
next_inline_info_offset += inline_region.size();
InlineInfo inline_info(inline_region);
- stack_map.SetInlineDescriptorOffset(inline_region.start() - memory_start);
+ // Currently relative to the dex register map.
+ stack_map.SetInlineDescriptorOffset(
+ code_info, inline_region.start() - dex_register_locations_region.start());
inline_info.SetDepth(entry.inlining_depth);
for (size_t j = 0; j < entry.inlining_depth; ++j) {
@@ -241,32 +352,142 @@
inline_info.SetMethodReferenceIndexAtDepth(j, inline_entry.method_index);
}
} else {
- stack_map.SetInlineDescriptorOffset(StackMap::kNoInlineInfo);
+ if (inline_info_size != 0) {
+ stack_map.SetInlineDescriptorOffset(code_info, StackMap::kNoInlineInfo);
+ }
}
}
}
void AddDexRegisterEntry(uint16_t dex_register, DexRegisterLocation::Kind kind, int32_t value) {
+ StackMapEntry entry = stack_maps_.Get(stack_maps_.Size() - 1);
+ DCHECK_LT(dex_register, entry.num_dex_registers);
+
if (kind != DexRegisterLocation::Kind::kNone) {
// Ensure we only use non-compressed location kind at this stage.
DCHECK(DexRegisterLocation::IsShortLocationKind(kind))
<< DexRegisterLocation::PrettyDescriptor(kind);
- dex_register_locations_.Add(DexRegisterLocation(kind, value));
- stack_maps_.Get(stack_maps_.Size() - 1).live_dex_registers_mask->SetBit(dex_register);
+ DexRegisterLocation location(kind, value);
+
+ // Look for Dex register `location` in the location catalog (using the
+ // companion hash map of locations to indices). Use its index if it
+ // is already in the location catalog. If not, insert it (in the
+ // location catalog and the hash map) and use the newly created index.
+ auto it = location_catalog_entries_indices_.Find(location);
+ if (it != location_catalog_entries_indices_.end()) {
+ // Retrieve the index from the hash map.
+ dex_register_locations_.Add(it->second);
+ } else {
+ // Create a new entry in the location catalog and the hash map.
+ size_t index = location_catalog_entries_.Size();
+ location_catalog_entries_.Add(location);
+ dex_register_locations_.Add(index);
+ location_catalog_entries_indices_.Insert(std::make_pair(location, index));
+ }
+
+ entry.live_dex_registers_mask->SetBit(dex_register);
+ entry.dex_register_map_hash += (1 << dex_register);
+ entry.dex_register_map_hash += static_cast<uint32_t>(value);
+ entry.dex_register_map_hash += static_cast<uint32_t>(kind);
+ stack_maps_.Put(stack_maps_.Size() - 1, entry);
}
}
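The additive hash deliberately trades precision for speed; an illustrative collision:

// Swapping which register holds which location leaves all three sums intact:
//   map A: reg0 <- (kConstant, 1), reg1 <- (kInStack, 2)
//   map B: reg0 <- (kInStack, 2), reg1 <- (kConstant, 1)
// Both maps contribute the same register bits, kinds and values, so they
// share a bucket and are only told apart by HaveTheSameDexMaps() below.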
private:
+ // Returns the index of an entry with the same dex register map
+ // or kNoSameDexMapFound if no such entry exists.
+ size_t FindEntryWithTheSameDexMap(size_t entry_index) {
+ StackMapEntry entry = stack_maps_.Get(entry_index);
+ auto entries_it = dex_map_hash_to_stack_map_indices_.find(entry.dex_register_map_hash);
+ if (entries_it == dex_map_hash_to_stack_map_indices_.end()) {
+ // We don't have a perfect hash function, so we need a list to collect all
+ // stack maps which might have the same dex register map.
+ GrowableArray<uint32_t> stack_map_indices(allocator_, 1);
+ stack_map_indices.Add(entry_index);
+ dex_map_hash_to_stack_map_indices_.Put(entry.dex_register_map_hash, stack_map_indices);
+ return kNoSameDexMapFound;
+ }
+
+ // TODO: We don't need to add ourselves to the map if we can guarantee that
+ // FindEntryWithTheSameDexMap is called just once per stack map entry.
+ // A good way to do this is to cache the offset in the stack map entry. This
+ // is easier to do if we add markers when the stack map constructions begins
+ // and when it ends.
+
+ // We might have collisions, so we need to check whether or not we should
+ // add the entry to the map. `needs_to_be_added` keeps track of this.
+ bool needs_to_be_added = true;
+ size_t result = kNoSameDexMapFound;
+ for (size_t i = 0; i < entries_it->second.Size(); i++) {
+ size_t test_entry_index = entries_it->second.Get(i);
+ if (test_entry_index == entry_index) {
+ needs_to_be_added = false;
+ } else if (HaveTheSameDexMaps(stack_maps_.Get(test_entry_index), entry)) {
+ result = test_entry_index;
+ needs_to_be_added = false;
+ break;
+ }
+ }
+ if (needs_to_be_added) {
+ entries_it->second.Add(entry_index);
+ }
+ return result;
+ }
+
+ bool HaveTheSameDexMaps(const StackMapEntry& a, const StackMapEntry& b) const {
+ if (a.live_dex_registers_mask == nullptr && b.live_dex_registers_mask == nullptr) {
+ return true;
+ }
+ if (a.live_dex_registers_mask == nullptr || b.live_dex_registers_mask == nullptr) {
+ return false;
+ }
+ if (a.num_dex_registers != b.num_dex_registers) {
+ return false;
+ }
+
+ int index_in_dex_register_locations = 0;
+ for (uint32_t i = 0; i < a.num_dex_registers; i++) {
+ if (a.live_dex_registers_mask->IsBitSet(i) != b.live_dex_registers_mask->IsBitSet(i)) {
+ return false;
+ }
+ if (a.live_dex_registers_mask->IsBitSet(i)) {
+ size_t a_loc = dex_register_locations_.Get(
+ a.dex_register_locations_start_index + index_in_dex_register_locations);
+ size_t b_loc = dex_register_locations_.Get(
+ b.dex_register_locations_start_index + index_in_dex_register_locations);
+ if (a_loc != b_loc) {
+ return false;
+ }
+ ++index_in_dex_register_locations;
+ }
+ }
+ return true;
+ }
+
ArenaAllocator* allocator_;
GrowableArray<StackMapEntry> stack_maps_;
- GrowableArray<DexRegisterLocation> dex_register_locations_;
+
+ // A catalog of unique [location_kind, register_value] pairs (per method).
+ GrowableArray<DexRegisterLocation> location_catalog_entries_;
+ // Map from Dex register location catalog entries to their indices in the
+ // location catalog.
+ typedef HashMap<DexRegisterLocation, size_t, LocationCatalogEntriesIndicesEmptyFn,
+ DexRegisterLocationHashFn> LocationCatalogEntriesIndices;
+ LocationCatalogEntriesIndices location_catalog_entries_indices_;
+
+ // A set of concatenated maps of Dex register location indices into
+ // `location_catalog_entries_`.
+ GrowableArray<size_t> dex_register_locations_;
GrowableArray<InlineInfoEntry> inline_infos_;
int stack_mask_max_;
+ uint32_t dex_pc_max_;
+ uint32_t native_pc_offset_max_;
+ uint32_t register_mask_max_;
size_t number_of_stack_maps_with_inline_info_;
- ART_FRIEND_TEST(StackMapTest, Test1);
- ART_FRIEND_TEST(StackMapTest, Test2);
- ART_FRIEND_TEST(StackMapTest, TestNonLiveDexRegisters);
+ ArenaSafeMap<uint32_t, GrowableArray<uint32_t>> dex_map_hash_to_stack_map_indices_;
+
+ static constexpr uint32_t kNoSameDexMapFound = -1;
DISALLOW_COPY_AND_ASSIGN(StackMapStream);
};
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 87ac2e7..8d160bc 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -31,6 +31,8 @@
return true;
}
+using Kind = DexRegisterLocation::Kind;
+
TEST(StackMapTest, Test1) {
ArenaPool pool;
ArenaAllocator arena(&pool);
@@ -39,8 +41,8 @@
ArenaBitVector sp_mask(&arena, 0, false);
size_t number_of_dex_registers = 2;
stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
- stream.AddDexRegisterEntry(0, DexRegisterLocation::Kind::kInStack, 0);
- stream.AddDexRegisterEntry(1, DexRegisterLocation::Kind::kConstant, -2);
+ stream.AddDexRegisterEntry(0, Kind::kInStack, 0); // Short location.
+ stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Short location.
size_t size = stream.ComputeNeededSize();
void* memory = arena.Alloc(size, kArenaAllocMisc);
@@ -51,29 +53,65 @@
ASSERT_EQ(0u, code_info.GetStackMaskSize());
ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
+ uint32_t number_of_location_catalog_entries =
+ code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ ASSERT_EQ(2u, number_of_location_catalog_entries);
+ DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
+ // The Dex register location catalog contains:
+ // - one 1-byte short Dex register location, and
+ // - one 5-byte large Dex register location.
+ size_t expected_location_catalog_size = 1u + 5u;
+ ASSERT_EQ(expected_location_catalog_size, location_catalog.Size());
+
StackMap stack_map = code_info.GetStackMapAt(0);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
- ASSERT_EQ(0u, stack_map.GetDexPc());
- ASSERT_EQ(64u, stack_map.GetNativePcOffset());
- ASSERT_EQ(0x3u, stack_map.GetRegisterMask());
+ ASSERT_EQ(0u, stack_map.GetDexPc(code_info));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(code_info));
+ ASSERT_EQ(0x3u, stack_map.GetRegisterMask(code_info));
- MemoryRegion stack_mask = stack_map.GetStackMask();
+ MemoryRegion stack_mask = stack_map.GetStackMask(code_info);
ASSERT_TRUE(SameBits(stack_mask, sp_mask));
- ASSERT_TRUE(stack_map.HasDexRegisterMap());
- DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- ASSERT_EQ(7u, dex_registers.Size());
- DexRegisterLocation location0 = dex_registers.GetLocationKindAndValue(0, number_of_dex_registers);
- DexRegisterLocation location1 = dex_registers.GetLocationKindAndValue(1, number_of_dex_registers);
- ASSERT_EQ(DexRegisterLocation::Kind::kInStack, location0.GetKind());
- ASSERT_EQ(DexRegisterLocation::Kind::kConstant, location1.GetKind());
- ASSERT_EQ(DexRegisterLocation::Kind::kInStack, location0.GetInternalKind());
- ASSERT_EQ(DexRegisterLocation::Kind::kConstantLargeValue, location1.GetInternalKind());
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(code_info));
+ DexRegisterMap dex_register_map =
+ code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
+ ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+ ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
+ // The Dex register map contains:
+ // - one 1-byte live bit mask, and
+ // - one 1-byte set of location catalog entry indices composed of two 2-bit values.
+ size_t expected_dex_register_map_size = 1u + 1u;
+ ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
+
+ ASSERT_EQ(Kind::kInStack,
+ dex_register_map.GetLocationKind(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kConstant,
+ dex_register_map.GetLocationKind(1, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kInStack,
+ dex_register_map.GetLocationInternalKind(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kConstantLargeValue,
+ dex_register_map.GetLocationInternalKind(1, number_of_dex_registers, code_info));
+ ASSERT_EQ(0, dex_register_map.GetStackOffsetInBytes(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info));
+
+ size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
+ 0, number_of_dex_registers, number_of_location_catalog_entries);
+ size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(
+ 1, number_of_dex_registers, number_of_location_catalog_entries);
+ ASSERT_EQ(0u, index0);
+ ASSERT_EQ(1u, index1);
+ DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0);
+ DexRegisterLocation location1 = location_catalog.GetDexRegisterLocation(index1);
+ ASSERT_EQ(Kind::kInStack, location0.GetKind());
+ ASSERT_EQ(Kind::kConstant, location1.GetKind());
+ ASSERT_EQ(Kind::kInStack, location0.GetInternalKind());
+ ASSERT_EQ(Kind::kConstantLargeValue, location1.GetInternalKind());
ASSERT_EQ(0, location0.GetValue());
ASSERT_EQ(-2, location1.GetValue());
- ASSERT_FALSE(stack_map.HasInlineInfo());
+ ASSERT_FALSE(stack_map.HasInlineInfo(code_info));
}
TEST(StackMapTest, Test2) {
@@ -86,8 +124,8 @@
sp_mask1.SetBit(4);
size_t number_of_dex_registers = 2;
stream.AddStackMapEntry(0, 64, 0x3, &sp_mask1, number_of_dex_registers, 2);
- stream.AddDexRegisterEntry(0, DexRegisterLocation::Kind::kInStack, 0);
- stream.AddDexRegisterEntry(1, DexRegisterLocation::Kind::kConstant, -2);
+ stream.AddDexRegisterEntry(0, Kind::kInStack, 0); // Short location.
+ stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Large location.
stream.AddInlineInfoEntry(42);
stream.AddInlineInfoEntry(82);
@@ -95,8 +133,8 @@
sp_mask2.SetBit(3);
sp_mask1.SetBit(8);
stream.AddStackMapEntry(1, 128, 0xFF, &sp_mask2, number_of_dex_registers, 0);
- stream.AddDexRegisterEntry(0, DexRegisterLocation::Kind::kInRegister, 18);
- stream.AddDexRegisterEntry(1, DexRegisterLocation::Kind::kInFpuRegister, 3);
+ stream.AddDexRegisterEntry(0, Kind::kInRegister, 18); // Short location.
+ stream.AddDexRegisterEntry(1, Kind::kInFpuRegister, 3); // Short location.
size_t size = stream.ComputeNeededSize();
void* memory = arena.Alloc(size, kArenaAllocMisc);
@@ -107,34 +145,67 @@
ASSERT_EQ(1u, code_info.GetStackMaskSize());
ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
+ uint32_t number_of_location_catalog_entries =
+ code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ ASSERT_EQ(4u, number_of_location_catalog_entries);
+ DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
+ // The Dex register location catalog contains:
+ // - three 1-byte short Dex register locations, and
+ // - one 5-byte large Dex register location.
+ size_t expected_location_catalog_size = 3u * 1u + 5u;
+ ASSERT_EQ(expected_location_catalog_size, location_catalog.Size());
+
// First stack map.
{
StackMap stack_map = code_info.GetStackMapAt(0);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
- ASSERT_EQ(0u, stack_map.GetDexPc());
- ASSERT_EQ(64u, stack_map.GetNativePcOffset());
- ASSERT_EQ(0x3u, stack_map.GetRegisterMask());
+ ASSERT_EQ(0u, stack_map.GetDexPc(code_info));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(code_info));
+ ASSERT_EQ(0x3u, stack_map.GetRegisterMask(code_info));
- MemoryRegion stack_mask = stack_map.GetStackMask();
+ MemoryRegion stack_mask = stack_map.GetStackMask(code_info);
ASSERT_TRUE(SameBits(stack_mask, sp_mask1));
- ASSERT_TRUE(stack_map.HasDexRegisterMap());
- DexRegisterMap dex_registers =
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(code_info));
+ DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- ASSERT_EQ(7u, dex_registers.Size());
- DexRegisterLocation location0 =
- dex_registers.GetLocationKindAndValue(0, number_of_dex_registers);
- DexRegisterLocation location1 =
- dex_registers.GetLocationKindAndValue(1, number_of_dex_registers);
- ASSERT_EQ(DexRegisterLocation::Kind::kInStack, location0.GetKind());
- ASSERT_EQ(DexRegisterLocation::Kind::kConstant, location1.GetKind());
- ASSERT_EQ(DexRegisterLocation::Kind::kInStack, location0.GetInternalKind());
- ASSERT_EQ(DexRegisterLocation::Kind::kConstantLargeValue, location1.GetInternalKind());
+ ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
+ ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+ ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
+ // The Dex register map contains:
+ // - one 1-byte live bit mask, and
+ // - one 1-byte set of location catalog entry indices composed of two 2-bit values.
+ size_t expected_dex_register_map_size = 1u + 1u;
+ ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
+
+ ASSERT_EQ(Kind::kInStack,
+ dex_register_map.GetLocationKind(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kConstant,
+ dex_register_map.GetLocationKind(1, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kInStack,
+ dex_register_map.GetLocationInternalKind(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kConstantLargeValue,
+ dex_register_map.GetLocationInternalKind(1, number_of_dex_registers, code_info));
+ ASSERT_EQ(0, dex_register_map.GetStackOffsetInBytes(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info));
+
+ size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
+ 0, number_of_dex_registers, number_of_location_catalog_entries);
+ size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(
+ 1, number_of_dex_registers, number_of_location_catalog_entries);
+ ASSERT_EQ(0u, index0);
+ ASSERT_EQ(1u, index1);
+ DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0);
+ DexRegisterLocation location1 = location_catalog.GetDexRegisterLocation(index1);
+ ASSERT_EQ(Kind::kInStack, location0.GetKind());
+ ASSERT_EQ(Kind::kConstant, location1.GetKind());
+ ASSERT_EQ(Kind::kInStack, location0.GetInternalKind());
+ ASSERT_EQ(Kind::kConstantLargeValue, location1.GetInternalKind());
ASSERT_EQ(0, location0.GetValue());
ASSERT_EQ(-2, location1.GetValue());
- ASSERT_TRUE(stack_map.HasInlineInfo());
+ ASSERT_TRUE(stack_map.HasInlineInfo(code_info));
InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
ASSERT_EQ(2u, inline_info.GetDepth());
ASSERT_EQ(42u, inline_info.GetMethodReferenceIndexAtDepth(0));
@@ -146,29 +217,52 @@
StackMap stack_map = code_info.GetStackMapAt(1);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1u)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(128u)));
- ASSERT_EQ(1u, stack_map.GetDexPc());
- ASSERT_EQ(128u, stack_map.GetNativePcOffset());
- ASSERT_EQ(0xFFu, stack_map.GetRegisterMask());
+ ASSERT_EQ(1u, stack_map.GetDexPc(code_info));
+ ASSERT_EQ(128u, stack_map.GetNativePcOffset(code_info));
+ ASSERT_EQ(0xFFu, stack_map.GetRegisterMask(code_info));
- MemoryRegion stack_mask = stack_map.GetStackMask();
+ MemoryRegion stack_mask = stack_map.GetStackMask(code_info);
ASSERT_TRUE(SameBits(stack_mask, sp_mask2));
- ASSERT_TRUE(stack_map.HasDexRegisterMap());
- DexRegisterMap dex_registers =
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(code_info));
+ DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- ASSERT_EQ(3u, dex_registers.Size());
- DexRegisterLocation location0 =
- dex_registers.GetLocationKindAndValue(0, number_of_dex_registers);
- DexRegisterLocation location1 =
- dex_registers.GetLocationKindAndValue(1, number_of_dex_registers);
- ASSERT_EQ(DexRegisterLocation::Kind::kInRegister, location0.GetKind());
- ASSERT_EQ(DexRegisterLocation::Kind::kInFpuRegister, location1.GetKind());
- ASSERT_EQ(DexRegisterLocation::Kind::kInRegister, location0.GetInternalKind());
- ASSERT_EQ(DexRegisterLocation::Kind::kInFpuRegister, location1.GetInternalKind());
+ ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
+ ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+ ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
+ // The Dex register map contains:
+ // - one 1-byte live bit mask, and
+ // - one 1-byte set of location catalog entry indices composed of two 2-bit values.
+ size_t expected_dex_register_map_size = 1u + 1u;
+ ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
+
+ ASSERT_EQ(Kind::kInRegister,
+ dex_register_map.GetLocationKind(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kInFpuRegister,
+ dex_register_map.GetLocationKind(1, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kInRegister,
+ dex_register_map.GetLocationInternalKind(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kInFpuRegister,
+ dex_register_map.GetLocationInternalKind(1, number_of_dex_registers, code_info));
+ ASSERT_EQ(18, dex_register_map.GetMachineRegister(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(3, dex_register_map.GetMachineRegister(1, number_of_dex_registers, code_info));
+
+ size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
+ 0, number_of_dex_registers, number_of_location_catalog_entries);
+ size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(
+ 1, number_of_dex_registers, number_of_location_catalog_entries);
+ ASSERT_EQ(2u, index0);
+ ASSERT_EQ(3u, index1);
+ DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0);
+ DexRegisterLocation location1 = location_catalog.GetDexRegisterLocation(index1);
+ ASSERT_EQ(Kind::kInRegister, location0.GetKind());
+ ASSERT_EQ(Kind::kInFpuRegister, location1.GetKind());
+ ASSERT_EQ(Kind::kInRegister, location0.GetInternalKind());
+ ASSERT_EQ(Kind::kInFpuRegister, location1.GetInternalKind());
ASSERT_EQ(18, location0.GetValue());
ASSERT_EQ(3, location1.GetValue());
- ASSERT_FALSE(stack_map.HasInlineInfo());
+ ASSERT_FALSE(stack_map.HasInlineInfo(code_info));
}
}
@@ -180,8 +274,8 @@
ArenaBitVector sp_mask(&arena, 0, false);
uint32_t number_of_dex_registers = 2;
stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
- stream.AddDexRegisterEntry(0, DexRegisterLocation::Kind::kNone, 0);
- stream.AddDexRegisterEntry(1, DexRegisterLocation::Kind::kConstant, -2);
+ stream.AddDexRegisterEntry(0, Kind::kNone, 0); // No location.
+ stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Large location.
size_t size = stream.ComputeNeededSize();
void* memory = arena.Alloc(size, kArenaAllocMisc);
@@ -189,15 +283,211 @@
stream.FillIn(region);
CodeInfo code_info(region);
+ ASSERT_EQ(0u, code_info.GetStackMaskSize());
+ ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
+
+ uint32_t number_of_location_catalog_entries =
+ code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ ASSERT_EQ(1u, number_of_location_catalog_entries);
+ DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
+ // The Dex register location catalog contains:
+ // - one 5-byte large Dex register location.
+ size_t expected_location_catalog_size = 5u;
+ ASSERT_EQ(expected_location_catalog_size, location_catalog.Size());
+
StackMap stack_map = code_info.GetStackMapAt(0);
- ASSERT_TRUE(stack_map.HasDexRegisterMap());
- DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, 2);
- ASSERT_EQ(DexRegisterLocation::Kind::kNone,
- dex_registers.GetLocationKind(0, number_of_dex_registers));
- ASSERT_EQ(DexRegisterLocation::Kind::kConstant,
- dex_registers.GetLocationKind(1, number_of_dex_registers));
- ASSERT_EQ(-2, dex_registers.GetConstant(1, number_of_dex_registers));
- ASSERT_FALSE(stack_map.HasInlineInfo());
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
+ ASSERT_EQ(0u, stack_map.GetDexPc(code_info));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(code_info));
+ ASSERT_EQ(0x3u, stack_map.GetRegisterMask(code_info));
+
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(code_info));
+ DexRegisterMap dex_register_map =
+ code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ ASSERT_FALSE(dex_register_map.IsDexRegisterLive(0));
+ ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+ ASSERT_EQ(1u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
+ // The Dex register map contains:
+ // - one 1-byte live bit mask.
+ // No space is allocated for the sole location catalog entry index, as it is useless.
+ size_t expected_dex_register_map_size = 1u + 0u;
+ ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
+
+ ASSERT_EQ(Kind::kNone,
+ dex_register_map.GetLocationKind(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kConstant,
+ dex_register_map.GetLocationKind(1, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kNone,
+ dex_register_map.GetLocationInternalKind(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kConstantLargeValue,
+ dex_register_map.GetLocationInternalKind(1, number_of_dex_registers, code_info));
+ ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info));
+
+ size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
+ 0, number_of_dex_registers, number_of_location_catalog_entries);
+ size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(
+ 1, number_of_dex_registers, number_of_location_catalog_entries);
+ ASSERT_EQ(DexRegisterLocationCatalog::kNoLocationEntryIndex, index0);
+ ASSERT_EQ(0u, index1);
+ DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0);
+ DexRegisterLocation location1 = location_catalog.GetDexRegisterLocation(index1);
+ ASSERT_EQ(Kind::kNone, location0.GetKind());
+ ASSERT_EQ(Kind::kConstant, location1.GetKind());
+ ASSERT_EQ(Kind::kNone, location0.GetInternalKind());
+ ASSERT_EQ(Kind::kConstantLargeValue, location1.GetInternalKind());
+ ASSERT_EQ(0, location0.GetValue());
+ ASSERT_EQ(-2, location1.GetValue());
+
+ ASSERT_FALSE(stack_map.HasInlineInfo(code_info));
+}
+
+// Generate a stack map whose dex register offset is
+// StackMap::kNoDexRegisterMapSmallEncoding, and ensure we do
+// not treat it as kNoDexRegisterMap.
+TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ StackMapStream stream(&arena);
+
+ ArenaBitVector sp_mask(&arena, 0, false);
+ uint32_t number_of_dex_registers = 1024;
+ // Create the first stack map (and its Dex register map).
+ stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ uint32_t number_of_dex_live_registers_in_dex_register_map_0 = number_of_dex_registers - 8;
+ for (uint32_t i = 0; i < number_of_dex_live_registers_in_dex_register_map_0; ++i) {
+ // Use two different Dex register locations to populate this map,
+ // as using a single value (in the whole CodeInfo object) would
+ // make this Dex register mapping data empty (see
+ // art::DexRegisterMap::SingleEntrySizeInBits).
+ stream.AddDexRegisterEntry(i, Kind::kConstant, i % 2); // Short location.
+ }
+ // Create the second stack map (and its Dex register map).
+ stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ for (uint32_t i = 0; i < number_of_dex_registers; ++i) {
+ stream.AddDexRegisterEntry(i, Kind::kConstant, 0); // Short location.
+ }
+
+ size_t size = stream.ComputeNeededSize();
+ void* memory = arena.Alloc(size, kArenaAllocMisc);
+ MemoryRegion region(memory, size);
+ stream.FillIn(region);
+
+ CodeInfo code_info(region);
+ // The location catalog contains two entries (DexRegisterLocation(kConstant, 0)
+ // and DexRegisterLocation(kConstant, 1)), therefore the location catalog index
+ // has a size of 1 bit.
+ uint32_t number_of_location_catalog_entries =
+ code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ ASSERT_EQ(2u, number_of_location_catalog_entries);
+ ASSERT_EQ(1u, DexRegisterMap::SingleEntrySizeInBits(number_of_location_catalog_entries));
+
+ // The first Dex register map contains:
+ // - a live register bit mask for 1024 registers (that is, 128 bytes of
+ // data); and
+ // - Dex register mapping information for 1016 1-bit Dex (live) register
+ // locations (that is, 127 bytes of data).
+ // Hence it has a size of 255 bytes, and therefore...
+ ASSERT_EQ(128u, DexRegisterMap::GetLiveBitMaskSize(number_of_dex_registers));
+ StackMap stack_map0 = code_info.GetStackMapAt(0);
+ DexRegisterMap dex_register_map0 =
+ code_info.GetDexRegisterMapOf(stack_map0, number_of_dex_registers);
+ ASSERT_EQ(127u, dex_register_map0.GetLocationMappingDataSize(number_of_dex_registers,
+ number_of_location_catalog_entries));
+ ASSERT_EQ(255u, dex_register_map0.Size());
+
+ StackMap stack_map1 = code_info.GetStackMapAt(1);
+ ASSERT_TRUE(stack_map1.HasDexRegisterMap(code_info));
+ // ...the offset of the second Dex register map (relative to the
+ // beginning of the Dex register maps region) is 255 (i.e.,
+ // kNoDexRegisterMapSmallEncoding).
+ ASSERT_NE(stack_map1.GetDexRegisterMapOffset(code_info), StackMap::kNoDexRegisterMap);
+ ASSERT_EQ(stack_map1.GetDexRegisterMapOffset(code_info), 0xFFu);
+}
+
+TEST(StackMapTest, TestShareDexRegisterMap) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ StackMapStream stream(&arena);
+
+ ArenaBitVector sp_mask(&arena, 0, false);
+ uint32_t number_of_dex_registers = 2;
+ // First stack map.
+ stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.AddDexRegisterEntry(0, Kind::kInRegister, 0); // Short location.
+ stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Large location.
+ // Second stack map, which should share the same dex register map.
+ stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.AddDexRegisterEntry(0, Kind::kInRegister, 0); // Short location.
+ stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Large location.
+ // Third stack map (doesn't share the dex register map).
+ stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.AddDexRegisterEntry(0, Kind::kInRegister, 2); // Short location.
+ stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Large location.
+
+ size_t size = stream.ComputeNeededSize();
+ void* memory = arena.Alloc(size, kArenaAllocMisc);
+ MemoryRegion region(memory, size);
+ stream.FillIn(region);
+
+ CodeInfo ci(region);
+ // Verify first stack map.
+ StackMap sm0 = ci.GetStackMapAt(0);
+ DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0, number_of_dex_registers);
+ ASSERT_EQ(0, dex_registers0.GetMachineRegister(0, number_of_dex_registers, ci));
+ ASSERT_EQ(-2, dex_registers0.GetConstant(1, number_of_dex_registers, ci));
+
+ // Verify second stack map.
+ StackMap sm1 = ci.GetStackMapAt(1);
+ DexRegisterMap dex_registers1 = ci.GetDexRegisterMapOf(sm1, number_of_dex_registers);
+ ASSERT_EQ(0, dex_registers1.GetMachineRegister(0, number_of_dex_registers, ci));
+ ASSERT_EQ(-2, dex_registers1.GetConstant(1, number_of_dex_registers, ci));
+
+ // Verify third stack map.
+ StackMap sm2 = ci.GetStackMapAt(2);
+ DexRegisterMap dex_registers2 = ci.GetDexRegisterMapOf(sm2, number_of_dex_registers);
+ ASSERT_EQ(2, dex_registers2.GetMachineRegister(0, number_of_dex_registers, ci));
+ ASSERT_EQ(-2, dex_registers2.GetConstant(1, number_of_dex_registers, ci));
+
+ // Verify dex register map offsets.
+ ASSERT_EQ(sm0.GetDexRegisterMapOffset(ci), sm1.GetDexRegisterMapOffset(ci));
+ ASSERT_NE(sm0.GetDexRegisterMapOffset(ci), sm2.GetDexRegisterMapOffset(ci));
+ ASSERT_NE(sm1.GetDexRegisterMapOffset(ci), sm2.GetDexRegisterMapOffset(ci));
+}
+
+TEST(StackMapTest, TestNoDexRegisterMap) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ StackMapStream stream(&arena);
+
+ ArenaBitVector sp_mask(&arena, 0, false);
+ uint32_t number_of_dex_registers = 0;
+ stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+
+ size_t size = stream.ComputeNeededSize();
+ void* memory = arena.Alloc(size, kArenaAllocMisc);
+ MemoryRegion region(memory, size);
+ stream.FillIn(region);
+
+ CodeInfo code_info(region);
+ ASSERT_EQ(0u, code_info.GetStackMaskSize());
+ ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
+
+ uint32_t number_of_location_catalog_entries =
+ code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ ASSERT_EQ(0u, number_of_location_catalog_entries);
+ DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
+ ASSERT_EQ(0u, location_catalog.Size());
+
+ StackMap stack_map = code_info.GetStackMapAt(0);
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
+ ASSERT_EQ(0u, stack_map.GetDexPc(code_info));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(code_info));
+ ASSERT_EQ(0x3u, stack_map.GetRegisterMask(code_info));
+
+ ASSERT_FALSE(stack_map.HasDexRegisterMap(code_info));
+ ASSERT_FALSE(stack_map.HasInlineInfo(code_info));
}
} // namespace art
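
The 128 + 127 = 255 arithmetic asserted in DexRegisterMapOffsetOverflow follows directly from the encoding the tests describe: a live bit mask of ceil(n/8) bytes, plus one catalog index of ceil(log2(#catalog entries)) bits per live register. A small sketch of that computation (formulas inferred from the expected values above):

    #include <cstddef>
    #include <iostream>

    size_t LiveBitMaskBytes(size_t num_registers) {
      return (num_registers + 7) / 8;  // one bit per Dex register, rounded up
    }

    size_t BitsPerCatalogIndex(size_t catalog_entries) {
      size_t bits = 0;
      while ((size_t{1} << bits) < catalog_entries) {
        ++bits;  // ceil(log2(catalog_entries))
      }
      return bits;
    }

    int main() {
      size_t mask_bytes = LiveBitMaskBytes(1024);                   // 128
      size_t data_bytes = (1016 * BitsPerCatalogIndex(2) + 7) / 8;  // 127
      std::cout << mask_bytes + data_bytes << " bytes\n";           // 255
      // 255 == 0xFF, so the next map's offset equals the small "no map"
      // marker, which is the collision the test guards against.
      return 0;
    }
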
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 8730f52..dd0dba2 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -100,6 +100,10 @@
return rm_;
}
+ Register GetSecondRegister() const {
+ return rs_;
+ }
+
enum Type {
kUnknown = -1,
kRegister,
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 6d0571e..a894319 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -825,10 +825,12 @@
if (so.IsImmediate()) {
// Check special cases.
if ((opcode == SUB || opcode == ADD) && (so.GetImmediate() < (1u << 12))) {
- if (opcode == SUB) {
- thumb_opcode = 5U /* 0b0101 */;
- } else {
- thumb_opcode = 0;
+ if (!set_cc) {
+ if (opcode == SUB) {
+ thumb_opcode = 5U;
+ } else if (opcode == ADD) {
+ thumb_opcode = 0U;
+ }
}
uint32_t imm = so.GetImmediate();
@@ -836,13 +838,14 @@
uint32_t imm3 = (imm >> 8) & 7U /* 0b111 */;
uint32_t imm8 = imm & 0xff;
- encoding = B31 | B30 | B29 | B28 | B25 |
- thumb_opcode << 21 |
- rn << 16 |
- rd << 8 |
- i << 26 |
- imm3 << 12 |
- imm8;
+ encoding = B31 | B30 | B29 | B28 |
+ (set_cc ? B20 : B25) |
+ thumb_opcode << 21 |
+ rn << 16 |
+ rd << 8 |
+ i << 26 |
+ imm3 << 12 |
+ imm8;
} else {
// Modified immediate.
uint32_t imm = ModifiedImmediate(so.encodingThumb());
@@ -852,19 +855,19 @@
}
encoding = B31 | B30 | B29 | B28 |
thumb_opcode << 21 |
- (set_cc ? 1 : 0) << 20 |
+ (set_cc ? B20 : 0) |
rn << 16 |
rd << 8 |
imm;
}
} else if (so.IsRegister()) {
- // Register (possibly shifted)
- encoding = B31 | B30 | B29 | B27 | B25 |
- thumb_opcode << 21 |
- (set_cc ? 1 : 0) << 20 |
- rn << 16 |
- rd << 8 |
- so.encodingThumb();
+ // Register (possibly shifted)
+ encoding = B31 | B30 | B29 | B27 | B25 |
+ thumb_opcode << 21 |
+ (set_cc ? B20 : 0) |
+ rn << 16 |
+ rd << 8 |
+ so.encodingThumb();
}
Emit32(encoding);
}
@@ -921,6 +924,8 @@
use_immediate = true;
immediate = so.GetImmediate();
} else {
+ CHECK(!(so.IsRegister() && so.IsShift() && so.GetSecondRegister() != kNoRegister))
+ << "No register-shifted register instruction available in thumb";
// Adjust rn and rd: only two registers will be emitted.
switch (opcode) {
case AND:
diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc
index ebea9d4..813996b 100644
--- a/compiler/utils/arm/assembler_thumb2_test.cc
+++ b/compiler/utils/arm/assembler_thumb2_test.cc
@@ -227,4 +227,24 @@
DriverStr(expected, "abs");
}
+TEST_F(AssemblerThumb2Test, sub) {
+ __ subs(arm::R1, arm::R0, arm::ShifterOperand(42));
+ __ sub(arm::R1, arm::R0, arm::ShifterOperand(42));
+
+ const char* expected =
+ "subs r1, r0, #42\n"
+ "subw r1, r0, #42\n";
+ DriverStr(expected, "sub");
+}
+
+TEST_F(AssemblerThumb2Test, add) {
+ __ adds(arm::R1, arm::R0, arm::ShifterOperand(42));
+ __ add(arm::R1, arm::R0, arm::ShifterOperand(42));
+
+ const char* expected =
+ "adds r1, r0, #42\n"
+ "addw r1, r0, #42\n";
+ DriverStr(expected, "add");
+}
+
} // namespace art
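
The assembler fix above hinges on two distinct 32-bit Thumb2 encodings: the flag-setting form carries the S bit at B20, while the plain 12-bit-immediate ADDW/SUBW form is selected by B25 and cannot set flags. A sketch of just that bit selection (template only, with illustrative opcode values, not the full ARM encoding):

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t B20 = 1u << 20;  // S bit in the flag-setting encoding
    constexpr uint32_t B25 = 1u << 25;  // selects the plain-immediate (W) form
    constexpr uint32_t kBase = (1u << 31) | (1u << 30) | (1u << 29) | (1u << 28);

    uint32_t AddSubImmTemplate(bool set_cc, uint32_t thumb_opcode) {
      // Mirrors the patch: exactly one of B20 (adds/subs) or B25 (addw/subw).
      return kBase | (set_cc ? B20 : B25) | (thumb_opcode << 21);
    }

    int main() {
      // The real assembler uses different opcode values per form; 5 (SUB) is
      // used for both here just to show which template bit changes.
      std::printf("subs template: 0x%08x\n", AddSubImmTemplate(true, 5));
      std::printf("subw template: 0x%08x\n", AddSubImmTemplate(false, 5));
      return 0;
    }
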
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 90170ce..5773459 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -146,6 +146,12 @@
EmitLabel(lbl, dst.length_ + 5);
}
+void X86Assembler::bswapl(Register dst) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xC8 + dst);
+}
+
void X86Assembler::movzxb(Register dst, ByteRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x0F);
@@ -725,6 +731,32 @@
}
+void X86Assembler::andps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x54);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::andpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x54);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::orpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x56);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
void X86Assembler::xorps(XmmRegister dst, const Address& src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x0F);
@@ -733,6 +765,14 @@
}
+void X86Assembler::orps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x56);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
void X86Assembler::xorps(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x0F);
@@ -741,6 +781,14 @@
}
+void X86Assembler::andps(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x54);
+ EmitOperand(dst, src);
+}
+
+
void X86Assembler::andpd(XmmRegister dst, const Address& src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
@@ -1090,6 +1138,13 @@
}
+void X86Assembler::subl(const Address& address, Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x29);
+ EmitOperand(reg, address);
+}
+
+
void X86Assembler::cdq() {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x99);
@@ -1175,6 +1230,13 @@
}
+void X86Assembler::sbbl(const Address& address, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x19);
+ EmitOperand(src, address);
+}
+
+
void X86Assembler::incl(Register reg) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x40 + reg);
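
The new x86 instructions above all follow the same byte-level pattern: optional prefix, opcode byte(s), then either a register folded into the opcode (BSWAP's 0F C8+reg) or a ModRM byte (0F 54 for ANDPS, 0F 56 for ORPS, with a 66 prefix for the PD variants). A standalone sketch of that emission pattern (the buffer class is a stand-in for AssemblerBuffer):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Buffer {  // stand-in for ART's AssemblerBuffer
      std::vector<uint8_t> bytes;
      void Emit(uint8_t b) { bytes.push_back(b); }
    };

    void bswapl(Buffer* buf, int reg) {
      buf->Emit(0x0F);
      buf->Emit(0xC8 + reg);  // register is folded into the opcode byte
    }

    void andps(Buffer* buf, int dst, int src) {
      buf->Emit(0x0F);
      buf->Emit(0x54);
      buf->Emit(0xC0 | (dst << 3) | src);  // ModRM: mod=11, reg=dst, rm=src
    }

    int main() {
      Buffer buf;
      bswapl(&buf, 0);    // bswap eax
      andps(&buf, 1, 2);  // andps xmm1, xmm2
      for (uint8_t b : buf.bytes) std::printf("%02x ", b);
      std::printf("\n");  // 0f c8 0f 54 ca
      return 0;
    }
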
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 4d20db0..6ccf2e3 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -231,6 +231,8 @@
void movl(const Address& dst, const Immediate& imm);
void movl(const Address& dst, Label* lbl);
+ void bswapl(Register dst);
+
void movzxb(Register dst, ByteRegister src);
void movzxb(Register dst, const Address& src);
void movsxb(Register dst, ByteRegister src);
@@ -318,7 +320,13 @@
void xorps(XmmRegister dst, const Address& src);
void xorps(XmmRegister dst, XmmRegister src);
+ void andpd(XmmRegister dst, XmmRegister src);
void andpd(XmmRegister dst, const Address& src);
+ void andps(XmmRegister dst, XmmRegister src);
+ void andps(XmmRegister dst, const Address& src);
+
+ void orpd(XmmRegister dst, XmmRegister src);
+ void orps(XmmRegister dst, XmmRegister src);
void flds(const Address& src);
void fstps(const Address& dst);
@@ -389,6 +397,7 @@
void subl(Register dst, Register src);
void subl(Register reg, const Immediate& imm);
void subl(Register reg, const Address& address);
+ void subl(const Address& address, Register src);
void cdq();
@@ -407,6 +416,7 @@
void sbbl(Register dst, Register src);
void sbbl(Register reg, const Immediate& imm);
void sbbl(Register reg, const Address& address);
+ void sbbl(const Address& address, Register src);
void incl(Register reg);
void incl(const Address& address);
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index f2704b7..bd155ed 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1277,6 +1277,14 @@
}
+void X86_64Assembler::orq(CpuRegister dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ CHECK(imm.is_int32()); // orq only supports 32b immediate.
+ EmitRex64(dst);
+ EmitComplex(1, Operand(dst), imm);
+}
+
+
void X86_64Assembler::orq(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitRex64(dst, src);
@@ -1548,27 +1556,30 @@
void X86_64Assembler::imulq(CpuRegister reg, const Immediate& imm) {
+ imulq(reg, reg, imm);
+}
+
+void X86_64Assembler::imulq(CpuRegister dst, CpuRegister reg, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
CHECK(imm.is_int32()); // imulq only supports 32b immediate.
- EmitRex64(reg, reg);
+ EmitRex64(dst, reg);
// See whether imm can be represented as a sign-extended 8bit value.
int64_t v64 = imm.value();
if (IsInt<8>(v64)) {
// Sign-extension works.
EmitUint8(0x6B);
- EmitOperand(reg.LowBits(), Operand(reg));
+ EmitOperand(dst.LowBits(), Operand(reg));
EmitUint8(static_cast<uint8_t>(v64 & 0xFF));
} else {
// Not representable, use full immediate.
EmitUint8(0x69);
- EmitOperand(reg.LowBits(), Operand(reg));
+ EmitOperand(dst.LowBits(), Operand(reg));
EmitImmediate(imm);
}
}
-
void X86_64Assembler::imulq(CpuRegister reg, const Address& address) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitRex64(reg, address);
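
The three-operand imulq keeps the existing opcode selection: 0x6B with a single immediate byte when the value survives sign-extension from 8 bits, 0x69 with a full 32-bit immediate otherwise. A sketch of just that decision (IsInt8 mirrors art::IsInt<8>):

    #include <cstdint>
    #include <cstdio>

    bool IsInt8(int64_t value) {
      return value >= -128 && value <= 127;  // fits a sign-extended imm8
    }

    void PickImulForm(int64_t imm) {
      if (IsInt8(imm)) {
        std::printf("imm %lld -> opcode 0x6B + 1 immediate byte\n",
                    static_cast<long long>(imm));
      } else {
        std::printf("imm %lld -> opcode 0x69 + 4 immediate bytes\n",
                    static_cast<long long>(imm));
      }
    }

    int main() {
      PickImulForm(42);      // short form
      PickImulForm(100000);  // full 32-bit immediate
      return 0;
    }
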
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 5dfcf45..495f74f 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -429,6 +429,7 @@
void orl(CpuRegister dst, CpuRegister src);
void orl(CpuRegister reg, const Address& address);
void orq(CpuRegister dst, CpuRegister src);
+ void orq(CpuRegister dst, const Immediate& imm);
void xorl(CpuRegister dst, CpuRegister src);
void xorl(CpuRegister dst, const Immediate& imm);
@@ -467,6 +468,7 @@
void imulq(CpuRegister dst, CpuRegister src);
void imulq(CpuRegister reg, const Immediate& imm);
void imulq(CpuRegister reg, const Address& address);
+ void imulq(CpuRegister dst, CpuRegister reg, const Immediate& imm);
void imull(CpuRegister reg);
void imull(const Address& address);
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index bb80a70..2e1b7ae 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -109,9 +109,14 @@
UsageError("Usage: dex2oat [options]...");
UsageError("");
- UsageError(" --dex-file=<dex-file>: specifies a .dex file to compile.");
+ UsageError(" --dex-file=<dex-file>: specifies a .dex, .jar, or .apk file to compile.");
UsageError(" Example: --dex-file=/system/framework/core.jar");
UsageError("");
+ UsageError(" --dex-location=<dex-location>: specifies an alternative dex location to");
+ UsageError(" encode in the oat file for the corresponding --dex-file argument.");
+ UsageError(" Example: --dex-file=/home/build/out/system/framework/core.jar");
+ UsageError(" --dex-location=/system/framework/core.jar");
+ UsageError("");
UsageError(" --zip-fd=<file-descriptor>: specifies a file descriptor of a zip file");
UsageError(" containing a classes.dex file to compile.");
UsageError(" Example: --zip-fd=5");
@@ -494,6 +499,7 @@
bool watch_dog_enabled = true;
bool generate_gdb_information = kIsDebugBuild;
bool abort_on_hard_verifier_error = false;
+ bool requested_specific_compiler = false;
PassManagerOptions pass_manager_options;
@@ -603,6 +609,7 @@
Usage("Error parsing '%s': %s", option.data(), error_msg.c_str());
}
} else if (option.starts_with("--compiler-backend=")) {
+ requested_specific_compiler = true;
StringPiece backend_str = option.substr(strlen("--compiler-backend=")).data();
if (backend_str == "Quick") {
compiler_kind_ = Compiler::kQuick;
@@ -741,6 +748,13 @@
}
}
+ image_ = (!image_filename_.empty());
+ if (!requested_specific_compiler && !kUseOptimizingCompiler) {
+ // If no specific compiler is requested, the current behavior is
+ // to compile the boot image with Quick, and the rest with Optimizing.
+ compiler_kind_ = image_ ? Compiler::kQuick : Compiler::kOptimizing;
+ }
+
if (compiler_kind_ == Compiler::kOptimizing) {
// Optimizing only supports PIC mode.
compile_pic = true;
@@ -774,7 +788,6 @@
android_root_ += android_root_env_var;
}
- image_ = (!image_filename_.empty());
if (!image_ && boot_image_filename.empty()) {
boot_image_filename += android_root_;
boot_image_filename += "/framework/boot.art";
@@ -868,15 +881,7 @@
}
if (compiler_filter_string == nullptr) {
- if (instruction_set_ == kMips &&
- reinterpret_cast<const MipsInstructionSetFeatures*>(instruction_set_features_.get())->
- IsR6()) {
- // For R6, only interpreter mode is working.
- // TODO: fix compiler for Mips32r6.
- compiler_filter_string = "interpret-only";
- } else {
- compiler_filter_string = "speed";
- }
+ compiler_filter_string = "speed";
}
CHECK(compiler_filter_string != nullptr);
@@ -885,6 +890,8 @@
compiler_filter = CompilerOptions::kVerifyNone;
} else if (strcmp(compiler_filter_string, "interpret-only") == 0) {
compiler_filter = CompilerOptions::kInterpretOnly;
+ } else if (strcmp(compiler_filter_string, "verify-at-runtime") == 0) {
+ compiler_filter = CompilerOptions::kVerifyAtRuntime;
} else if (strcmp(compiler_filter_string, "space") == 0) {
compiler_filter = CompilerOptions::kSpace;
} else if (strcmp(compiler_filter_string, "balanced") == 0) {
@@ -1046,7 +1053,12 @@
}
verification_results_.reset(new VerificationResults(compiler_options_.get()));
- callbacks_.reset(new QuickCompilerCallbacks(verification_results_.get(), &method_inliner_map_));
+ callbacks_.reset(new QuickCompilerCallbacks(
+ verification_results_.get(),
+ &method_inliner_map_,
+ image_ ?
+ CompilerCallbacks::CallbackMode::kCompileBootImage :
+ CompilerCallbacks::CallbackMode::kCompileApp));
runtime_options.push_back(std::make_pair("compilercallbacks", callbacks_.get()));
runtime_options.push_back(
std::make_pair("imageinstructionset", GetInstructionSetString(instruction_set_)));
@@ -1217,19 +1229,16 @@
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
OpenClassPathFiles(runtime_->GetClassPathString(), dex_files_, &class_path_files_);
ScopedObjectAccess soa(self);
- std::vector<const DexFile*> class_path_files(dex_files_);
+
+ // Classpath: first the class-path given.
+ std::vector<const DexFile*> class_path_files;
for (auto& class_path_file : class_path_files_) {
class_path_files.push_back(class_path_file.get());
}
+ // Then the dex files we'll compile. Thus we'll resolve the class-path first.
+ class_path_files.insert(class_path_files.end(), dex_files_.begin(), dex_files_.end());
- for (size_t i = 0; i < class_path_files.size(); i++) {
- class_linker->RegisterDexFile(*class_path_files[i]);
- }
- soa.Env()->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader);
- ScopedLocalRef<jobject> class_loader_local(soa.Env(),
- soa.Env()->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader));
- class_loader = soa.Env()->NewGlobalRef(class_loader_local.get());
- Runtime::Current()->SetCompileTimeClassPath(class_loader, class_path_files);
+ class_loader = class_linker->CreatePathClassLoader(self, class_path_files);
}
driver_.reset(new CompilerDriver(compiler_options_.get(),
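
One behavioral change in dex2oat above is easy to miss: when no --compiler-backend is given (and Optimizing is not the build default), the boot image keeps Quick while everything else moves to Optimizing, which is why image_ is now computed before the compiler is chosen. A sketch of that selection rule (names are local stand-ins for the dex2oat fields):

    #include <cstdio>

    enum class Compiler { kQuick, kOptimizing };

    Compiler SelectCompiler(bool requested_specific_compiler,
                            Compiler requested,
                            bool compiling_boot_image,
                            bool use_optimizing_by_default) {
      if (requested_specific_compiler || use_optimizing_by_default) {
        return requested;  // an explicit choice (or build default) wins
      }
      return compiling_boot_image ? Compiler::kQuick : Compiler::kOptimizing;
    }

    int main() {
      Compiler c = SelectCompiler(false, Compiler::kQuick, true, false);
      std::printf("boot image -> %s\n",
                  c == Compiler::kQuick ? "Quick" : "Optimizing");
      c = SelectCompiler(false, Compiler::kQuick, false, false);
      std::printf("app        -> %s\n",
                  c == Compiler::kQuick ? "Quick" : "Optimizing");
      return 0;
    }
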
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index 3d8a567..b27b555 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -71,10 +71,14 @@
{ kRTypeMask, 17, "mthi", "S", },
{ kRTypeMask, 18, "mflo", "D", },
{ kRTypeMask, 19, "mtlo", "S", },
- { kRTypeMask, 24, "mult", "ST", },
- { kRTypeMask, 25, "multu", "ST", },
- { kRTypeMask, 26, "div", "ST", },
- { kRTypeMask, 27, "divu", "ST", },
+ { kRTypeMask | (0x1f << 6), 24, "mult", "ST", },
+ { kRTypeMask | (0x1f << 6), 25, "multu", "ST", },
+ { kRTypeMask | (0x1f << 6), 26, "div", "ST", },
+ { kRTypeMask | (0x1f << 6), 27, "divu", "ST", },
+ { kRTypeMask | (0x1f << 6), 24 + (2 << 6), "mul", "DST", },
+ { kRTypeMask | (0x1f << 6), 24 + (3 << 6), "muh", "DST", },
+ { kRTypeMask | (0x1f << 6), 26 + (2 << 6), "div", "DST", },
+ { kRTypeMask | (0x1f << 6), 26 + (3 << 6), "mod", "DST", },
{ kRTypeMask, 32, "add", "DST", },
{ kRTypeMask, 33, "addu", "DST", },
{ kRTypeMask, 34, "sub", "DST", },
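
Widening the masks above matters because MIPS32r6 reuses the classic MULT/DIV function codes and distinguishes the new three-register MUL/MUH/DIV/MOD forms purely by the shamt field (bits 10..6), which the old kRTypeMask ignored. A sketch of the disambiguation, with funct/shamt values taken from the table above:

    #include <cstdint>
    #include <cstdio>

    const char* DecodeMulDiv(uint32_t instruction) {
      uint32_t funct = instruction & 0x3f;         // bits 5..0
      uint32_t shamt = (instruction >> 6) & 0x1f;  // bits 10..6
      if (funct == 24 && shamt == 0) return "mult";  // pre-R6, hi/lo result
      if (funct == 24 && shamt == 2) return "mul";   // R6, three-register
      if (funct == 24 && shamt == 3) return "muh";   // R6, high word
      if (funct == 26 && shamt == 0) return "div";   // pre-R6, hi/lo result
      if (funct == 26 && shamt == 2) return "div";   // R6, three-register
      if (funct == 26 && shamt == 3) return "mod";   // R6, remainder
      return "unknown";
    }

    int main() {
      std::printf("%s\n", DecodeMulDiv(24u | (2u << 6)));  // mul
      std::printf("%s\n", DecodeMulDiv(26u | (3u << 6)));  // mod
      return 0;
    }
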
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index c27b3d4..322d3aa 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1039,62 +1039,11 @@
}
}
- void DumpRegisterMapping(std::ostream& os,
- size_t dex_register_num,
- DexRegisterLocation::Kind kind,
- int32_t value,
- const std::string& prefix = "v",
- const std::string& suffix = "") {
- os << " " << prefix << dex_register_num << ": "
- << DexRegisterLocation::PrettyDescriptor(kind)
- << " (" << value << ")" << suffix << '\n';
- }
-
- void DumpStackMapHeader(std::ostream& os, const CodeInfo& code_info, size_t stack_map_num) {
- StackMap stack_map = code_info.GetStackMapAt(stack_map_num);
- os << " StackMap " << stack_map_num
- << std::hex
- << " (dex_pc=0x" << stack_map.GetDexPc()
- << ", native_pc_offset=0x" << stack_map.GetNativePcOffset()
- << ", register_mask=0x" << stack_map.GetRegisterMask()
- << std::dec
- << ", stack_mask=0b";
- MemoryRegion stack_mask = stack_map.GetStackMask();
- for (size_t i = 0, e = stack_mask.size_in_bits(); i < e; ++i) {
- os << stack_mask.LoadBit(e - i - 1);
- }
- os << ")\n";
- };
-
// Display a CodeInfo object emitted by the optimizing compiler.
void DumpCodeInfo(std::ostream& os,
const CodeInfo& code_info,
const DexFile::CodeItem& code_item) {
- uint16_t number_of_dex_registers = code_item.registers_size_;
- uint32_t code_info_size = code_info.GetOverallSize();
- size_t number_of_stack_maps = code_info.GetNumberOfStackMaps();
- os << " Optimized CodeInfo (size=" << code_info_size
- << ", number_of_dex_registers=" << number_of_dex_registers
- << ", number_of_stack_maps=" << number_of_stack_maps << ")\n";
-
- // Display stack maps along with Dex register maps.
- for (size_t i = 0; i < number_of_stack_maps; ++i) {
- StackMap stack_map = code_info.GetStackMapAt(i);
- DumpStackMapHeader(os, code_info, i);
- if (stack_map.HasDexRegisterMap()) {
- DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- // TODO: Display the bit mask of live Dex registers.
- for (size_t j = 0; j < number_of_dex_registers; ++j) {
- if (dex_register_map.IsDexRegisterLive(j)) {
- DexRegisterLocation location =
- dex_register_map.GetLocationKindAndValue(j, number_of_dex_registers);
- DumpRegisterMapping(os, j, location.GetInternalKind(), location.GetValue());
- }
- }
- }
- }
- // TODO: Dump the stack map's inline information.
+ code_info.Dump(os, code_item.registers_size_);
}
// Display a vmap table.
@@ -1504,7 +1453,9 @@
std::string error_msg;
const OatFile* oat_file = class_linker->FindOpenedOatFileFromOatLocation(oat_location);
if (oat_file == nullptr) {
- oat_file = OatFile::Open(oat_location, oat_location, nullptr, nullptr, false, &error_msg);
+ oat_file = OatFile::Open(oat_location, oat_location,
+ nullptr, nullptr, false, nullptr,
+ &error_msg);
if (oat_file == nullptr) {
os << "NOT FOUND: " << error_msg << "\n";
return false;
@@ -1646,7 +1597,7 @@
os << StringPrintf("null %s\n", PrettyDescriptor(field->GetTypeDescriptor()).c_str());
} else {
// Grab the field type without causing resolution.
- mirror::Class* field_type = field->GetType(false);
+ mirror::Class* field_type = field->GetType<false>();
if (field_type != nullptr) {
PrettyObjectValue(os, field_type, value);
} else {
@@ -2207,16 +2158,12 @@
}
// Need a class loader.
- soa.Env()->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader);
- ScopedLocalRef<jobject> class_loader_local(soa.Env(),
- soa.Env()->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader));
- jobject class_loader = soa.Env()->NewGlobalRef(class_loader_local.get());
// Fake that we're a compiler.
std::vector<const DexFile*> class_path;
for (auto& dex_file : dex_files) {
class_path.push_back(dex_file.get());
}
- runtime->SetCompileTimeClassPath(class_loader, class_path);
+ jobject class_loader = class_linker->CreatePathClassLoader(self, class_path);
// Use the class loader while dumping.
StackHandleScope<1> scope(self);
@@ -2244,7 +2191,7 @@
std::ostream* os) {
std::string error_msg;
OatFile* oat_file = OatFile::Open(oat_filename, oat_filename, nullptr, nullptr, false,
- &error_msg);
+ nullptr, &error_msg);
if (oat_file == nullptr) {
fprintf(stderr, "Failed to open oat file from '%s': %s\n", oat_filename, error_msg.c_str());
return EXIT_FAILURE;
@@ -2260,7 +2207,7 @@
static int SymbolizeOat(const char* oat_filename, std::string& output_name) {
std::string error_msg;
OatFile* oat_file = OatFile::Open(oat_filename, oat_filename, nullptr, nullptr, false,
- &error_msg);
+ nullptr, &error_msg);
if (oat_file == nullptr) {
fprintf(stderr, "Failed to open oat file from '%s': %s\n", oat_filename, error_msg.c_str());
return EXIT_FAILURE;
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 6588288..dde5407 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -103,6 +103,7 @@
mirror/array.cc \
mirror/class.cc \
mirror/dex_cache.cc \
+ mirror/field.cc \
mirror/object.cc \
mirror/reference.cc \
mirror/stack_trace_element.cc \
@@ -151,6 +152,7 @@
runtime_options.cc \
signal_catcher.cc \
stack.cc \
+ stack_map.cc \
thread.cc \
thread_list.cc \
thread_pool.cc \
diff --git a/runtime/arch/instruction_set_features.cc b/runtime/arch/instruction_set_features.cc
index 1fd1dea..db4b0b1 100644
--- a/runtime/arch/instruction_set_features.cc
+++ b/runtime/arch/instruction_set_features.cc
@@ -250,7 +250,11 @@
}
first = true;
}
- DCHECK_EQ(use_default, features.empty());
+ // Expectation: "default" is standalone, no other flags. But an empty features vector after
+ // processing can also occur if the handled flags (at the moment only smp) are the only
+ // ones in the list. So, logically, we check "default -> features.empty()".
+ DCHECK(!use_default || features.empty());
+
return AddFeaturesFromSplitString(smp, features, error_msg);
}
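
The reworked DCHECK encodes the one-way implication "use_default -> features.empty()" as !use_default || features.empty(), replacing the too-strict two-way DCHECK_EQ. A quick truth table shows the one case the old check wrongly rejected:

    #include <cstdio>

    bool Implies(bool a, bool b) { return !a || b; }  // a -> b

    int main() {
      for (int a = 0; a <= 1; ++a) {
        for (int b = 0; b <= 1; ++b) {
          // Equality (the old check) fails for a=0, b=1; implication allows
          // it: an empty features list without "default" is now accepted.
          std::printf("use_default=%d empty=%d  eq=%d  implies=%d\n",
                      a, b, a == b, Implies(a, b));
        }
      }
      return 0;
    }
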
diff --git a/runtime/arch/mips/instruction_set_features_mips.cc b/runtime/arch/mips/instruction_set_features_mips.cc
index 00ab613..93d79b7 100644
--- a/runtime/arch/mips/instruction_set_features_mips.cc
+++ b/runtime/arch/mips/instruction_set_features_mips.cc
@@ -24,13 +24,56 @@
namespace art {
+// An enum for the Mips revision.
+enum class MipsLevel {
+ kBase,
+ kR2,
+ kR5,
+ kR6
+};
+
+#if defined(_MIPS_ARCH_MIPS32R6)
+static constexpr MipsLevel kRuntimeMipsLevel = MipsLevel::kR6;
+#elif defined(_MIPS_ARCH_MIPS32R5)
+static constexpr MipsLevel kRuntimeMipsLevel = MipsLevel::kR5;
+#elif defined(_MIPS_ARCH_MIPS32R2)
+static constexpr MipsLevel kRuntimeMipsLevel = MipsLevel::kR2;
+#else
+static constexpr MipsLevel kRuntimeMipsLevel = MipsLevel::kBase;
+#endif
+
+static void GetFlagsFromCppDefined(bool* mips_isa_gte2, bool* r6, bool* fpu_32bit) {
+ // Override defaults based on compiler flags.
+ if (kRuntimeMipsLevel >= MipsLevel::kR2) {
+ *mips_isa_gte2 = true;
+ } else {
+ *mips_isa_gte2 = false;
+ }
+
+ if (kRuntimeMipsLevel >= MipsLevel::kR5) {
+ *fpu_32bit = false;
+ } else {
+ *fpu_32bit = true;
+ }
+
+ if (kRuntimeMipsLevel >= MipsLevel::kR6) {
+ *r6 = true;
+ } else {
+ *r6 = false;
+ }
+}
+
const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromVariant(
const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED) {
bool smp = true; // Conservative default.
- bool fpu_32bit = true;
- bool mips_isa_gte2 = false;
- bool r6 = false;
+
+ // Override defaults based on compiler flags.
+ // This is needed when running ART tests where the variant is not defined.
+ bool fpu_32bit;
+ bool mips_isa_gte2;
+ bool r6;
+ GetFlagsFromCppDefined(&mips_isa_gte2, &r6, &fpu_32bit);
// Override defaults based on variant string.
// Only care if it is R1, R2 or R6 and we assume all CPUs will have a FP unit.
@@ -67,19 +110,11 @@
const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromCppDefines() {
// Assume conservative defaults.
const bool smp = true;
- bool fpu_32bit = true;
- bool mips_isa_gte2 = false;
- bool r6 = false;
- // Override defaults based on compiler flags.
-#if (_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS32R5) || defined(_MIPS_ARCH_MIPS32R6)
- mips_isa_gte2 = true;
-#endif
-
-#if defined(_MIPS_ARCH_MIPS32R6)
- r6 = true;
- fpu_32bit = false;
-#endif
+ bool fpu_32bit;
+ bool mips_isa_gte2;
+ bool r6;
+ GetFlagsFromCppDefined(&mips_isa_gte2, &r6, &fpu_32bit);
return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6);
}
@@ -89,19 +124,11 @@
// the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
// Assume conservative defaults.
bool smp = false;
- bool fpu_32bit = true;
- bool mips_isa_gte2 = false;
- bool r6 = false;
- // Override defaults based on compiler flags.
-#if (_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS32R5) || defined(_MIPS_ARCH_MIPS32R6)
- mips_isa_gte2 = true;
-#endif
-
-#if defined(_MIPS_ARCH_MIPS32R6)
- r6 = true;
- fpu_32bit = false;
-#endif
+ bool fpu_32bit;
+ bool mips_isa_gte2;
+ bool r6;
+ GetFlagsFromCppDefined(&mips_isa_gte2, &r6, &fpu_32bit);
std::ifstream in("/proc/cpuinfo");
if (!in.fail()) {
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 16f0e70..0c2250e 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1094,7 +1094,7 @@
lw $a0, ARG_SLOT_SIZE($sp) # load resolved method to $a0
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
move $t9, $v0 # code pointer must be in $t9 to generate the global pointer
- jalr $zero, $v0 # tail call to method
+ jalr $zero, $t9 # tail call to method
nop
1:
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
@@ -1203,29 +1203,28 @@
.cpload $t9
move $ra, $zero # link register is to here, so clobber with 0 for later checks
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
addiu $sp, $sp, -16 # allocate temp storage on the stack
.cfi_adjust_cfa_offset 16
- sw $v0, 12($sp)
- .cfi_rel_offset 2, 32
- sw $v1, 8($sp)
- .cfi_rel_offset 3, 36
- s.d $f0, 0($sp)
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ sw $v0, ARG_SLOT_SIZE+12($sp)
+ .cfi_rel_offset 2, ARG_SLOT_SIZE+12
+ sw $v1, ARG_SLOT_SIZE+8($sp)
+ .cfi_rel_offset 3, ARG_SLOT_SIZE+8
+ s.d $f0, ARG_SLOT_SIZE($sp)
s.d $f0, 16($sp) # pass fpr result
move $a2, $v0 # pass gpr result
move $a3, $v1
- addiu $a1, $sp, ARG_SLOT_SIZE # pass $sp (remove arg slots)
+ addiu $a1, $sp, ARG_SLOT_SIZE+16 # pass $sp (remove arg slots and temp storage)
jal artInstrumentationMethodExitFromCode # (Thread*, SP, gpr_res, fpr_res)
move $a0, rSELF # pass Thread::Current
- move $t0, $v0 # set aside returned link register
+ move $t9, $v0 # set aside returned link register
move $ra, $v1 # set link register for deoptimization
- addiu $sp, $sp, ARG_SLOT_SIZE+FRAME_SIZE_REFS_ONLY_CALLEE_SAVE # args slot + refs_only callee save frame
- lw $v0, 12($sp) # restore return values
- lw $v1, 8($sp)
- l.d $f0, 0($sp)
- jalr $zero, $t0 # return
- addiu $sp, $sp, 16 # remove temp storage from stack
- .cfi_adjust_cfa_offset -16
+ lw $v0, ARG_SLOT_SIZE+12($sp) # restore return values
+ lw $v1, ARG_SLOT_SIZE+8($sp)
+ l.d $f0, ARG_SLOT_SIZE($sp)
+ jalr $zero, $t9 # return
+ addiu $sp, $sp, ARG_SLOT_SIZE+FRAME_SIZE_REFS_ONLY_CALLEE_SAVE+16 # restore stack
+ .cfi_adjust_cfa_offset -(ARG_SLOT_SIZE+FRAME_SIZE_REFS_ONLY_CALLEE_SAVE+16)
END art_quick_instrumentation_exit
/*
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 8cb95f1..697bf00 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1504,11 +1504,11 @@
move $a1, $t0 # pass $sp
jal artInstrumentationMethodExitFromCode # (Thread*, SP, gpr_res, fpr_res)
move $a0, rSELF # pass Thread::Current
- move $t0, $v0 # set aside returned link register
+ move $t9, $v0 # set aside returned link register
move $ra, $v1 # set link register for deoptimization
ld $v0, 0($sp) # restore return values
l.d $f0, 8($sp)
- jalr $zero, $t0 # return
+ jalr $zero, $t9 # return
daddiu $sp, $sp, 16+FRAME_SIZE_REFS_ONLY_CALLEE_SAVE # 16 bytes of saved values + ref_only callee save frame
.cfi_adjust_cfa_offset -(16+FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
END art_quick_instrumentation_exit
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 0f874a4..5edcd96 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -883,7 +883,44 @@
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
+DEFINE_FUNCTION art_quick_alloc_object_tlab
+ // Fast path tlab allocation.
+ // RDI: uint32_t type_idx, RSI: ArtMethod*
+ // RDX, RCX, R8, R9: free. RAX: return val.
+ movl MIRROR_ART_METHOD_DEX_CACHE_TYPES_OFFSET(%rsi), %edx // Load dex cache resolved types array
+ // Load the class
+ movl MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdx, %rdi, MIRROR_OBJECT_ARRAY_COMPONENT_SIZE), %edx
+ testl %edx, %edx // Check null class
+ jz .Lart_quick_alloc_object_tlab_slow_path
+ // Check class status.
+ cmpl LITERAL(MIRROR_CLASS_STATUS_INITIALIZED), MIRROR_CLASS_STATUS_OFFSET(%rdx)
+ jne .Lart_quick_alloc_object_tlab_slow_path
+ // Check access flags has kAccClassIsFinalizable
+ testl LITERAL(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), MIRROR_CLASS_ACCESS_FLAGS_OFFSET(%rdx)
+ jnz .Lart_quick_alloc_object_tlab_slow_path
+ movl MIRROR_CLASS_OBJECT_SIZE_OFFSET(%rdx), %ecx // Load the object size.
+ addl LITERAL(OBJECT_ALIGNMENT_MASK), %ecx // Align the size by 8. (addr + 7) & ~7.
+ andl LITERAL(OBJECT_ALIGNMENT_MASK_TOGGLED), %ecx
+ movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread
+ movq THREAD_LOCAL_POS_OFFSET(%r8), %rax // Load thread_local_pos.
+ addq %rax, %rcx // Add the object size.
+ cmpq THREAD_LOCAL_END_OFFSET(%r8), %rcx // Check if it fits.
+ ja .Lart_quick_alloc_object_tlab_slow_path
+ movq %rcx, THREAD_LOCAL_POS_OFFSET(%r8) // Update thread_local_pos.
+ addq LITERAL(1), THREAD_LOCAL_OBJECTS_OFFSET(%r8) // Increment thread_local_objects.
+ // Store the class pointer in the header.
+ // No fence needed for x86.
+ movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%rax)
+ ret // Fast path succeeded.
+.Lart_quick_alloc_object_tlab_slow_path:
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ // Outgoing argument set up
+ movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
+ call SYMBOL(artAllocObjectFromCodeTLAB) // cxx_name(arg0, arg1, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RETURN_IF_RESULT_IS_NON_ZERO // return or deliver exception
+END_FUNCTION art_quick_alloc_object_tlab
+
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
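
A C++ rendering of the assembly fast path above: align the object size, bump thread_local_pos, and fall back to the runtime allocator when the TLAB is exhausted. This is a sketch of the control flow only, not the actual runtime interface:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct Tlab {
      uintptr_t pos;
      uintptr_t end;
      size_t objects;
    };

    constexpr uintptr_t kAlignMask = 7;  // 8-byte object alignment

    void* AllocObjectTlab(Tlab* tlab, size_t object_size) {
      size_t aligned = (object_size + kAlignMask) & ~kAlignMask;
      uintptr_t new_pos = tlab->pos + aligned;
      if (new_pos > tlab->end) {
        return nullptr;  // slow path: call into the runtime allocator
      }
      void* result = reinterpret_cast<void*>(tlab->pos);
      tlab->pos = new_pos;
      ++tlab->objects;
      // The class pointer would be stored in the object header here; as the
      // assembly notes, no fence is needed on x86 for this pattern.
      return result;
    }

    int main() {
      alignas(8) static uint8_t buffer[64];
      Tlab tlab = {reinterpret_cast<uintptr_t>(buffer),
                   reinterpret_cast<uintptr_t>(buffer) + sizeof(buffer), 0};
      std::printf("alloc: %p\n", AllocObjectTlab(&tlab, 13));   // rounds to 16
      std::printf("alloc: %p\n", AllocObjectTlab(&tlab, 100));  // nullptr
      return 0;
    }
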
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 92f4ebe..b1dbf6f 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -103,6 +103,16 @@
ADD_TEST_EQ(THREAD_SELF_OFFSET,
art::Thread::SelfOffset<__SIZEOF_POINTER__>().Int32Value())
+#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 125 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_LOCAL_POS_OFFSET,
+ art::Thread::ThreadLocalPosOffset<__SIZEOF_POINTER__>().Int32Value())
+#define THREAD_LOCAL_END_OFFSET (THREAD_LOCAL_POS_OFFSET + __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_LOCAL_END_OFFSET,
+ art::Thread::ThreadLocalEndOffset<__SIZEOF_POINTER__>().Int32Value())
+#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_POS_OFFSET + 2 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
+ art::Thread::ThreadLocalObjectsOffset<__SIZEOF_POINTER__>().Int32Value())
+
// Offsets within java.lang.Object.
#define MIRROR_OBJECT_CLASS_OFFSET 0
ADD_TEST_EQ(MIRROR_OBJECT_CLASS_OFFSET, art::mirror::Object::ClassOffset().Int32Value())
@@ -120,6 +130,22 @@
#define MIRROR_CLASS_COMPONENT_TYPE_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_COMPONENT_TYPE_OFFSET,
art::mirror::Class::ComponentTypeOffset().Int32Value())
+#define MIRROR_CLASS_ACCESS_FLAGS_OFFSET (52 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_CLASS_ACCESS_FLAGS_OFFSET,
+ art::mirror::Class::AccessFlagsOffset().Int32Value())
+#define MIRROR_CLASS_OBJECT_SIZE_OFFSET (80 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_CLASS_OBJECT_SIZE_OFFSET,
+ art::mirror::Class::ObjectSizeOffset().Int32Value())
+#define MIRROR_CLASS_STATUS_OFFSET (92 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_CLASS_STATUS_OFFSET,
+ art::mirror::Class::StatusOffset().Int32Value())
+
+#define MIRROR_CLASS_STATUS_INITIALIZED 10
+ADD_TEST_EQ(static_cast<uint32_t>(MIRROR_CLASS_STATUS_INITIALIZED),
+ static_cast<uint32_t>(art::mirror::Class::kStatusInitialized))
+#define ACCESS_FLAGS_CLASS_IS_FINALIZABLE 0x80000000
+ADD_TEST_EQ(static_cast<uint32_t>(ACCESS_FLAGS_CLASS_IS_FINALIZABLE),
+ static_cast<uint32_t>(kAccClassIsFinalizable))
// Array offsets.
#define MIRROR_ARRAY_LENGTH_OFFSET MIRROR_OBJECT_HEADER_SIZE
@@ -134,6 +160,10 @@
art::mirror::Array::DataOffset(
sizeof(art::mirror::HeapReference<art::mirror::Object>)).Int32Value())
+#define MIRROR_OBJECT_ARRAY_COMPONENT_SIZE 4
+ADD_TEST_EQ(static_cast<size_t>(MIRROR_OBJECT_ARRAY_COMPONENT_SIZE),
+ sizeof(art::mirror::HeapReference<art::mirror::Object>))
+
// Offsets within java.lang.String.
#define MIRROR_STRING_VALUE_OFFSET MIRROR_OBJECT_HEADER_SIZE
ADD_TEST_EQ(MIRROR_STRING_VALUE_OFFSET, art::mirror::String::ValueOffset().Int32Value())
@@ -149,6 +179,10 @@
ADD_TEST_EQ(MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET,
art::mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value())
+#define MIRROR_ART_METHOD_DEX_CACHE_TYPES_OFFSET (8 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_ART_METHOD_DEX_CACHE_TYPES_OFFSET,
+ art::mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value())
+
#define MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32 (36 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32,
art::mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value())
@@ -178,6 +212,13 @@
#define LOCK_WORD_THIN_LOCK_COUNT_ONE 65536
ADD_TEST_EQ(LOCK_WORD_THIN_LOCK_COUNT_ONE, static_cast<int32_t>(art::LockWord::kThinLockCountOne))
+#define OBJECT_ALIGNMENT_MASK 7
+ADD_TEST_EQ(static_cast<size_t>(OBJECT_ALIGNMENT_MASK), art::kObjectAlignment - 1)
+
+#define OBJECT_ALIGNMENT_MASK_TOGGLED 0xFFFFFFF8
+ADD_TEST_EQ(static_cast<uint32_t>(OBJECT_ALIGNMENT_MASK_TOGGLED),
+ ~static_cast<uint32_t>(art::kObjectAlignment - 1))
+
#if defined(__cplusplus)
} // End of CheckAsmSupportOffsets.
#endif
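Each new constant above follows the file's existing convention: the #define is what hand-written assembly consumes, and the paired ADD_TEST_EQ pins it to the real C++ layout so a drifting field offset fails loudly instead of silently corrupting the stubs. A minimal self-contained sketch of the same idea, using a stand-in struct and a plain static_assert rather than ART's macro:

#include <cstddef>

struct FakeThread {           // stand-in for art::Thread; layout is assumed
  void* card_table_;          // the field at THREAD_CARD_TABLE_OFFSET
  void* elided_fields_[124];  // the many fields skipped over above
  void* thread_local_pos_;    // the TLAB bump pointer
};

#define FAKE_THREAD_LOCAL_POS_OFFSET (125 * sizeof(void*))
static_assert(offsetof(FakeThread, thread_local_pos_) == FAKE_THREAD_LOCAL_POS_OFFSET,
              "assembly offset drifted from the C++ layout");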
diff --git a/runtime/base/arena_containers.h b/runtime/base/arena_containers.h
index ceff6e8..e6fe6c0 100644
--- a/runtime/base/arena_containers.h
+++ b/runtime/base/arena_containers.h
@@ -85,8 +85,7 @@
typedef ArenaAllocatorAdapterKindImpl<kArenaAllocatorCountAllocations> ArenaAllocatorAdapterKind;
template <>
-class ArenaAllocatorAdapter<void>
- : private DebugStackReference, private ArenaAllocatorAdapterKind {
+class ArenaAllocatorAdapter<void> : private ArenaAllocatorAdapterKind {
public:
typedef void value_type;
typedef void* pointer;
@@ -99,14 +98,12 @@
explicit ArenaAllocatorAdapter(ArenaAllocator* arena_allocator,
ArenaAllocKind kind = kArenaAllocSTL)
- : DebugStackReference(arena_allocator),
- ArenaAllocatorAdapterKind(kind),
+ : ArenaAllocatorAdapterKind(kind),
arena_allocator_(arena_allocator) {
}
template <typename U>
ArenaAllocatorAdapter(const ArenaAllocatorAdapter<U>& other)
- : DebugStackReference(other),
- ArenaAllocatorAdapterKind(other),
+ : ArenaAllocatorAdapterKind(other),
arena_allocator_(other.arena_allocator_) {
}
ArenaAllocatorAdapter(const ArenaAllocatorAdapter&) = default;
@@ -121,7 +118,7 @@
};
template <typename T>
-class ArenaAllocatorAdapter : private DebugStackReference, private ArenaAllocatorAdapterKind {
+class ArenaAllocatorAdapter : private ArenaAllocatorAdapterKind {
public:
typedef T value_type;
typedef T* pointer;
@@ -137,14 +134,12 @@
};
explicit ArenaAllocatorAdapter(ArenaAllocator* arena_allocator, ArenaAllocKind kind)
- : DebugStackReference(arena_allocator),
- ArenaAllocatorAdapterKind(kind),
+ : ArenaAllocatorAdapterKind(kind),
arena_allocator_(arena_allocator) {
}
template <typename U>
ArenaAllocatorAdapter(const ArenaAllocatorAdapter<U>& other)
- : DebugStackReference(other),
- ArenaAllocatorAdapterKind(other),
+ : ArenaAllocatorAdapterKind(other),
arena_allocator_(other.arena_allocator_) {
}
ArenaAllocatorAdapter(const ArenaAllocatorAdapter&) = default;
diff --git a/runtime/base/hash_map.h b/runtime/base/hash_map.h
index c0f903f..eab80ff 100644
--- a/runtime/base/hash_map.h
+++ b/runtime/base/hash_map.h
@@ -48,7 +48,7 @@
Fn fn_;
};
-template <class Key, class Value, class EmptyFn = DefaultEmptyFn<Key>,
+template <class Key, class Value, class EmptyFn,
class HashFn = std::hash<Key>, class Pred = std::equal_to<Key>,
class Alloc = std::allocator<std::pair<Key, Value>>>
class HashMap : public HashSet<std::pair<Key, Value>, EmptyFn, HashMapWrapper<HashFn>,
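With the default EmptyFn removed, every HashMap instantiation must now state how an empty slot is encoded instead of silently inheriting DefaultEmptyFn<Key>. A hedged sketch of what callers provide; the MakeEmpty/IsEmpty shape follows the HashSet contract as used here, but treat the exact signatures as an assumption:

#include <string>
#include <utility>

// Marks a slot empty by clearing the key; assumes no real entry ever uses
// the empty string as a key.
struct StringIntEmptyFn {
  void MakeEmpty(std::pair<std::string, int>& slot) const {
    slot.first.clear();
  }
  bool IsEmpty(const std::pair<std::string, int>& slot) const {
    return slot.first.empty();
  }
};

// The third template argument is now mandatory:
// HashMap<std::string, int, StringIntEmptyFn> map;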
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index 0ec0295..5d9cd35 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -69,13 +69,13 @@
uint16_t number_of_dex_registers = m->GetCodeItem()->registers_size_;
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- MemoryRegion stack_mask = stack_map.GetStackMask();
- uint32_t register_mask = stack_map.GetRegisterMask();
+ MemoryRegion stack_mask = stack_map.GetStackMask(code_info);
+ uint32_t register_mask = stack_map.GetRegisterMask(code_info);
for (int i = 0; i < number_of_references; ++i) {
int reg = registers[i];
CHECK(reg < m->GetCodeItem()->registers_size_);
DexRegisterLocation location =
- dex_register_map.GetLocationKindAndValue(reg, number_of_dex_registers);
+ dex_register_map.GetDexRegisterLocation(reg, number_of_dex_registers, code_info);
switch (location.GetKind()) {
case DexRegisterLocation::Kind::kNone:
// Not set, should not be a reference.
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 67872d7..a89196d 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -56,6 +56,7 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache-inl.h"
+#include "mirror/field.h"
#include "mirror/iftable-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
@@ -313,7 +314,7 @@
java_lang_String->SetObjectSize(mirror::String::InstanceSize());
mirror::Class::SetStatus(java_lang_String, mirror::Class::kStatusResolved, self);
- // Setup Reference.
+ // Setup java.lang.ref.Reference.
Handle<mirror::Class> java_lang_ref_Reference(hs.NewHandle(
AllocClass(self, java_lang_Class.Get(), mirror::Reference::ClassSize())));
mirror::Reference::SetClass(java_lang_ref_Reference.Get());
@@ -321,7 +322,7 @@
mirror::Class::SetStatus(java_lang_ref_Reference, mirror::Class::kStatusResolved, self);
// Create storage for root classes, save away our work so far (requires descriptors).
- class_roots_ = GcRoot<mirror::ObjectArray<mirror::Class> >(
+ class_roots_ = GcRoot<mirror::ObjectArray<mirror::Class>>(
mirror::ObjectArray<mirror::Class>::Alloc(self, object_array_class.Get(),
kClassRootsMax));
CHECK(!class_roots_.IsNull());
@@ -531,6 +532,19 @@
mirror::Class* java_lang_reflect_Proxy = FindSystemClass(self, "Ljava/lang/reflect/Proxy;");
SetClassRoot(kJavaLangReflectProxy, java_lang_reflect_Proxy);
+ // Create java.lang.reflect.Field.class root.
+ mirror::Class* java_lang_reflect_Field = FindSystemClass(self, "Ljava/lang/reflect/Field;");
+ CHECK(java_lang_reflect_Field != nullptr);
+ SetClassRoot(kJavaLangReflectField, java_lang_reflect_Field);
+ mirror::Field::SetClass(java_lang_reflect_Field);
+
+ // Create java.lang.reflect.Field array root.
+ mirror::Class* java_lang_reflect_Field_array =
+ FindSystemClass(self, "[Ljava/lang/reflect/Field;");
+ CHECK(java_lang_reflect_Field_array != nullptr);
+ SetClassRoot(kJavaLangReflectFieldArrayClass, java_lang_reflect_Field_array);
+ mirror::Field::SetArrayClass(java_lang_reflect_Field_array);
+
// java.lang.ref classes need to be specially flagged, but otherwise are normal classes
// finish initializing Reference class
mirror::Class::SetStatus(java_lang_ref_Reference, mirror::Class::kStatusNotReady, self);
@@ -818,9 +832,10 @@
VLOG(startup) << "ClassLinker::InitFromImage entering";
CHECK(!init_done_);
- Thread* self = Thread::Current();
- gc::Heap* heap = Runtime::Current()->GetHeap();
- gc::space::ImageSpace* space = heap->GetImageSpace();
+ Runtime* const runtime = Runtime::Current();
+ Thread* const self = Thread::Current();
+ gc::Heap* const heap = runtime->GetHeap();
+ gc::space::ImageSpace* const space = heap->GetImageSpace();
dex_cache_image_class_lookup_required_ = true;
CHECK(space != nullptr);
OatFile& oat_file = GetImageOatFile(space);
@@ -875,7 +890,7 @@
// bitmap walk.
mirror::ArtMethod::SetClass(GetClassRoot(kJavaLangReflectArtMethod));
size_t art_method_object_size = mirror::ArtMethod::GetJavaLangReflectArtMethod()->GetObjectSize();
- if (!Runtime::Current()->IsAotCompiler()) {
+ if (!runtime->IsAotCompiler()) {
// Aot compiler supports having an image with a different pointer size than the runtime. This
// happens on the host when compiling 32-bit tests, since we use a 64-bit libart compiler. We
// may also use 32-bit dex2oat on a system with 64-bit apps.
@@ -890,7 +905,6 @@
}
// Set entry point to interpreter if in InterpretOnly mode.
- Runtime* runtime = Runtime::Current();
if (!runtime->IsAotCompiler() && runtime->GetInstrumentation()->InterpretOnly()) {
heap->VisitObjects(InitFromImageInterpretOnlyCallback, this);
}
@@ -903,6 +917,8 @@
array_iftable_ = GcRoot<mirror::IfTable>(GetClassRoot(kObjectArrayClass)->GetIfTable());
DCHECK_EQ(array_iftable_.Read(), GetClassRoot(kBooleanArrayClass)->GetIfTable());
// String class root was set above
+ mirror::Field::SetClass(GetClassRoot(kJavaLangReflectField));
+ mirror::Field::SetArrayClass(GetClassRoot(kJavaLangReflectFieldArrayClass));
mirror::Reference::SetClass(GetClassRoot(kJavaLangRefReference));
mirror::ArtField::SetClass(GetClassRoot(kJavaLangReflectArtField));
mirror::BooleanArray::SetArrayClass(GetClassRoot(kBooleanArrayClass));
@@ -1088,6 +1104,8 @@
mirror::Reference::ResetClass();
mirror::ArtField::ResetClass();
mirror::ArtMethod::ResetClass();
+ mirror::Field::ResetClass();
+ mirror::Field::ResetArrayClass();
mirror::BooleanArray::ResetArrayClass();
mirror::ByteArray::ResetArrayClass();
mirror::CharArray::ResetArrayClass();
@@ -1372,38 +1390,6 @@
self->SetException(pre_allocated);
return nullptr;
}
- } else if (Runtime::Current()->UseCompileTimeClassPath()) {
- // First try with the bootstrap class loader.
- if (class_loader.Get() != nullptr) {
- klass = LookupClass(self, descriptor, hash, nullptr);
- if (klass != nullptr) {
- return EnsureResolved(self, descriptor, klass);
- }
- }
- // If the lookup failed search the boot class path. We don't perform a recursive call to avoid
- // a NoClassDefFoundError being allocated.
- ClassPathEntry pair = FindInClassPath(descriptor, hash, boot_class_path_);
- if (pair.second != nullptr) {
- return DefineClass(self, descriptor, hash, NullHandle<mirror::ClassLoader>(), *pair.first,
- *pair.second);
- }
- // Next try the compile time class path.
- const std::vector<const DexFile*>* class_path;
- {
- ScopedObjectAccessUnchecked soa(self);
- ScopedLocalRef<jobject> jclass_loader(soa.Env(),
- soa.AddLocalReference<jobject>(class_loader.Get()));
- class_path = &Runtime::Current()->GetCompileTimeClassPath(jclass_loader.get());
- }
- pair = FindInClassPath(descriptor, hash, *class_path);
- if (pair.second != nullptr) {
- return DefineClass(self, descriptor, hash, class_loader, *pair.first, *pair.second);
- } else {
- // Use the pre-allocated NCDFE at compile time to avoid wasting time constructing exceptions.
- mirror::Throwable* pre_allocated = Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
- self->SetException(pre_allocated);
- return nullptr;
- }
} else {
ScopedObjectAccessUnchecked soa(self);
mirror::Class* cp_klass = FindClassInPathClassLoader(soa, self, descriptor, hash,
@@ -1411,6 +1397,14 @@
if (cp_klass != nullptr) {
return cp_klass;
}
+
+ if (Runtime::Current()->IsAotCompiler()) {
+ // Oops, compile-time, can't run actual class-loader code.
+ mirror::Throwable* pre_allocated = Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
+ self->SetException(pre_allocated);
+ return nullptr;
+ }
+
ScopedLocalRef<jobject> class_loader_object(soa.Env(),
soa.AddLocalReference<jobject>(class_loader.Get()));
std::string class_name_string(DescriptorToDot(descriptor));
@@ -1767,7 +1761,7 @@
return; // No direct methods => no static methods.
}
Runtime* runtime = Runtime::Current();
- if (!runtime->IsStarted() || runtime->UseCompileTimeClassPath()) {
+ if (!runtime->IsStarted()) {
if (runtime->IsAotCompiler() || runtime->GetHeap()->HasImageSpace()) {
return; // OAT file unavailable.
}
@@ -1907,7 +1901,7 @@
bool has_oat_class = false;
- if (Runtime::Current()->IsStarted() && !Runtime::Current()->UseCompileTimeClassPath()) {
+ if (Runtime::Current()->IsStarted() && !Runtime::Current()->IsAotCompiler()) {
OatFile::OatClass oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(),
&has_oat_class);
if (has_oat_class) {
@@ -2808,7 +2802,7 @@
// classes.
if (Runtime::Current()->IsAotCompiler()) {
// Are we compiling the bootclasspath?
- if (!Runtime::Current()->UseCompileTimeClassPath()) {
+ if (Runtime::Current()->GetCompilerCallbacks()->IsBootImage()) {
return false;
}
// We are compiling an app (not the image).
@@ -5213,10 +5207,12 @@
"Ljava/lang/ref/Reference;",
"Ljava/lang/reflect/ArtField;",
"Ljava/lang/reflect/ArtMethod;",
+ "Ljava/lang/reflect/Field;",
"Ljava/lang/reflect/Proxy;",
"[Ljava/lang/String;",
"[Ljava/lang/reflect/ArtField;",
"[Ljava/lang/reflect/ArtMethod;",
+ "[Ljava/lang/reflect/Field;",
"Ljava/lang/ClassLoader;",
"Ljava/lang/Throwable;",
"Ljava/lang/ClassNotFoundException;",
@@ -5286,6 +5282,10 @@
}
bool ClassLinker::MayBeCalledWithDirectCodePointer(mirror::ArtMethod* m) {
+ if (Runtime::Current()->UseJit()) {
+ // JIT can have direct code pointers from any method to any other method.
+ return true;
+ }
// Non-image methods don't use direct code pointer.
if (!m->GetDeclaringClass()->IsBootStrapClassLoaded()) {
return false;
@@ -5315,4 +5315,95 @@
}
}
+jobject ClassLinker::CreatePathClassLoader(Thread* self, std::vector<const DexFile*>& dex_files) {
+  // ScopedObjectAccessAlreadyRunnable is protected, and we need something to add a global
+  // reference.
+ // We could move the jobject to the callers, but all call-sites do this...
+ ScopedObjectAccessUnchecked soa(self);
+
+ // Register the dex files.
+ for (const DexFile* dex_file : dex_files) {
+ RegisterDexFile(*dex_file);
+ }
+
+ // For now, create a libcore-level DexFile for each ART DexFile. This "explodes" multidex.
+ StackHandleScope<11> hs(self);
+
+ Handle<mirror::ArtField> h_dex_elements_field =
+ hs.NewHandle(soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements));
+
+ mirror::Class* dex_elements_class = h_dex_elements_field->GetType<true>();
+ DCHECK(dex_elements_class != nullptr);
+ DCHECK(dex_elements_class->IsArrayClass());
+ Handle<mirror::ObjectArray<mirror::Object>> h_dex_elements(hs.NewHandle(
+ mirror::ObjectArray<mirror::Object>::Alloc(self, dex_elements_class, dex_files.size())));
+ Handle<mirror::Class> h_dex_element_class =
+ hs.NewHandle(dex_elements_class->GetComponentType());
+
+ Handle<mirror::ArtField> h_element_file_field =
+ hs.NewHandle(
+ soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile));
+ DCHECK_EQ(h_dex_element_class.Get(), h_element_file_field->GetDeclaringClass());
+
+ Handle<mirror::ArtField> h_cookie_field =
+ hs.NewHandle(soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie));
+ DCHECK_EQ(h_cookie_field->GetDeclaringClass(), h_element_file_field->GetType<false>());
+
+ // Fill the elements array.
+ int32_t index = 0;
+ for (const DexFile* dex_file : dex_files) {
+ StackHandleScope<3> hs2(self);
+
+ Handle<mirror::LongArray> h_long_array = hs2.NewHandle(mirror::LongArray::Alloc(self, 1));
+ DCHECK(h_long_array.Get() != nullptr);
+ h_long_array->Set(0, reinterpret_cast<intptr_t>(dex_file));
+
+ Handle<mirror::Object> h_dex_file = hs2.NewHandle(
+ h_cookie_field->GetDeclaringClass()->AllocObject(self));
+ DCHECK(h_dex_file.Get() != nullptr);
+ h_cookie_field->SetObject<false>(h_dex_file.Get(), h_long_array.Get());
+
+ Handle<mirror::Object> h_element = hs2.NewHandle(h_dex_element_class->AllocObject(self));
+ DCHECK(h_element.Get() != nullptr);
+ h_element_file_field->SetObject<false>(h_element.Get(), h_dex_file.Get());
+
+ h_dex_elements->Set(index, h_element.Get());
+ index++;
+ }
+ DCHECK_EQ(index, h_dex_elements->GetLength());
+
+ // Create DexPathList.
+ Handle<mirror::Object> h_dex_path_list = hs.NewHandle(
+ h_dex_elements_field->GetDeclaringClass()->AllocObject(self));
+ DCHECK(h_dex_path_list.Get() != nullptr);
+ // Set elements.
+ h_dex_elements_field->SetObject<false>(h_dex_path_list.Get(), h_dex_elements.Get());
+
+ // Create PathClassLoader.
+ Handle<mirror::Class> h_path_class_class = hs.NewHandle(
+ soa.Decode<mirror::Class*>(WellKnownClasses::dalvik_system_PathClassLoader));
+ Handle<mirror::Object> h_path_class_loader = hs.NewHandle(
+ h_path_class_class->AllocObject(self));
+ DCHECK(h_path_class_loader.Get() != nullptr);
+ // Set DexPathList.
+ Handle<mirror::ArtField> h_path_list_field = hs.NewHandle(
+ soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList));
+ DCHECK(h_path_list_field.Get() != nullptr);
+ h_path_list_field->SetObject<false>(h_path_class_loader.Get(), h_dex_path_list.Get());
+
+ // Make a pretend boot-classpath.
+ // TODO: Should we scan the image?
+ Handle<mirror::ArtField> h_parent_field = hs.NewHandle(
+ mirror::Class::FindField(self, hs.NewHandle(h_path_class_loader->GetClass()), "parent",
+ "Ljava/lang/ClassLoader;"));
+ DCHECK(h_parent_field.Get() != nullptr);
+ mirror::Object* boot_cl =
+ soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_BootClassLoader)->AllocObject(self);
+ h_parent_field->SetObject<false>(h_path_class_loader.Get(), boot_cl);
+
+ // Make it a global ref and return.
+ ScopedLocalRef<jobject> local_ref(
+ soa.Env(), soa.Env()->AddLocalReference<jobject>(h_path_class_loader.Get()));
+ return soa.Env()->NewGlobalRef(local_ref.get());
+}
+
} // namespace art
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 4ebce3e..ec984cb 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -72,10 +72,12 @@
kJavaLangRefReference,
kJavaLangReflectArtField,
kJavaLangReflectArtMethod,
+ kJavaLangReflectField,
kJavaLangReflectProxy,
kJavaLangStringArrayClass,
kJavaLangReflectArtFieldArrayClass,
kJavaLangReflectArtMethodArrayClass,
+ kJavaLangReflectFieldArrayClass,
kJavaLangClassLoader,
kJavaLangThrowable,
kJavaLangClassNotFoundException,
@@ -454,6 +456,11 @@
bool MayBeCalledWithDirectCodePointer(mirror::ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // Creates a PathClassLoader (returned as a JNI global reference) that can be used to load
+  // classes from the given dex files.
+  // Note: the objects are not completely set up. Do not use this outside of tests and the compiler.
+ jobject CreatePathClassLoader(Thread* self, std::vector<const DexFile*>& dex_files)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
private:
static void InitFromImageInterpretOnlyCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
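A minimal usage sketch for the new helper, mirroring how LoadDex() in common_runtime_test.cc (further down) calls it; the wrapper function is hypothetical and only shows the calling convention:

// Hypothetical test helper: wire already-opened dex files into a managed
// PathClassLoader and hand back the JNI global reference.
jobject MakeLoaderFor(Thread* self, std::vector<const DexFile*>& class_path)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ClassLinker* linker = Runtime::Current()->GetClassLinker();
  return linker->CreatePathClassLoader(self, class_path);
}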
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 1789ab1..3e727e7 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -24,11 +24,13 @@
#include "dex_file.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "gc/heap.h"
+#include "mirror/accessible_object.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
+#include "mirror/field.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/proxy.h"
@@ -177,7 +179,7 @@
EXPECT_TRUE(field->GetClass() != nullptr);
EXPECT_EQ(klass, field->GetDeclaringClass());
EXPECT_TRUE(field->GetName() != nullptr);
- EXPECT_TRUE(field->GetType(true) != nullptr);
+ EXPECT_TRUE(field->GetType<true>() != nullptr);
}
void AssertClass(const std::string& descriptor, Handle<mirror::Class> klass)
@@ -283,7 +285,7 @@
for (size_t i = 0; i < klass->NumInstanceFields(); i++) {
mirror::ArtField* field = klass->GetInstanceField(i);
fhandle.Assign(field);
- mirror::Class* field_type = fhandle->GetType(true);
+ mirror::Class* field_type = fhandle->GetType<true>();
ASSERT_TRUE(field_type != nullptr);
if (!field->IsPrimitiveType()) {
ASSERT_TRUE(!field_type->IsPrimitive());
@@ -394,7 +396,12 @@
// Art methods have a different size due to the padding field.
if (!klass->IsArtMethodClass() && !klass->IsClassClass() && !is_static) {
- size_t expected_size = is_static ? klass->GetClassSize(): klass->GetObjectSize();
+      // Currently only required for AccessibleObject because of its padding fields. The class
+      // linker says AccessibleObject is 9 bytes, but sizeof(AccessibleObject) is 12 bytes due to
+      // padding. The RoundUp works around this case.
+ static constexpr size_t kPackAlignment = 4;
+ size_t expected_size = RoundUp(is_static ? klass->GetClassSize(): klass->GetObjectSize(),
+ kPackAlignment);
if (sizeof(T) != expected_size) {
LOG(ERROR) << "Class size mismatch:"
<< " class=" << class_descriptor
@@ -596,6 +603,22 @@
};
};
+struct AccessibleObjectOffsets : public CheckOffsets<mirror::AccessibleObject> {
+ AccessibleObjectOffsets() : CheckOffsets<mirror::AccessibleObject>(false, "Ljava/lang/reflect/AccessibleObject;") {
+ offsets.push_back(CheckOffset(mirror::AccessibleObject::FlagOffset().Uint32Value(), "flag"));
+ };
+};
+
+struct FieldOffsets : public CheckOffsets<mirror::Field> {
+ FieldOffsets() : CheckOffsets<mirror::Field>(false, "Ljava/lang/reflect/Field;") {
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Field, access_flags_), "accessFlags"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Field, declaring_class_), "declaringClass"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Field, dex_field_index_), "dexFieldIndex"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Field, offset_), "offset"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Field, type_), "type"));
+ };
+};
+
// C++ fields must exactly match the fields in the Java classes. If this fails,
// reorder the fields in the C++ class. Managed class fields are ordered by
// ClassLinker::LinkFields.
@@ -613,6 +636,8 @@
EXPECT_TRUE(DexCacheOffsets().Check());
EXPECT_TRUE(ReferenceOffsets().Check());
EXPECT_TRUE(FinalizerReferenceOffsets().Check());
+ EXPECT_TRUE(AccessibleObjectOffsets().Check());
+ EXPECT_TRUE(FieldOffsets().Check());
}
TEST_F(ClassLinkerTest, FindClassNonexistent) {
@@ -621,6 +646,20 @@
AssertNonExistentClass("LNoSuchClass;");
}
+TEST_F(ClassLinkerTest, GetDexFiles) {
+ ScopedObjectAccess soa(Thread::Current());
+
+ jobject jclass_loader = LoadDex("Nested");
+ std::vector<const DexFile*> dex_files(GetDexFiles(jclass_loader));
+ ASSERT_EQ(dex_files.size(), 1U);
+ EXPECT_TRUE(EndsWith(dex_files[0]->GetLocation(), "Nested.jar"));
+
+ jobject jclass_loader2 = LoadDex("MultiDex");
+ std::vector<const DexFile*> dex_files2(GetDexFiles(jclass_loader2));
+ ASSERT_EQ(dex_files2.size(), 2U);
+ EXPECT_TRUE(EndsWith(dex_files2[0]->GetLocation(), "MultiDex.jar"));
+}
+
TEST_F(ClassLinkerTest, FindClassNested) {
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
@@ -985,11 +1024,10 @@
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("StaticsFromCode");
+ const DexFile* dex_file = GetFirstDexFile(jclass_loader);
StackHandleScope<1> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
- const DexFile* dex_file = Runtime::Current()->GetCompileTimeClassPath(jclass_loader)[0];
- CHECK(dex_file != nullptr);
mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", class_loader);
mirror::ArtMethod* clinit = klass->FindClassInitializer();
mirror::ArtMethod* getS0 = klass->FindDirectMethod("getS0", "()Ljava/lang/Object;");
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index e0d62d7..d400010 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -34,6 +34,7 @@
#include "gc_root-inl.h"
#include "gc/heap.h"
#include "gtest/gtest.h"
+#include "handle_scope-inl.h"
#include "interpreter/unstarted_runtime.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
@@ -219,7 +220,6 @@
std::string min_heap_string(StringPrintf("-Xms%zdm", gc::Heap::kDefaultInitialSize / MB));
std::string max_heap_string(StringPrintf("-Xmx%zdm", gc::Heap::kDefaultMaximumSize / MB));
- callbacks_.reset(new NoopCompilerCallbacks());
RuntimeOptions options;
std::string boot_class_path_string = "-Xbootclasspath:" + GetLibCoreDexFileName();
@@ -227,9 +227,16 @@
options.push_back(std::make_pair("-Xcheck:jni", nullptr));
options.push_back(std::make_pair(min_heap_string, nullptr));
options.push_back(std::make_pair(max_heap_string, nullptr));
- options.push_back(std::make_pair("compilercallbacks", callbacks_.get()));
+
+ callbacks_.reset(new NoopCompilerCallbacks());
+
SetUpRuntimeOptions(&options);
+  // Install the compiler callbacks if SetUpRuntimeOptions hasn't deleted them.
+ if (callbacks_.get() != nullptr) {
+ options.push_back(std::make_pair("compilercallbacks", callbacks_.get()));
+ }
+
PreRuntimeCreate();
if (!Runtime::Create(options, false)) {
LOG(FATAL) << "Failed to create runtime";
@@ -386,22 +393,89 @@
return std::move(vector[0]);
}
+std::vector<const DexFile*> CommonRuntimeTest::GetDexFiles(jobject jclass_loader) {
+ std::vector<const DexFile*> ret;
+
+ ScopedObjectAccess soa(Thread::Current());
+
+ StackHandleScope<4> hs(Thread::Current());
+ Handle<mirror::ClassLoader> class_loader = hs.NewHandle(
+ soa.Decode<mirror::ClassLoader*>(jclass_loader));
+
+ DCHECK_EQ(class_loader->GetClass(),
+ soa.Decode<mirror::Class*>(WellKnownClasses::dalvik_system_PathClassLoader));
+ DCHECK_EQ(class_loader->GetParent()->GetClass(),
+ soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_BootClassLoader));
+
+ // The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
+ // We need to get the DexPathList and loop through it.
+ Handle<mirror::ArtField> cookie_field =
+ hs.NewHandle(soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie));
+ Handle<mirror::ArtField> dex_file_field =
+ hs.NewHandle(
+ soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile));
+ mirror::Object* dex_path_list =
+ soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList)->
+ GetObject(class_loader.Get());
+ if (dex_path_list != nullptr && dex_file_field.Get() != nullptr &&
+ cookie_field.Get() != nullptr) {
+    // DexPathList has an Element[] array called dexElements; each element contains a dex file.
+ mirror::Object* dex_elements_obj =
+ soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
+ GetObject(dex_path_list);
+    // Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
+    // at its mCookie, which holds the native DexFile pointers.
+ if (dex_elements_obj != nullptr) {
+ Handle<mirror::ObjectArray<mirror::Object>> dex_elements =
+ hs.NewHandle(dex_elements_obj->AsObjectArray<mirror::Object>());
+ for (int32_t i = 0; i < dex_elements->GetLength(); ++i) {
+ mirror::Object* element = dex_elements->GetWithoutChecks(i);
+ if (element == nullptr) {
+          // Should never happen; fall back to Java code to throw an NPE.
+ break;
+ }
+ mirror::Object* dex_file = dex_file_field->GetObject(element);
+ if (dex_file != nullptr) {
+ mirror::LongArray* long_array = cookie_field->GetObject(dex_file)->AsLongArray();
+ DCHECK(long_array != nullptr);
+ int32_t long_array_size = long_array->GetLength();
+ for (int32_t j = 0; j < long_array_size; ++j) {
+ const DexFile* cp_dex_file = reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(
+ long_array->GetWithoutChecks(j)));
+ if (cp_dex_file == nullptr) {
+ LOG(WARNING) << "Null DexFile";
+ continue;
+ }
+ ret.push_back(cp_dex_file);
+ }
+ }
+ }
+ }
+ }
+
+ return ret;
+}
+
+const DexFile* CommonRuntimeTest::GetFirstDexFile(jobject jclass_loader) {
+ std::vector<const DexFile*> tmp(GetDexFiles(jclass_loader));
+ DCHECK(!tmp.empty());
+ const DexFile* ret = tmp[0];
+ DCHECK(ret != nullptr);
+ return ret;
+}
+
jobject CommonRuntimeTest::LoadDex(const char* dex_name) {
std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles(dex_name);
std::vector<const DexFile*> class_path;
CHECK_NE(0U, dex_files.size());
for (auto& dex_file : dex_files) {
class_path.push_back(dex_file.get());
- class_linker_->RegisterDexFile(*dex_file);
loaded_dex_files_.push_back(std::move(dex_file));
}
+
Thread* self = Thread::Current();
- JNIEnvExt* env = self->GetJniEnv();
- ScopedLocalRef<jobject> class_loader_local(env,
- env->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader));
- jobject class_loader = env->NewGlobalRef(class_loader_local.get());
- self->SetClassLoaderOverride(class_loader_local.get());
- Runtime::Current()->SetCompileTimeClassPath(class_loader, class_path);
+ jobject class_loader = Runtime::Current()->GetClassLinker()->CreatePathClassLoader(self, class_path);
+ self->SetClassLoaderOverride(class_loader);
return class_loader;
}
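The walk above bottoms out in mCookie, a long[] whose entries are native art::DexFile pointers stored as integers; CreatePathClassLoader writes them and GetDexFiles reads them back. The round-trip reduces to the casts below (a sketch to make the symmetry explicit):

#include <cstdint>

// Store side, as in CreatePathClassLoader's LongArray::Set call.
int64_t ToCookieEntry(const DexFile* dex_file) {
  return static_cast<int64_t>(reinterpret_cast<uintptr_t>(dex_file));
}

// Load side, as in GetDexFiles' GetWithoutChecks loop.
const DexFile* FromCookieEntry(int64_t entry) {
  return reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(entry));
}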
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index cce8485..5fbc2ee 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -133,10 +133,18 @@
const DexFile* java_lang_dex_file_;
std::vector<const DexFile*> boot_class_path_;
+  // Get the dex files from a PathClassLoader, in the order of the dex elements and their dex
+  // file arrays.
+ std::vector<const DexFile*> GetDexFiles(jobject jclass_loader);
+
+ // Get the first dex file from a PathClassLoader. Will abort if it is null.
+ const DexFile* GetFirstDexFile(jobject jclass_loader);
+
+ std::unique_ptr<CompilerCallbacks> callbacks_;
+
private:
static std::string GetCoreFileLocation(const char* suffix);
- std::unique_ptr<CompilerCallbacks> callbacks_;
std::vector<std::unique_ptr<const DexFile>> loaded_dex_files_;
};
diff --git a/runtime/compiler_callbacks.h b/runtime/compiler_callbacks.h
index d1a6861..b296e39 100644
--- a/runtime/compiler_callbacks.h
+++ b/runtime/compiler_callbacks.h
@@ -29,19 +29,32 @@
} // namespace verifier
class CompilerCallbacks {
- public:
- virtual ~CompilerCallbacks() { }
+ public:
+ enum class CallbackMode { // private
+ kCompileBootImage,
+ kCompileApp
+ };
- virtual bool MethodVerified(verifier::MethodVerifier* verifier)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
- virtual void ClassRejected(ClassReference ref) = 0;
+ virtual ~CompilerCallbacks() { }
- // Return true if we should attempt to relocate to a random base address if we have not already
- // done so. Return false if relocating in this way would be problematic.
- virtual bool IsRelocationPossible() = 0;
+ virtual bool MethodVerified(verifier::MethodVerifier* verifier)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ virtual void ClassRejected(ClassReference ref) = 0;
- protected:
- CompilerCallbacks() { }
+ // Return true if we should attempt to relocate to a random base address if we have not already
+ // done so. Return false if relocating in this way would be problematic.
+ virtual bool IsRelocationPossible() = 0;
+
+ bool IsBootImage() {
+ return mode_ == CallbackMode::kCompileBootImage;
+ }
+
+ protected:
+ explicit CompilerCallbacks(CallbackMode mode) : mode_(mode) { }
+
+ private:
+ // Whether the compiler is creating a boot image.
+ const CallbackMode mode_;
};
} // namespace art
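Concrete callback implementations now have to thread a CallbackMode through the protected constructor, and IsBootImage() replaces the Runtime::UseCompileTimeClassPath() checks retired in class_linker.cc above. A hedged sketch of the minimal subclass shape (a hypothetical class, not the real NoopCompilerCallbacks):

class SketchCallbacks final : public CompilerCallbacks {
 public:
  explicit SketchCallbacks(CallbackMode mode) : CompilerCallbacks(mode) {}

  bool MethodVerified(verifier::MethodVerifier* verifier) override {
    UNUSED(verifier);  // accept everything; a real compiler records results
    return true;
  }
  void ClassRejected(ClassReference ref) override { UNUSED(ref); }
  bool IsRelocationPossible() override { return false; }
};

// SketchCallbacks cb(CompilerCallbacks::CallbackMode::kCompileApp);
// cb.IsBootImage();  // false: this instance compiles an app, not the image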
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 6296cf5..a767cf0 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -307,7 +307,6 @@
// Runtime JDWP state.
static JDWP::JdwpState* gJdwpState = nullptr;
static bool gDebuggerConnected; // debugger or DDMS is connected.
-static bool gDebuggerActive; // debugger is making requests.
static bool gDisposed; // debugger called VirtualMachine.Dispose, so we should drop the connection.
static bool gDdmThreadNotification = false;
@@ -319,6 +318,7 @@
static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmNhsgWhat;
+bool Dbg::gDebuggerActive = false;
ObjectRegistry* Dbg::gRegistry = nullptr;
// Recent allocation tracking.
@@ -331,7 +331,6 @@
// Deoptimization support.
std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
size_t Dbg::full_deoptimization_event_count_ = 0;
-size_t Dbg::delayed_full_undeoptimization_count_ = 0;
// Instrumentation event reference counters.
size_t Dbg::dex_pc_change_event_ref_count_ = 0;
@@ -620,7 +619,7 @@
// Enable all debugging features, including scans for breakpoints.
// This is a no-op if we're already active.
// Only called from the JDWP handler thread.
- if (gDebuggerActive) {
+ if (IsDebuggerActive()) {
return;
}
@@ -634,7 +633,6 @@
MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
CHECK_EQ(deoptimization_requests_.size(), 0U);
CHECK_EQ(full_deoptimization_event_count_, 0U);
- CHECK_EQ(delayed_full_undeoptimization_count_, 0U);
CHECK_EQ(dex_pc_change_event_ref_count_, 0U);
CHECK_EQ(method_enter_event_ref_count_, 0U);
CHECK_EQ(method_exit_event_ref_count_, 0U);
@@ -673,7 +671,7 @@
ThreadState old_state = self->SetStateUnsafe(kRunnable);
// Debugger may not be active at this point.
- if (gDebuggerActive) {
+ if (IsDebuggerActive()) {
{
// Since we're going to disable deoptimization, we clear the deoptimization requests queue.
// This prevents us from having any pending deoptimization request when the debugger attaches
@@ -681,7 +679,6 @@
MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
deoptimization_requests_.clear();
full_deoptimization_event_count_ = 0U;
- delayed_full_undeoptimization_count_ = 0U;
}
if (instrumentation_events_ != 0) {
runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
@@ -704,10 +701,6 @@
gDebuggerConnected = false;
}
-bool Dbg::IsDebuggerActive() {
- return gDebuggerActive;
-}
-
void Dbg::ConfigureJdwp(const JDWP::JdwpOptions& jdwp_options) {
CHECK_NE(jdwp_options.transport, JDWP::kJdwpTransportUnknown);
gJdwpOptions = jdwp_options;
@@ -1458,22 +1451,31 @@
* Circularly shifts registers so that arguments come last. Reverts
* slots to dex style argument placement.
*/
-static uint16_t DemangleSlot(uint16_t slot, mirror::ArtMethod* m)
+static uint16_t DemangleSlot(uint16_t slot, mirror::ArtMethod* m, JDWP::JdwpError* error)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = m->GetCodeItem();
if (code_item == nullptr) {
// We should not get here for a method without code (native, proxy or abstract). Log it and
// return the slot as is since all registers are arguments.
LOG(WARNING) << "Trying to demangle slot for method without code " << PrettyMethod(m);
- return slot;
- }
- uint16_t ins_size = code_item->ins_size_;
- uint16_t locals_size = code_item->registers_size_ - ins_size;
- if (slot < ins_size) {
- return slot + locals_size;
+ uint16_t vreg_count = mirror::ArtMethod::NumArgRegisters(m->GetShorty());
+ if (slot < vreg_count) {
+ *error = JDWP::ERR_NONE;
+ return slot;
+ }
} else {
- return slot - ins_size;
+ if (slot < code_item->registers_size_) {
+ uint16_t ins_size = code_item->ins_size_;
+ uint16_t locals_size = code_item->registers_size_ - ins_size;
+ *error = JDWP::ERR_NONE;
+ return (slot < ins_size) ? slot + locals_size : slot - ins_size;
+ }
}
+
+ // Slot is invalid in the method.
+ LOG(ERROR) << "Invalid local slot " << slot << " for method " << PrettyMethod(m);
+ *error = JDWP::ERR_INVALID_SLOT;
+ return DexFile::kDexNoIndex16;
}
JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) {
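Concretely, the mapping being reversed is a circular shift. For a method with registers_size_ = 5 and ins_size_ = 2 (so locals_size = 3), JDWP slots 0-1 are the arguments and map to v3-v4, while slots 2-4 are the locals and map to v0-v2; a slot of 5 or more now yields ERR_INVALID_SLOT instead of indexing a nonexistent register. A toy model of just the in-range arithmetic:

#include <cstdint>

// Error handling elided; mirrors the two branches of DemangleSlot above.
uint16_t DemangleForIllustration(uint16_t slot, uint16_t registers_size,
                                 uint16_t ins_size) {
  uint16_t locals_size = registers_size - ins_size;
  return (slot < ins_size) ? slot + locals_size   // argument slot -> high vreg
                           : slot - ins_size;     // local slot -> low vreg
}
// DemangleForIllustration(0, 5, 2) == 3;  DemangleForIllustration(2, 5, 2) == 0.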
@@ -1805,7 +1807,7 @@
HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
HandleWrapper<mirror::ArtField> h_f(hs.NewHandleWrapper(&f));
HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
- field_type = h_f->GetType(true);
+ field_type = h_f->GetType<true>();
}
if (!field_type->IsAssignableFrom(v->GetClass())) {
return JDWP::ERR_INVALID_OBJECT;
@@ -2434,6 +2436,9 @@
if (error != JDWP::ERR_NONE) {
return error;
}
+ if (!IsSuspendedForDebugger(soa, thread)) {
+ return JDWP::ERR_THREAD_NOT_SUSPENDED;
+ }
}
// Find the frame with the given frame_id.
std::unique_ptr<Context> context(Context::Create());
@@ -2462,73 +2467,81 @@
return JDWP::ERR_NONE;
}
+constexpr JDWP::JdwpError kStackFrameLocalAccessError = JDWP::ERR_ABSENT_INFORMATION;
+
+static std::string GetStackContextAsString(const StackVisitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return StringPrintf(" at DEX pc 0x%08x in method %s", visitor.GetDexPc(false),
+ PrettyMethod(visitor.GetMethod()).c_str());
+}
+
+static JDWP::JdwpError FailGetLocalValue(const StackVisitor& visitor, uint16_t vreg,
+ JDWP::JdwpTag tag)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ LOG(ERROR) << "Failed to read " << tag << " local from register v" << vreg
+ << GetStackContextAsString(visitor);
+ return kStackFrameLocalAccessError;
+}
+
JDWP::JdwpError Dbg::GetLocalValue(const StackVisitor& visitor, ScopedObjectAccessUnchecked& soa,
int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
mirror::ArtMethod* m = visitor.GetMethod();
- uint16_t reg = DemangleSlot(slot, m);
+ JDWP::JdwpError error = JDWP::ERR_NONE;
+ uint16_t vreg = DemangleSlot(slot, m, &error);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
// TODO: check that the tag is compatible with the actual type of the slot!
- // TODO: check slot is valid for this method or return INVALID_SLOT error.
- constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
switch (tag) {
case JDWP::JT_BOOLEAN: {
CHECK_EQ(width, 1U);
uint32_t intVal;
- if (visitor.GetVReg(m, reg, kIntVReg, &intVal)) {
- VLOG(jdwp) << "get boolean local " << reg << " = " << intVal;
- JDWP::Set1(buf + 1, intVal != 0);
- } else {
- VLOG(jdwp) << "failed to get boolean local " << reg;
- return kFailureErrorCode;
+ if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
+ return FailGetLocalValue(visitor, vreg, tag);
}
+ VLOG(jdwp) << "get boolean local " << vreg << " = " << intVal;
+ JDWP::Set1(buf + 1, intVal != 0);
break;
}
case JDWP::JT_BYTE: {
CHECK_EQ(width, 1U);
uint32_t intVal;
- if (visitor.GetVReg(m, reg, kIntVReg, &intVal)) {
- VLOG(jdwp) << "get byte local " << reg << " = " << intVal;
- JDWP::Set1(buf + 1, intVal);
- } else {
- VLOG(jdwp) << "failed to get byte local " << reg;
- return kFailureErrorCode;
+ if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
+ return FailGetLocalValue(visitor, vreg, tag);
}
+ VLOG(jdwp) << "get byte local " << vreg << " = " << intVal;
+ JDWP::Set1(buf + 1, intVal);
break;
}
case JDWP::JT_SHORT:
case JDWP::JT_CHAR: {
CHECK_EQ(width, 2U);
uint32_t intVal;
- if (visitor.GetVReg(m, reg, kIntVReg, &intVal)) {
- VLOG(jdwp) << "get short/char local " << reg << " = " << intVal;
- JDWP::Set2BE(buf + 1, intVal);
- } else {
- VLOG(jdwp) << "failed to get short/char local " << reg;
- return kFailureErrorCode;
+ if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
+ return FailGetLocalValue(visitor, vreg, tag);
}
+ VLOG(jdwp) << "get short/char local " << vreg << " = " << intVal;
+ JDWP::Set2BE(buf + 1, intVal);
break;
}
case JDWP::JT_INT: {
CHECK_EQ(width, 4U);
uint32_t intVal;
- if (visitor.GetVReg(m, reg, kIntVReg, &intVal)) {
- VLOG(jdwp) << "get int local " << reg << " = " << intVal;
- JDWP::Set4BE(buf + 1, intVal);
- } else {
- VLOG(jdwp) << "failed to get int local " << reg;
- return kFailureErrorCode;
+ if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
+ return FailGetLocalValue(visitor, vreg, tag);
}
+ VLOG(jdwp) << "get int local " << vreg << " = " << intVal;
+ JDWP::Set4BE(buf + 1, intVal);
break;
}
case JDWP::JT_FLOAT: {
CHECK_EQ(width, 4U);
uint32_t intVal;
- if (visitor.GetVReg(m, reg, kFloatVReg, &intVal)) {
- VLOG(jdwp) << "get float local " << reg << " = " << intVal;
- JDWP::Set4BE(buf + 1, intVal);
- } else {
- VLOG(jdwp) << "failed to get float local " << reg;
- return kFailureErrorCode;
+ if (!visitor.GetVReg(m, vreg, kFloatVReg, &intVal)) {
+ return FailGetLocalValue(visitor, vreg, tag);
}
+ VLOG(jdwp) << "get float local " << vreg << " = " << intVal;
+ JDWP::Set4BE(buf + 1, intVal);
break;
}
case JDWP::JT_ARRAY:
@@ -2540,47 +2553,44 @@
case JDWP::JT_THREAD_GROUP: {
CHECK_EQ(width, sizeof(JDWP::ObjectId));
uint32_t intVal;
- if (visitor.GetVReg(m, reg, kReferenceVReg, &intVal)) {
- mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal);
- VLOG(jdwp) << "get " << tag << " object local " << reg << " = " << o;
- if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
- LOG(FATAL) << "Register " << reg << " expected to hold " << tag << " object: " << o;
- }
- tag = TagFromObject(soa, o);
- JDWP::SetObjectId(buf + 1, gRegistry->Add(o));
- } else {
- VLOG(jdwp) << "failed to get " << tag << " object local " << reg;
- return kFailureErrorCode;
+ if (!visitor.GetVReg(m, vreg, kReferenceVReg, &intVal)) {
+ return FailGetLocalValue(visitor, vreg, tag);
}
+ mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal);
+ VLOG(jdwp) << "get " << tag << " object local " << vreg << " = " << o;
+ if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
+ LOG(FATAL) << StringPrintf("Found invalid object %#" PRIxPTR " in register v%u",
+ reinterpret_cast<uintptr_t>(o), vreg)
+ << GetStackContextAsString(visitor);
+ UNREACHABLE();
+ }
+ tag = TagFromObject(soa, o);
+ JDWP::SetObjectId(buf + 1, gRegistry->Add(o));
break;
}
case JDWP::JT_DOUBLE: {
CHECK_EQ(width, 8U);
uint64_t longVal;
- if (visitor.GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) {
- VLOG(jdwp) << "get double local " << reg << " = " << longVal;
- JDWP::Set8BE(buf + 1, longVal);
- } else {
- VLOG(jdwp) << "failed to get double local " << reg;
- return kFailureErrorCode;
+ if (!visitor.GetVRegPair(m, vreg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) {
+ return FailGetLocalValue(visitor, vreg, tag);
}
+ VLOG(jdwp) << "get double local " << vreg << " = " << longVal;
+ JDWP::Set8BE(buf + 1, longVal);
break;
}
case JDWP::JT_LONG: {
CHECK_EQ(width, 8U);
uint64_t longVal;
- if (visitor.GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg, &longVal)) {
- VLOG(jdwp) << "get long local " << reg << " = " << longVal;
- JDWP::Set8BE(buf + 1, longVal);
- } else {
- VLOG(jdwp) << "failed to get long local " << reg;
- return kFailureErrorCode;
+ if (!visitor.GetVRegPair(m, vreg, kLongLoVReg, kLongHiVReg, &longVal)) {
+ return FailGetLocalValue(visitor, vreg, tag);
}
+ VLOG(jdwp) << "get long local " << vreg << " = " << longVal;
+ JDWP::Set8BE(buf + 1, longVal);
break;
}
default:
LOG(FATAL) << "Unknown tag " << tag;
- break;
+ UNREACHABLE();
}
// Prepend tag, which may have been updated.
@@ -2601,6 +2611,9 @@
if (error != JDWP::ERR_NONE) {
return error;
}
+ if (!IsSuspendedForDebugger(soa, thread)) {
+ return JDWP::ERR_THREAD_NOT_SUSPENDED;
+ }
}
// Find the frame with the given frame_id.
std::unique_ptr<Context> context(Context::Create());
@@ -2627,46 +2640,50 @@
return JDWP::ERR_NONE;
}
+template<typename T>
+static JDWP::JdwpError FailSetLocalValue(const StackVisitor& visitor, uint16_t vreg,
+ JDWP::JdwpTag tag, T value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ LOG(ERROR) << "Failed to write " << tag << " local " << value
+ << " (0x" << std::hex << value << ") into register v" << vreg
+ << GetStackContextAsString(visitor);
+ return kStackFrameLocalAccessError;
+}
+
JDWP::JdwpError Dbg::SetLocalValue(StackVisitor& visitor, int slot, JDWP::JdwpTag tag,
uint64_t value, size_t width) {
mirror::ArtMethod* m = visitor.GetMethod();
- uint16_t reg = DemangleSlot(slot, m);
+ JDWP::JdwpError error = JDWP::ERR_NONE;
+ uint16_t vreg = DemangleSlot(slot, m, &error);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
// TODO: check that the tag is compatible with the actual type of the slot!
- // TODO: check slot is valid for this method or return INVALID_SLOT error.
- constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
switch (tag) {
case JDWP::JT_BOOLEAN:
case JDWP::JT_BYTE:
CHECK_EQ(width, 1U);
- if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(value), kIntVReg)) {
- VLOG(jdwp) << "failed to set boolean/byte local " << reg << " = "
- << static_cast<uint32_t>(value);
- return kFailureErrorCode;
+ if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
+ return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
}
break;
case JDWP::JT_SHORT:
case JDWP::JT_CHAR:
CHECK_EQ(width, 2U);
- if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(value), kIntVReg)) {
- VLOG(jdwp) << "failed to set short/char local " << reg << " = "
- << static_cast<uint32_t>(value);
- return kFailureErrorCode;
+ if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
+ return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
}
break;
case JDWP::JT_INT:
CHECK_EQ(width, 4U);
- if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(value), kIntVReg)) {
- VLOG(jdwp) << "failed to set int local " << reg << " = "
- << static_cast<uint32_t>(value);
- return kFailureErrorCode;
+ if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
+ return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
}
break;
case JDWP::JT_FLOAT:
CHECK_EQ(width, 4U);
- if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(value), kFloatVReg)) {
- VLOG(jdwp) << "failed to set float local " << reg << " = "
- << static_cast<uint32_t>(value);
- return kFailureErrorCode;
+ if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kFloatVReg)) {
+ return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
}
break;
case JDWP::JT_ARRAY:
@@ -2677,38 +2694,35 @@
case JDWP::JT_THREAD:
case JDWP::JT_THREAD_GROUP: {
CHECK_EQ(width, sizeof(JDWP::ObjectId));
- JDWP::JdwpError error;
mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value),
&error);
if (error != JDWP::ERR_NONE) {
VLOG(jdwp) << tag << " object " << o << " is an invalid object";
return JDWP::ERR_INVALID_OBJECT;
- } else if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)),
- kReferenceVReg)) {
- VLOG(jdwp) << "failed to set " << tag << " object local " << reg << " = " << o;
- return kFailureErrorCode;
+ }
+ if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)),
+ kReferenceVReg)) {
+ return FailSetLocalValue(visitor, vreg, tag, reinterpret_cast<uintptr_t>(o));
}
break;
}
case JDWP::JT_DOUBLE: {
CHECK_EQ(width, 8U);
- if (!visitor.SetVRegPair(m, reg, value, kDoubleLoVReg, kDoubleHiVReg)) {
- VLOG(jdwp) << "failed to set double local " << reg << " = " << value;
- return kFailureErrorCode;
+ if (!visitor.SetVRegPair(m, vreg, value, kDoubleLoVReg, kDoubleHiVReg)) {
+ return FailSetLocalValue(visitor, vreg, tag, value);
}
break;
}
case JDWP::JT_LONG: {
CHECK_EQ(width, 8U);
- if (!visitor.SetVRegPair(m, reg, value, kLongLoVReg, kLongHiVReg)) {
- VLOG(jdwp) << "failed to set double local " << reg << " = " << value;
- return kFailureErrorCode;
+ if (!visitor.SetVRegPair(m, vreg, value, kLongLoVReg, kLongHiVReg)) {
+ return FailSetLocalValue(visitor, vreg, tag, value);
}
break;
}
default:
LOG(FATAL) << "Unknown tag " << tag;
- break;
+ UNREACHABLE();
}
return JDWP::ERR_NONE;
}
@@ -3020,29 +3034,6 @@
}
}
-void Dbg::DelayFullUndeoptimization() {
- if (RequiresDeoptimization()) {
- MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
- ++delayed_full_undeoptimization_count_;
- DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_);
- }
-}
-
-void Dbg::ProcessDelayedFullUndeoptimizations() {
- // TODO: avoid taking the lock twice (once here and once in ManageDeoptimization).
- {
- MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
- while (delayed_full_undeoptimization_count_ > 0) {
- DeoptimizationRequest req;
- req.SetKind(DeoptimizationRequest::kFullUndeoptimization);
- req.SetMethod(nullptr);
- RequestDeoptimizationLocked(req);
- --delayed_full_undeoptimization_count_;
- }
- }
- ManageDeoptimization();
-}
-
void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
if (req.GetKind() == DeoptimizationRequest::kNothing) {
// Nothing to do.
@@ -3183,6 +3174,11 @@
return nullptr;
}
+bool Dbg::MethodHasAnyBreakpoints(mirror::ArtMethod* method) {
+ ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
+ return FindFirstBreakpointForMethod(method) != nullptr;
+}
+
// Sanity checks all existing breakpoints on the same method.
static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m,
DeoptimizationRequest::Kind deoptimization_kind)
@@ -3352,6 +3348,125 @@
}
}
+bool Dbg::IsForcedInterpreterNeededForCallingImpl(Thread* thread, mirror::ArtMethod* m) {
+ const SingleStepControl* const ssc = thread->GetSingleStepControl();
+ if (ssc == nullptr) {
+ // If we are not single-stepping, then we don't have to force interpreter.
+ return false;
+ }
+ if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
+ // If we are in interpreter only mode, then we don't have to force interpreter.
+ return false;
+ }
+
+ if (!m->IsNative() && !m->IsProxyMethod()) {
+ // If we want to step into a method, then we have to force interpreter on that call.
+ if (ssc->GetStepDepth() == JDWP::SD_INTO) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool Dbg::IsForcedInterpreterNeededForResolutionImpl(Thread* thread, mirror::ArtMethod* m) {
+ instrumentation::Instrumentation* const instrumentation =
+ Runtime::Current()->GetInstrumentation();
+ // If we are in interpreter only mode, then we don't have to force interpreter.
+ if (instrumentation->InterpretOnly()) {
+ return false;
+ }
+  // We can only interpret pure Java methods.
+ if (m->IsNative() || m->IsProxyMethod()) {
+ return false;
+ }
+ const SingleStepControl* const ssc = thread->GetSingleStepControl();
+ if (ssc != nullptr) {
+ // If we want to step into a method, then we have to force interpreter on that call.
+ if (ssc->GetStepDepth() == JDWP::SD_INTO) {
+ return true;
+ }
+    // If we are stepping out of a static initializer that was implicitly invoked by
+    // calling a static method, by issuing a step-in or step-over, then we need to
+    // step into that method. A stack depth lower than the one recorded in the
+    // single-step control indicates that the step originates from the static
+    // initializer.
+ if (ssc->GetStepDepth() != JDWP::SD_OUT &&
+ ssc->GetStackDepth() > GetStackDepth(thread)) {
+ return true;
+ }
+ }
+  // There are cases where we have to force the interpreter on deoptimized methods:
+  // in some cases the call will not go through the entry point that deoptimization
+  // replaced, but will instead, for example, directly invoke the method's compiled
+  // code.
+ return instrumentation->IsDeoptimized(m);
+}
+
+bool Dbg::IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, mirror::ArtMethod* m) {
+ // The upcall can be nullptr and in that case we don't need to do anything.
+ if (m == nullptr) {
+ return false;
+ }
+ instrumentation::Instrumentation* const instrumentation =
+ Runtime::Current()->GetInstrumentation();
+ // If we are in interpreter only mode, then we don't have to force interpreter.
+ if (instrumentation->InterpretOnly()) {
+ return false;
+ }
+  // We can only interpret pure Java methods.
+ if (m->IsNative() || m->IsProxyMethod()) {
+ return false;
+ }
+ const SingleStepControl* const ssc = thread->GetSingleStepControl();
+ if (ssc != nullptr) {
+    // If we are stepping out of a static initializer that was implicitly invoked by
+    // calling a static method, by issuing a step-out, then we need to step into the
+    // caller of that method. A stack depth lower than the one recorded in the
+    // single-step control indicates that the step originates from the static
+    // initializer.
+ if (ssc->GetStepDepth() == JDWP::SD_OUT &&
+ ssc->GetStackDepth() > GetStackDepth(thread)) {
+ return true;
+ }
+ }
+  // If we are returning from a static initializer that was implicitly
+  // invoked by calling a static method and the caller is deoptimized,
+  // then we have to deoptimize the stack without forcing the interpreter
+  // on the static method that was called originally. This problem can
+  // be solved easily by forcing instrumentation on the called method,
+  // because the instrumentation exit hook will recognize the need for
+  // stack deoptimization by calling IsForcedInterpreterNeededForUpcall.
+ return instrumentation->IsDeoptimized(m);
+}
+
+bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, mirror::ArtMethod* m) {
+ // The upcall can be nullptr and in that case we don't need to do anything.
+ if (m == nullptr) {
+ return false;
+ }
+ instrumentation::Instrumentation* const instrumentation =
+ Runtime::Current()->GetInstrumentation();
+ // If we are in interpreter only mode, then we don't have to force interpreter.
+ if (instrumentation->InterpretOnly()) {
+ return false;
+ }
+  // We can only interpret pure Java methods.
+ if (m->IsNative() || m->IsProxyMethod()) {
+ return false;
+ }
+ const SingleStepControl* const ssc = thread->GetSingleStepControl();
+ if (ssc != nullptr) {
+    // The debugger is not interested in what happens below the level of the
+    // step, so we only force the interpreter when we are not below the step.
+ if (ssc->GetStackDepth() >= GetStackDepth(thread)) {
+ return true;
+ }
+ }
+ // We have to require stack deoptimization if the upcall is deoptimized.
+ return instrumentation->IsDeoptimized(m);
+}
+
// Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
// cause suspension if the thread is the current thread.
class ScopedThreadSuspension {
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 01c9d5d..4f4a781 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -243,7 +243,9 @@
// Returns true if we're actually debugging with a real debugger, false if it's
// just DDMS (or nothing at all).
- static bool IsDebuggerActive();
+ static bool IsDebuggerActive() {
+ return gDebuggerActive;
+ }
// Configures JDWP with parsed command-line options.
static void ConfigureJdwp(const JDWP::JdwpOptions& jdwp_options);
@@ -251,6 +253,10 @@
// Returns true if we had -Xrunjdwp or -agentlib:jdwp= on the command line.
static bool IsJdwpConfigured();
+ // Returns true if a method has any breakpoints.
+ static bool MethodHasAnyBreakpoints(mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::breakpoint_lock_);
+
static bool IsDisposed();
/*
@@ -543,13 +549,6 @@
LOCKS_EXCLUDED(Locks::deoptimization_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Support delayed full undeoptimization requests. This is currently only used for single-step
- // events.
- static void DelayFullUndeoptimization() LOCKS_EXCLUDED(Locks::deoptimization_lock_);
- static void ProcessDelayedFullUndeoptimizations()
- LOCKS_EXCLUDED(Locks::deoptimization_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Manage deoptimization after updating JDWP events list. Suspends all threads, processes each
// request and finally resumes all threads.
static void ManageDeoptimization()
@@ -564,6 +563,53 @@
LOCKS_EXCLUDED(Locks::breakpoint_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ /*
+ * Forced interpreter checkers for single-step and continue support.
+ */
+
+  // Indicates whether we need to force the use of the interpreter to invoke a method.
+  // This allows single-stepping or continuing into the called method.
+ static bool IsForcedInterpreterNeededForCalling(Thread* thread, mirror::ArtMethod* m)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (!IsDebuggerActive()) {
+ return false;
+ }
+ return IsForcedInterpreterNeededForCallingImpl(thread, m);
+ }
+
+  // Indicates whether we need to force the use of the interpreter entrypoint when calling a
+  // method through the resolution trampoline. This allows single-stepping or continuing into
+  // the called method.
+ static bool IsForcedInterpreterNeededForResolution(Thread* thread, mirror::ArtMethod* m)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (!IsDebuggerActive()) {
+ return false;
+ }
+ return IsForcedInterpreterNeededForResolutionImpl(thread, m);
+ }
+
+  // Indicates whether we need to force the use of the instrumentation entrypoint when calling
+  // a method through the resolution trampoline. This allows the stack to be deoptimized for
+  // debugging when we return from the called method.
+ static bool IsForcedInstrumentationNeededForResolution(Thread* thread, mirror::ArtMethod* m)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (!IsDebuggerActive()) {
+ return false;
+ }
+ return IsForcedInstrumentationNeededForResolutionImpl(thread, m);
+ }
+
+  // Indicates whether we need to force the use of the interpreter when returning from the
+  // interpreter into the runtime. This allows the stack to be deoptimized and execution to
+  // continue in the interpreter for debugging.
+ static bool IsForcedInterpreterNeededForUpcall(Thread* thread, mirror::ArtMethod* m)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (!IsDebuggerActive()) {
+ return false;
+ }
+ return IsForcedInterpreterNeededForUpcallImpl(thread, m);
+ }
+
// Single-stepping.
static JDWP::JdwpError ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize size,
JDWP::JdwpStepDepth depth)
@@ -690,11 +736,27 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::deoptimization_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static bool IsForcedInterpreterNeededForCallingImpl(Thread* thread, mirror::ArtMethod* m)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static bool IsForcedInterpreterNeededForResolutionImpl(Thread* thread, mirror::ArtMethod* m)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static bool IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, mirror::ArtMethod* m)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static bool IsForcedInterpreterNeededForUpcallImpl(Thread* thread, mirror::ArtMethod* m)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
static AllocRecord* recent_allocation_records_ PT_GUARDED_BY(Locks::alloc_tracker_lock_);
static size_t alloc_record_max_ GUARDED_BY(Locks::alloc_tracker_lock_);
static size_t alloc_record_head_ GUARDED_BY(Locks::alloc_tracker_lock_);
static size_t alloc_record_count_ GUARDED_BY(Locks::alloc_tracker_lock_);
+ // Indicates whether the debugger is making requests.
+ static bool gDebuggerActive;
+
+ // The registry mapping objects to JDWP ids.
static ObjectRegistry* gRegistry;
// Deoptimization requests to be processed each time the event list is updated. This is used when
@@ -709,10 +771,6 @@
// undeoptimize when the last event is unregistered (when the counter is set to 0).
static size_t full_deoptimization_event_count_ GUARDED_BY(Locks::deoptimization_lock_);
- // Count the number of full undeoptimization requests delayed to next resume or end of debug
- // session.
- static size_t delayed_full_undeoptimization_count_ GUARDED_BY(Locks::deoptimization_lock_);
-
static size_t* GetReferenceCounterForEvent(uint32_t instrumentation_event);
// Weak global type cache, TODO improve this.
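The four checkers added above share one shape: a header-inlined guard on IsDebuggerActive() so the common no-debugger case costs a single branch, with the real work delegated to an out-of-line *Impl function. A minimal sketch of that pattern, with illustrative names (not ART APIs):

    // Inline fast-path guard delegating to an out-of-line slow path.
    class Checker {
     public:
      static bool IsWorkNeeded(int arg) {
        if (!IsActive()) {
          return false;  // Fast path, fully inlined at the call site.
        }
        return IsWorkNeededImpl(arg);  // Slow path, defined in the .cc file.
      }
     private:
      static bool IsActive();
      static bool IsWorkNeededImpl(int arg);
    };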
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index c8ede48..da39573 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -421,15 +421,26 @@
}
}
- std::string GetBaseLocation() const {
- size_t pos = location_.rfind(kMultiDexSeparator);
+ static std::string GetBaseLocation(const std::string& location) {
+ return GetBaseLocation(location.c_str());
+ }
+
+ // Returns the ':classes*.dex' part of the dex location. Returns an empty
+ // string if there is no multidex suffix for the given location.
+ // The kMultiDexSeparator is included in the returned suffix.
+ static std::string GetMultiDexSuffix(const std::string& location) {
+ size_t pos = location.rfind(kMultiDexSeparator);
if (pos == std::string::npos) {
- return location_;
+ return "";
} else {
- return location_.substr(0, pos);
+ return location.substr(pos);
}
}
+ std::string GetBaseLocation() const {
+ return GetBaseLocation(location_);
+ }
+
// For DexFiles directly from .dex files, this is the checksum from the DexFile::Header.
// For DexFiles opened from a zip files, this will be the ZipEntry CRC32 of classes.dex.
uint32_t GetLocationChecksum() const {
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 7f5a181..09ef3ee 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -377,4 +377,13 @@
ASSERT_EQ(0, unlink(dex_location_sym.c_str()));
}
+TEST(DexFileUtilsTest, GetBaseLocationAndMultiDexSuffix) {
+ EXPECT_EQ("/foo/bar/baz.jar", DexFile::GetBaseLocation("/foo/bar/baz.jar"));
+ EXPECT_EQ("/foo/bar/baz.jar", DexFile::GetBaseLocation("/foo/bar/baz.jar:classes2.dex"));
+ EXPECT_EQ("/foo/bar/baz.jar", DexFile::GetBaseLocation("/foo/bar/baz.jar:classes8.dex"));
+ EXPECT_EQ("", DexFile::GetMultiDexSuffix("/foo/bar/baz.jar"));
+ EXPECT_EQ(":classes2.dex", DexFile::GetMultiDexSuffix("/foo/bar/baz.jar:classes2.dex"));
+ EXPECT_EQ(":classes8.dex", DexFile::GetMultiDexSuffix("/foo/bar/baz.jar:classes8.dex"));
+}
+
} // namespace art
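As the new test spells out, GetBaseLocation and GetMultiDexSuffix partition a dex location around the last kMultiDexSeparator (':'): base + suffix always reconstructs the input, and a location without a separator has an empty suffix. A standalone sketch of the same split:

    #include <string>

    constexpr char kSeparator = ':';  // Mirrors DexFile::kMultiDexSeparator.

    std::string BaseLocation(const std::string& location) {
      size_t pos = location.rfind(kSeparator);
      return pos == std::string::npos ? location : location.substr(0, pos);
    }

    std::string MultiDexSuffix(const std::string& location) {
      size_t pos = location.rfind(kSeparator);
      return pos == std::string::npos ? "" : location.substr(pos);
    }

    // BaseLocation("/foo/bar/baz.jar:classes2.dex") == "/foo/bar/baz.jar"
    // MultiDexSuffix("/foo/bar/baz.jar:classes2.dex") == ":classes2.dex"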
diff --git a/runtime/dex_instruction_list.h b/runtime/dex_instruction_list.h
index a90f424..f8f85f9 100644
--- a/runtime/dex_instruction_list.h
+++ b/runtime/dex_instruction_list.h
@@ -133,7 +133,7 @@
V(0x70, INVOKE_DIRECT, "invoke-direct", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero) \
V(0x71, INVOKE_STATIC, "invoke-static", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArg) \
V(0x72, INVOKE_INTERFACE, "invoke-interface", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero) \
- V(0x73, RETURN_VOID_BARRIER, "return-void-barrier", k10x, false, kNone, kReturn, kVerifyNone) \
+ V(0x73, RETURN_VOID_NO_BARRIER, "return-void-no-barrier", k10x, false, kNone, kReturn, kVerifyNone) \
V(0x74, INVOKE_VIRTUAL_RANGE, "invoke-virtual/range", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
V(0x75, INVOKE_SUPER_RANGE, "invoke-super/range", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
V(0x76, INVOKE_DIRECT_RANGE, "invoke-direct/range", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
diff --git a/runtime/dex_instruction_utils.h b/runtime/dex_instruction_utils.h
index 1a671c5..f892f98 100644
--- a/runtime/dex_instruction_utils.h
+++ b/runtime/dex_instruction_utils.h
@@ -55,7 +55,7 @@
constexpr bool IsInstructionInvoke(Instruction::Code opcode) {
return Instruction::INVOKE_VIRTUAL <= opcode && opcode <= Instruction::INVOKE_INTERFACE_RANGE &&
- opcode != Instruction::RETURN_VOID_BARRIER;
+ opcode != Instruction::RETURN_VOID_NO_BARRIER;
}
constexpr bool IsInstructionQuickInvoke(Instruction::Code opcode) {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 70ee042..8351e22 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -30,6 +30,7 @@
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
+#include "debugger.h"
namespace art {
@@ -639,6 +640,14 @@
JValue result = interpreter::EnterInterpreterFromEntryPoint(self, code_item, shadow_frame);
// Pop transition.
self->PopManagedStackFragment(fragment);
+
+ // Request a stack deoptimization if needed.
+ mirror::ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
+ if (UNLIKELY(Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) {
+ self->SetException(Thread::GetDeoptimizationException());
+ self->SetDeoptimizationReturnValue(result);
+ }
+
// No need to restore the args since the method has already been run by the interpreter.
return result.GetJ();
}
@@ -950,14 +959,37 @@
called->GetDexCache()->SetResolvedMethod(called_dex_method_idx, called);
}
}
+
// Ensure that the called method's class is initialized.
StackHandleScope<1> hs(soa.Self());
Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
linker->EnsureInitialized(soa.Self(), called_class, true, true);
if (LIKELY(called_class->IsInitialized())) {
- code = called->GetEntryPointFromQuickCompiledCode();
+ if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
+ // If we are single-stepping or the called method is deoptimized (by a
+ // breakpoint, for example), then we have to execute the called method
+ // with the interpreter.
+ code = GetQuickToInterpreterBridge();
+ } else if (UNLIKELY(Dbg::IsForcedInstrumentationNeededForResolution(self, caller))) {
+ // If the caller is deoptimized (by a breakpoint, for example), we have to
+ // continue its execution with the interpreter when returning from the called
+ // method. Because we do not want to execute the called method with the
+ // interpreter, we wrap its execution into the instrumentation stubs.
+ // When the called method returns, it will execute the instrumentation
+ // exit hook, which determines whether the interpreter is needed by calling
+ // Dbg::IsForcedInterpreterNeededForUpcall and deoptimizes the stack if
+ // it is.
+ code = GetQuickInstrumentationEntryPoint();
+ } else {
+ code = called->GetEntryPointFromQuickCompiledCode();
+ }
} else if (called_class->IsInitializing()) {
- if (invoke_type == kStatic) {
+ if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
+ // If we are single-stepping or the called method is deoptimized (by a
+ // breakpoint, for example), then we have to execute the called method
+ // with the interpreter.
+ code = GetQuickToInterpreterBridge();
+ } else if (invoke_type == kStatic) {
// Class is still initializing, go to oat and grab code (trampoline must be left in place
// until class is initialized to stop races between threads).
code = linker->GetQuickOatCodeFor(called);
@@ -1152,7 +1184,7 @@
gpr_index_--;
if (kMultiGPRegistersWidened) {
DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
- PushGpr(static_cast<int64_t>(bit_cast<uint32_t, int32_t>(val)));
+ PushGpr(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
} else {
PushGpr(val);
}
@@ -1160,7 +1192,7 @@
stack_entries_++;
if (kMultiGPRegistersWidened) {
DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
- PushStack(static_cast<int64_t>(bit_cast<uint32_t, int32_t>(val)));
+ PushStack(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
} else {
PushStack(val);
}
@@ -1220,16 +1252,16 @@
void AdvanceFloat(float val) {
if (kNativeSoftFloatAbi) {
- AdvanceInt(bit_cast<float, uint32_t>(val));
+ AdvanceInt(bit_cast<uint32_t, float>(val));
} else {
if (HaveFloatFpr()) {
fpr_index_--;
if (kRegistersNeededForDouble == 1) {
if (kMultiFPRegistersWidened) {
- PushFpr8(bit_cast<double, uint64_t>(val));
+ PushFpr8(bit_cast<uint64_t, double>(val));
} else {
// No widening, just use the bits.
- PushFpr8(bit_cast<float, uint64_t>(val));
+ PushFpr8(static_cast<uint64_t>(bit_cast<uint32_t, float>(val)));
}
} else {
PushFpr4(val);
@@ -1240,9 +1272,9 @@
// Need to widen before storing: Note the "double" in the template instantiation.
// Note: We need to jump through those hoops to make the compiler happy.
DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t));
- PushStack(static_cast<uintptr_t>(bit_cast<double, uint64_t>(val)));
+ PushStack(static_cast<uintptr_t>(bit_cast<uint64_t, double>(val)));
} else {
- PushStack(bit_cast<float, uintptr_t>(val));
+ PushStack(static_cast<uintptr_t>(bit_cast<uint32_t, float>(val)));
}
fpr_index_ = 0;
}
@@ -1876,8 +1908,8 @@
case 'F': {
if (kRuntimeISA == kX86) {
// Convert back the result to float.
- double d = bit_cast<uint64_t, double>(result_f);
- return bit_cast<float, uint32_t>(static_cast<float>(d));
+ double d = bit_cast<double, uint64_t>(result_f);
+ return bit_cast<uint32_t, float>(static_cast<float>(d));
} else {
return result_f;
}
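The bit_cast changes in this file only swap template-argument order: the calls migrate from the utils.h bit_cast<Source, Destination> convention to the casts.h bit_cast<Destination, Source> one, leaving the value bits untouched. A sketch of the destination-first form the updated calls assume:

    #include <cstdint>
    #include <cstring>

    // Destination-first bit_cast (sketch of the runtime/base/casts.h shape).
    template <typename Dest, typename Source>
    Dest bit_cast(const Source& source) {
      static_assert(sizeof(Dest) == sizeof(Source), "sizes must match");
      Dest dest;
      memcpy(&dest, &source, sizeof(dest));  // Copy bits, no value conversion.
      return dest;
    }

    // bit_cast<uint32_t, float>(1.0f) == 0x3f800000u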
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index dff8f4d..51cf558 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -2061,7 +2061,6 @@
MutexLock mu(self, zygote_creation_lock_);
// Try to see if we have any Zygote spaces.
if (HasZygoteSpace()) {
- LOG(WARNING) << __FUNCTION__ << " called when we already have a zygote space.";
return;
}
Runtime::Current()->GetInternTable()->SwapPostZygoteWithPreZygote();
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 14f770d..1fb3252 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -788,7 +788,8 @@
OatFile* oat_file = OatFile::Open(oat_filename, oat_filename, image_header.GetOatDataBegin(),
image_header.GetOatFileBegin(),
- !Runtime::Current()->IsAotCompiler(), error_msg);
+ !Runtime::Current()->IsAotCompiler(),
+ nullptr, error_msg);
if (oat_file == NULL) {
*error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
oat_filename.c_str(), GetName(), error_msg->c_str());
diff --git a/runtime/globals.h b/runtime/globals.h
index 0845475..ac8751c 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -109,6 +109,13 @@
static constexpr bool kPoisonHeapReferences = false;
#endif
+// If true, enable the tlab allocator by default.
+#ifdef ART_USE_TLAB
+static constexpr bool kUseTlab = true;
+#else
+static constexpr bool kUseTlab = false;
+#endif
+
// Kinds of tracing clocks.
enum class TraceClockSource {
kThreadCpu,
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index b53b8cd..9adb4ac 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -1030,7 +1030,8 @@
NthCallerVisitor visitor(self, 1, true);
visitor.WalkStack(true);
bool deoptimize = (visitor.caller != nullptr) &&
- (interpreter_stubs_installed_ || IsDeoptimized(visitor.caller));
+ (interpreter_stubs_installed_ || IsDeoptimized(visitor.caller) ||
+ Dbg::IsForcedInterpreterNeededForUpcall(self, visitor.caller));
if (deoptimize) {
if (kVerboseInstrumentation) {
LOG(INFO) << StringPrintf("Deoptimizing %s by returning from %s with result %#" PRIx64 " in ",
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 26ab602..a310452 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -18,6 +18,7 @@
#include <cmath>
+#include "debugger.h"
#include "mirror/array-inl.h"
#include "unstarted_runtime.h"
@@ -284,7 +285,7 @@
HandleWrapper<mirror::ArtField> h_f(hs.NewHandleWrapper(&f));
HandleWrapper<mirror::Object> h_reg(hs.NewHandleWrapper(®));
HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&obj));
- field_class = h_f->GetType(true);
+ field_class = h_f->GetType<true>();
}
if (!reg->VerifierInstanceOf(field_class)) {
// This should never happen.
@@ -616,8 +617,14 @@
<< PrettyMethod(new_shadow_frame->GetMethod());
UNREACHABLE();
}
- (new_shadow_frame->GetMethod()->GetEntryPointFromInterpreter())(self, code_item,
- new_shadow_frame, result);
+ // Force the use of the interpreter when the debugger requires it.
+ mirror::EntryPointFromInterpreter* entry;
+ if (UNLIKELY(Dbg::IsForcedInterpreterNeededForCalling(self, new_shadow_frame->GetMethod()))) {
+ entry = &art::artInterpreterToInterpreterBridge;
+ } else {
+ entry = new_shadow_frame->GetMethod()->GetEntryPointFromInterpreter();
+ }
+ entry(self, code_item, new_shadow_frame, result);
} else {
UnstartedRuntimeInvoke(self, code_item, new_shadow_frame, result, first_dest_reg);
}
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 15396d6..7d413c5 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -317,7 +317,10 @@
int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
DCHECK_EQ(switch_data[0], static_cast<uint16_t>(Instruction::kSparseSwitchSignature));
uint16_t size = switch_data[1];
- DCHECK_GT(size, 0);
+ // If size is 0, there is nothing to match: fall through by returning the length of SPARSE_SWITCH.
+ if (size == 0) {
+ return 3;
+ }
const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
DCHECK(IsAligned<4>(keys));
const int32_t* entries = keys + size;
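Returning 3 here means "no matching entry": an empty sparse switch simply falls through, and 3 is the code-unit width of a 31t-format SPARSE_SWITCH instruction, so the pc advances past it instead of tripping the old DCHECK. For reference, the payload being walked looks like this (a sketch following the Dalvik bytecode format):

    // sparse-switch-payload, in 16-bit code units after switch_data.
    struct SparseSwitchPayload {
      uint16_t ident;  // kSparseSwitchSignature (0x0200).
      uint16_t size;   // Entry count; zero is legal for an empty switch.
      // int32_t keys[size];     // Sorted comparison keys.
      // int32_t targets[size];  // Branch offsets, parallel to keys.
    };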
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index af0a530..9c48df6 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -255,14 +255,8 @@
}
HANDLE_INSTRUCTION_END();
- HANDLE_INSTRUCTION_START(RETURN_VOID) {
+ HANDLE_INSTRUCTION_START(RETURN_VOID_NO_BARRIER) {
JValue result;
- if (do_access_check) {
- // If access checks are required then the dex-to-dex compiler and analysis of
- // whether the class has final fields hasn't been performed. Conservatively
- // perform the memory barrier now.
- QuasiAtomic::ThreadFenceForConstructor();
- }
self->AllowThreadSuspension();
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
@@ -277,7 +271,7 @@
}
HANDLE_INSTRUCTION_END();
- HANDLE_INSTRUCTION_START(RETURN_VOID_BARRIER) {
+ HANDLE_INSTRUCTION_START(RETURN_VOID) {
QuasiAtomic::ThreadFenceForConstructor();
JValue result;
self->AllowThreadSuspension();
@@ -2440,7 +2434,7 @@
#define INSTRUMENTATION_INSTRUCTION_HANDLER(o, code, n, f, r, i, a, v) \
alt_op_##code: { \
if (Instruction::code != Instruction::RETURN_VOID && \
- Instruction::code != Instruction::RETURN_VOID_BARRIER && \
+ Instruction::code != Instruction::RETURN_VOID_NO_BARRIER && \
Instruction::code != Instruction::RETURN && \
Instruction::code != Instruction::RETURN_WIDE && \
Instruction::code != Instruction::RETURN_OBJECT) { \
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 9313c75..609faf5 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -170,14 +170,8 @@
inst = inst->Next_1xx();
break;
}
- case Instruction::RETURN_VOID: {
+ case Instruction::RETURN_VOID_NO_BARRIER: {
JValue result;
- if (do_access_check) {
- // If access checks are required then the dex-to-dex compiler and analysis of
- // whether the class has final fields hasn't been performed. Conservatively
- // perform the memory barrier now.
- QuasiAtomic::ThreadFenceForConstructor();
- }
self->AllowThreadSuspension();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -189,7 +183,7 @@
}
return result;
}
- case Instruction::RETURN_VOID_BARRIER: {
+ case Instruction::RETURN_VOID: {
QuasiAtomic::ThreadFenceForConstructor();
JValue result;
self->AllowThreadSuspension();
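Both interpreter implementations make the same swap: after the rename, plain RETURN_VOID is the conservative form that always issues the constructor fence, and RETURN_VOID_NO_BARRIER is the quickened form that may skip it. QuasiAtomic::ThreadFenceForConstructor is, in effect, a store-release fence; a hedged equivalent in standard C++:

    #include <atomic>

    // Approximation of QuasiAtomic::ThreadFenceForConstructor(): order a
    // constructor's field stores before the store that publishes the object.
    inline void ThreadFenceForConstructor() {
      std::atomic_thread_fence(std::memory_order_release);
    }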
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index fbbc863..1b08e80 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -29,6 +29,7 @@
#include "mirror/array-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class.h"
+#include "mirror/field-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
@@ -219,19 +220,11 @@
PrettyDescriptor(klass).c_str());
return;
}
- // TODO: getDeclaredField calls GetType once the field is found to ensure a
- // NoClassDefFoundError is thrown if the field's type cannot be resolved.
- mirror::Class* jlr_Field = self->DecodeJObject(
- WellKnownClasses::java_lang_reflect_Field)->AsClass();
- StackHandleScope<1> hs(self);
- Handle<mirror::Object> field(hs.NewHandle(jlr_Field->AllocNonMovableObject(self)));
- CHECK(field.Get() != nullptr);
- mirror::ArtMethod* c = jlr_Field->FindDeclaredDirectMethod("<init>",
- "(Ljava/lang/reflect/ArtField;)V");
- uint32_t args[1];
- args[0] = StackReference<mirror::Object>::FromMirrorPtr(found).AsVRegValue();
- EnterInterpreterFromInvoke(self, c, field.Get(), args, nullptr);
- result->SetL(field.Get());
+ if (Runtime::Current()->IsActiveTransaction()) {
+ result->SetL(mirror::Field::CreateFromArtField<true>(self, found, true));
+ } else {
+ result->SetL(mirror::Field::CreateFromArtField<false>(self, found, true));
+ }
}
static void UnstartedVmClassLoaderFindLoadedClass(
@@ -460,7 +453,7 @@
static void UnstartedDoubleDoubleToRawLongBits(
Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
double in = shadow_frame->GetVRegDouble(arg_offset);
- result->SetJ(bit_cast<int64_t>(in));
+ result->SetJ(bit_cast<int64_t, double>(in));
}
static mirror::Object* GetDexFromDexCache(Thread* self, mirror::DexCache* dex_cache)
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index e68616f..09bfbf3 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -631,20 +631,20 @@
Locks::mutator_lock_->AssertNotHeld(self);
const char* path_str = path.empty() ? nullptr : path.c_str();
- void* handle = dlopen(path_str, RTLD_LAZY);
+ void* handle = dlopen(path_str, RTLD_NOW);
bool needs_native_bridge = false;
if (handle == nullptr) {
if (android::NativeBridgeIsSupported(path_str)) {
- handle = android::NativeBridgeLoadLibrary(path_str, RTLD_LAZY);
+ handle = android::NativeBridgeLoadLibrary(path_str, RTLD_NOW);
needs_native_bridge = true;
}
}
- VLOG(jni) << "[Call to dlopen(\"" << path << "\", RTLD_LAZY) returned " << handle << "]";
+ VLOG(jni) << "[Call to dlopen(\"" << path << "\", RTLD_NOW) returned " << handle << "]";
if (handle == nullptr) {
*error_msg = dlerror();
- LOG(ERROR) << "dlopen(\"" << path << "\", RTLD_LAZY) failed: " << *error_msg;
+ VLOG(jni) << "dlopen(\"" << path << "\", RTLD_NOW) failed: " << *error_msg;
return false;
}
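RTLD_NOW makes dlopen resolve every undefined symbol at load time, so a library with a dangling reference fails the dlopen call itself, with a useful dlerror string, rather than aborting later at first use; the logging is demoted to VLOG(jni) since callers now surface *error_msg themselves. A minimal usage sketch:

    #include <dlfcn.h>
    #include <string>

    // Eager symbol resolution: failures show up here, not at first call.
    bool LoadNativeLibrary(const std::string& path, std::string* error_msg) {
      void* handle = dlopen(path.c_str(), RTLD_NOW);
      if (handle == nullptr) {
        *error_msg = dlerror();  // e.g. "undefined symbol: foo"
        return false;
      }
      return true;
    }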
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 4bf7142..c9a4483 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -133,7 +133,6 @@
case EK_METHOD_ENTRY:
case EK_METHOD_EXIT:
case EK_METHOD_EXIT_WITH_RETURN_VALUE:
- case EK_SINGLE_STEP:
case EK_FIELD_ACCESS:
case EK_FIELD_MODIFICATION:
return true;
@@ -278,16 +277,7 @@
Dbg::UnconfigureStep(pMod->step.threadId);
}
}
- if (pEvent->eventKind == EK_SINGLE_STEP) {
- // Special case for single-steps where we want to avoid the slow pattern deoptimize/undeoptimize
- // loop between each single-step. In a IDE, this would happens each time the user click on the
- // "single-step" button. Here we delay the full undeoptimization to the next resume
- // (VM.Resume or ThreadReference.Resume) or the end of the debugging session (VM.Dispose or
- // runtime shutdown).
- // Therefore, in a singles-stepping sequence, only the first single-step will trigger a full
- // deoptimization and only the last single-step will trigger a full undeoptimization.
- Dbg::DelayFullUndeoptimization();
- } else if (NeedsFullDeoptimization(pEvent->eventKind)) {
+ if (NeedsFullDeoptimization(pEvent->eventKind)) {
CHECK_EQ(req.GetKind(), DeoptimizationRequest::kNothing);
CHECK(req.Method() == nullptr);
req.SetKind(DeoptimizationRequest::kFullUndeoptimization);
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index c7083dc..add1394 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -295,7 +295,6 @@
*/
static JdwpError VM_Resume(JdwpState*, Request*, ExpandBuf*)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Dbg::ProcessDelayedFullUndeoptimizations();
Dbg::ResumeVM();
return ERR_NONE;
}
@@ -989,8 +988,6 @@
return ERR_NONE;
}
- Dbg::ProcessDelayedFullUndeoptimizations();
-
Dbg::ResumeThread(thread_id);
return ERR_NONE;
}
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index 3d69796..e2b88a5 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -322,8 +322,6 @@
CHECK(event_list_ == nullptr);
}
- Dbg::ProcessDelayedFullUndeoptimizations();
-
/*
* Should not have one of these in progress. If the debugger went away
* mid-request, though, we could see this.
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 13c1f81..5dc739e 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -128,6 +128,10 @@
bool Jit::CompileMethod(mirror::ArtMethod* method, Thread* self) {
DCHECK(!method->IsRuntimeMethod());
+ if (Dbg::IsDebuggerActive() && Dbg::MethodHasAnyBreakpoints(method)) {
+ VLOG(jit) << "JIT not compiling " << PrettyMethod(method) << " due to breakpoint";
+ return false;
+ }
const bool result = jit_compile_method_(jit_compiler_handle_, method, self);
if (result) {
method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index aa8c717..8a20e39 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -47,33 +47,50 @@
static constexpr size_t kMaxCapacity = 1 * GB;
static constexpr size_t kDefaultCapacity = 2 * MB;
+ // Create the code cache with a combined code + data capacity equal to "capacity". On
+ // failure, an error message is returned in the out arg error_msg.
static JitCodeCache* Create(size_t capacity, std::string* error_msg);
const uint8_t* CodeCachePtr() const {
return code_cache_ptr_;
}
+
size_t CodeCacheSize() const {
return code_cache_ptr_ - code_cache_begin_;
}
+
size_t CodeCacheRemain() const {
return code_cache_end_ - code_cache_ptr_;
}
+
+ const uint8_t* DataCachePtr() const {
+ return data_cache_ptr_;
+ }
+
size_t DataCacheSize() const {
return data_cache_ptr_ - data_cache_begin_;
}
+
size_t DataCacheRemain() const {
return data_cache_end_ - data_cache_ptr_;
}
+
size_t NumMethods() const {
return num_methods_;
}
+ // Return true if the code cache contains the code pointer which is the entrypoint of the method.
bool ContainsMethod(mirror::ArtMethod* method) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Return true if the code cache contains the given code pointer.
bool ContainsCodePtr(const void* ptr) const;
+ // Reserve a region of code of size at least "size". Returns nullptr if there is no more room.
uint8_t* ReserveCode(Thread* self, size_t size) LOCKS_EXCLUDED(lock_);
+ // Add a data array of size (end - begin) with the associated contents. Returns nullptr if
+ // there is no more room.
uint8_t* AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end)
LOCKS_EXCLUDED(lock_);
@@ -81,14 +98,19 @@
const void* GetCodeFor(mirror::ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+ // Save the compiled code for a method so that GetCodeFor(method) will return old_code_ptr if the
+ // entrypoint isn't within the cache.
void SaveCompiledCode(mirror::ArtMethod* method, const void* old_code_ptr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
private:
// Takes ownership of code_mem_map.
explicit JitCodeCache(MemMap* code_mem_map);
+
+ // Unimplemented. TODO: Determine whether it is necessary.
void FlushInstructionCache();
+ // Lock which guards the cache state (allocation pointers and the method code map below).
Mutex lock_;
// Mem map which holds code and data. We do this since we need to have 32 bit offsets from method
// headers in code cache which point to things in the data cache. If the maps are more than 4GB
@@ -106,7 +128,7 @@
// TODO: This relies on methods not moving.
// This map holds code for methods if they were deoptimized by the instrumentation stubs. This is
// required since we have to implement ClassLinker::GetQuickOatCodeFor for walking stacks.
- SafeMap<mirror::ArtMethod*, const void*> method_code_map_;
+ SafeMap<mirror::ArtMethod*, const void*> method_code_map_ GUARDED_BY(lock_);
DISALLOW_COPY_AND_ASSIGN(JitCodeCache);
};
diff --git a/runtime/jit/jit_code_cache_test.cc b/runtime/jit/jit_code_cache_test.cc
new file mode 100644
index 0000000..2155552
--- /dev/null
+++ b/runtime/jit/jit_code_cache_test.cc
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common_runtime_test.h"
+
+#include "class_linker.h"
+#include "jit_code_cache.h"
+#include "mirror/art_method-inl.h"
+#include "scoped_thread_state_change.h"
+#include "thread-inl.h"
+#include "utils.h"
+
+namespace art {
+namespace jit {
+
+class JitCodeCacheTest : public CommonRuntimeTest {
+ public:
+};
+
+TEST_F(JitCodeCacheTest, TestCoverage) {
+ std::string error_msg;
+ constexpr size_t kSize = 1 * MB;
+ std::unique_ptr<JitCodeCache> code_cache(
+ JitCodeCache::Create(kSize, &error_msg));
+ ASSERT_TRUE(code_cache.get() != nullptr) << error_msg;
+ ASSERT_TRUE(code_cache->CodeCachePtr() != nullptr);
+ ASSERT_EQ(code_cache->CodeCacheSize(), 0u);
+ ASSERT_GT(code_cache->CodeCacheRemain(), 0u);
+ ASSERT_TRUE(code_cache->DataCachePtr() != nullptr);
+ ASSERT_EQ(code_cache->DataCacheSize(), 0u);
+ ASSERT_GT(code_cache->DataCacheRemain(), 0u);
+ ASSERT_EQ(code_cache->CodeCacheRemain() + code_cache->DataCacheRemain(), kSize);
+ ASSERT_EQ(code_cache->NumMethods(), 0u);
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<1> hs(soa.Self());
+ uint8_t* const reserved_code = code_cache->ReserveCode(soa.Self(), 4 * KB);
+ ASSERT_TRUE(reserved_code != nullptr);
+ ASSERT_TRUE(code_cache->ContainsCodePtr(reserved_code));
+ ASSERT_EQ(code_cache->NumMethods(), 1u);
+ ClassLinker* const cl = Runtime::Current()->GetClassLinker();
+ auto h_method = hs.NewHandle(cl->AllocArtMethod(soa.Self()));
+ ASSERT_FALSE(code_cache->ContainsMethod(h_method.Get()));
+ h_method->SetEntryPointFromQuickCompiledCode(reserved_code);
+ ASSERT_TRUE(code_cache->ContainsMethod(h_method.Get()));
+ ASSERT_EQ(code_cache->GetCodeFor(h_method.Get()), reserved_code);
+ // Save the code and then change it.
+ code_cache->SaveCompiledCode(h_method.Get(), reserved_code);
+ h_method->SetEntryPointFromQuickCompiledCode(nullptr);
+ ASSERT_EQ(code_cache->GetCodeFor(h_method.Get()), reserved_code);
+ const uint8_t data_arr[] = {1, 2, 3, 4, 5};
+ uint8_t* data_ptr = code_cache->AddDataArray(soa.Self(), data_arr, data_arr + sizeof(data_arr));
+ ASSERT_TRUE(data_ptr != nullptr);
+ ASSERT_EQ(memcmp(data_ptr, data_arr, sizeof(data_arr)), 0);
+}
+
+TEST_F(JitCodeCacheTest, TestOverflow) {
+ std::string error_msg;
+ constexpr size_t kSize = 1 * MB;
+ std::unique_ptr<JitCodeCache> code_cache(
+ JitCodeCache::Create(kSize, &error_msg));
+ ASSERT_TRUE(code_cache.get() != nullptr) << error_msg;
+ ASSERT_TRUE(code_cache->CodeCachePtr() != nullptr);
+ size_t code_bytes = 0;
+ size_t data_bytes = 0;
+ constexpr size_t kCodeArrSize = 4 * KB;
+ constexpr size_t kDataArrSize = 4 * KB;
+ uint8_t data_arr[kDataArrSize] = {53};
+ // Add code and data until we are full.
+ uint8_t* code_ptr = nullptr;
+ uint8_t* data_ptr = nullptr;
+ do {
+ code_ptr = code_cache->ReserveCode(Thread::Current(), kCodeArrSize);
+ data_ptr = code_cache->AddDataArray(Thread::Current(), data_arr, data_arr + kDataArrSize);
+ if (code_ptr != nullptr) {
+ code_bytes += kCodeArrSize;
+ }
+ if (data_ptr != nullptr) {
+ data_bytes += kDataArrSize;
+ }
+ } while (code_ptr != nullptr || data_ptr != nullptr);
+ // Make sure we added a reasonable amount.
+ CHECK_GT(code_bytes, 0u);
+ CHECK_LE(code_bytes, kSize);
+ CHECK_GT(data_bytes, 0u);
+ CHECK_LE(data_bytes, kSize);
+ CHECK_GE(code_bytes + data_bytes, kSize * 4 / 5);
+}
+
+} // namespace jit
+} // namespace art
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 6063e1e..5e38470 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -41,6 +41,7 @@
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
+#include "mirror/field.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
@@ -164,8 +165,10 @@
// See if the override ClassLoader is set for gtests.
class_loader = soa.Decode<mirror::ClassLoader*>(soa.Self()->GetClassLoaderOverride());
if (class_loader != nullptr) {
- // If so, CommonCompilerTest should have set UseCompileTimeClassPath.
- CHECK(Runtime::Current()->UseCompileTimeClassPath());
+ // If so, CommonCompilerTest should have marked the runtime as an AOT compiler that is not
+ // compiling a boot image.
+ CHECK(Runtime::Current()->IsAotCompiler());
+ CHECK(!Runtime::Current()->IsCompilingBootImage());
return class_loader;
}
// Use the BOOTCLASSPATH.
@@ -344,7 +347,13 @@
static jfieldID FromReflectedField(JNIEnv* env, jobject jlr_field) {
CHECK_NON_NULL_ARGUMENT(jlr_field);
ScopedObjectAccess soa(env);
- return soa.EncodeField(mirror::ArtField::FromReflectedField(soa, jlr_field));
+ mirror::Object* obj_field = soa.Decode<mirror::Object*>(jlr_field);
+ if (obj_field->GetClass() != mirror::Field::StaticClass()) {
+ // Not even a java.lang.reflect.Field, return nullptr.
+ return nullptr;
+ }
+ auto* field = static_cast<mirror::Field*>(obj_field);
+ return soa.EncodeField(field->GetArtField());
}
static jobject ToReflectedMethod(JNIEnv* env, jclass, jmethodID mid, jboolean) {
@@ -371,14 +380,7 @@
CHECK_NON_NULL_ARGUMENT(fid);
ScopedObjectAccess soa(env);
mirror::ArtField* f = soa.DecodeField(fid);
- ScopedLocalRef<jobject> art_field(env, soa.AddLocalReference<jobject>(f));
- jobject reflect_field = env->AllocObject(WellKnownClasses::java_lang_reflect_Field);
- if (env->ExceptionCheck()) {
- return nullptr;
- }
- SetObjectField(env, reflect_field,
- WellKnownClasses::java_lang_reflect_Field_artField, art_field.get());
- return reflect_field;
+ return soa.AddLocalReference<jobject>(mirror::Field::CreateFromArtField(soa.Self(), f, true));
}
static jclass GetObjectClass(JNIEnv* env, jobject java_object) {
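With mirror::Field in place, FromReflectedField and ToReflectedField above round-trip through a real java.lang.reflect.Field object instead of the old hidden artField slot; the JNI-visible contract is unchanged, except that FromReflectedField now returns nullptr for an object that is not a Field. A usage sketch against the standard JNI API:

    // Round trip: jfieldID -> java.lang.reflect.Field object -> jfieldID.
    jobject AsReflectedField(JNIEnv* env, jclass cls, jfieldID fid) {
      return env->ToReflectedField(cls, fid, JNI_FALSE /* instance field */);
    }

    jfieldID BackToFieldId(JNIEnv* env, jobject reflected) {
      // Per the change above, returns nullptr if `reflected` is not a
      // java.lang.reflect.Field.
      return env->FromReflectedField(reflected);
    }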
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 1048214..5516eab 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -1355,24 +1355,38 @@
s = env_->NewStringUTF("\xed\xa0\x81\xed\xb0\x80");
EXPECT_NE(s, nullptr);
EXPECT_EQ(2, env_->GetStringLength(s));
- // Note that this uses 2 x 3 byte UTF sequences, one
- // for each half of the surrogate pair.
- EXPECT_EQ(6, env_->GetStringUTFLength(s));
+
+ // The surrogate pair gets encoded into a 4 byte UTF sequence..
+ EXPECT_EQ(4, env_->GetStringUTFLength(s));
const char* chars = env_->GetStringUTFChars(s, nullptr);
- EXPECT_STREQ("\xed\xa0\x81\xed\xb0\x80", chars);
+ EXPECT_STREQ("\xf0\x90\x90\x80", chars);
env_->ReleaseStringUTFChars(s, chars);
+ // .. but is stored as is in the utf-16 representation.
+ const jchar* jchars = env_->GetStringChars(s, nullptr);
+ EXPECT_EQ(0xd801, jchars[0]);
+ EXPECT_EQ(0xdc00, jchars[1]);
+ env_->ReleaseStringChars(s, jchars);
+
// 4 byte UTF sequence appended to an encoded surrogate pair.
s = env_->NewStringUTF("\xed\xa0\x81\xed\xb0\x80 \xf0\x9f\x8f\xa0");
EXPECT_NE(s, nullptr);
- EXPECT_EQ(5, env_->GetStringLength(s));
- EXPECT_EQ(13, env_->GetStringUTFLength(s));
- chars = env_->GetStringUTFChars(s, nullptr);
+
// The 4 byte sequence {0xf0, 0x9f, 0x8f, 0xa0} is converted into a surrogate
- // pair {0xd83c, 0xdfe0} which is then converted into a two three byte
- // sequences {0xed 0xa0, 0xbc} and {0xed, 0xbf, 0xa0}, one for each half of
- // the surrogate pair.
- EXPECT_STREQ("\xed\xa0\x81\xed\xb0\x80 \xed\xa0\xbc\xed\xbf\xa0", chars);
+ // pair {0xd83c, 0xdfe0}.
+ EXPECT_EQ(5, env_->GetStringLength(s));
+ jchars = env_->GetStringChars(s, nullptr);
+ // The first surrogate pair, encoded as such in the input.
+ EXPECT_EQ(0xd801, jchars[0]);
+ EXPECT_EQ(0xdc00, jchars[1]);
+ // The second surrogate pair, from the 4 byte UTF sequence in the input.
+ EXPECT_EQ(0xd83c, jchars[3]);
+ EXPECT_EQ(0xdfe0, jchars[4]);
+ env_->ReleaseStringChars(s, jchars);
+
+ EXPECT_EQ(9, env_->GetStringUTFLength(s));
+ chars = env_->GetStringUTFChars(s, nullptr);
+ EXPECT_STREQ("\xf0\x90\x90\x80 \xf0\x9f\x8f\xa0", chars);
env_->ReleaseStringUTFChars(s, chars);
// A string with 1, 2, 3 and 4 byte UTF sequences with spaces
@@ -1380,7 +1394,7 @@
s = env_->NewStringUTF("\x24 \xc2\xa2 \xe2\x82\xac \xf0\x9f\x8f\xa0");
EXPECT_NE(s, nullptr);
EXPECT_EQ(8, env_->GetStringLength(s));
- EXPECT_EQ(15, env_->GetStringUTFLength(s));
+ EXPECT_EQ(13, env_->GetStringUTFLength(s));
}
TEST_F(JniInternalTest, NewString) {
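The updated expectations encode the switch from CESU-8-style output to standard UTF-8: a supplementary character now leaves GetStringUTFChars as one 4-byte sequence instead of two 3-byte surrogate halves (4 bytes, not 6), while GetStringChars still returns the surrogate pair in UTF-16. The per-code-point length rule behind the new numbers, as a sketch:

    #include <cstddef>
    #include <cstdint>

    // Bytes for one code point in standard UTF-8.
    size_t Utf8Length(uint32_t code_point) {
      if (code_point < 0x80) return 1;     // U+0024 dollar sign
      if (code_point < 0x800) return 2;    // U+00A2 cent sign
      if (code_point < 0x10000) return 3;  // U+20AC euro sign
      return 4;                            // U+1F3E0 house, a surrogate pair in UTF-16
    }

    // The final test string ($, cent, euro, house, space-separated):
    // 1 + 2 + 3 + 4 payload bytes + 3 spaces = 13, the expected UTF length.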
diff --git a/runtime/memory_region.h b/runtime/memory_region.h
index 939a1a9..6a784eb 100644
--- a/runtime/memory_region.h
+++ b/runtime/memory_region.h
@@ -19,6 +19,7 @@
#include <stdint.h>
+#include "base/casts.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/value_object.h"
@@ -47,39 +48,28 @@
uint8_t* end() const { return start() + size_; }
// Load value of type `T` at `offset`. The memory address corresponding
- // to `offset` should be word-aligned.
- template<typename T> T Load(uintptr_t offset) const {
- // TODO: DCHECK that the address is word-aligned.
- return *ComputeInternalPointer<T>(offset);
+ // to `offset` should be word-aligned (on ARM, this is a requirement).
+ template<typename T>
+ ALWAYS_INLINE T Load(uintptr_t offset) const {
+ T* address = ComputeInternalPointer<T>(offset);
+ DCHECK(IsWordAligned(address));
+ return *address;
}
// Store `value` (of type `T`) at `offset`. The memory address
- // corresponding to `offset` should be word-aligned.
- template<typename T> void Store(uintptr_t offset, T value) const {
- // TODO: DCHECK that the address is word-aligned.
- *ComputeInternalPointer<T>(offset) = value;
- }
-
- // TODO: Local hack to prevent name clashes between two conflicting
- // implementations of bit_cast:
- // - art::bit_cast<Destination, Source> runtime/base/casts.h, and
- // - art::bit_cast<Source, Destination> from runtime/utils.h.
- // Remove this when these routines have been merged.
- template<typename Source, typename Destination>
- static Destination local_bit_cast(Source in) {
- static_assert(sizeof(Source) <= sizeof(Destination),
- "Size of Source not <= size of Destination");
- union {
- Source u;
- Destination v;
- } tmp;
- tmp.u = in;
- return tmp.v;
+ // corresponding to `offset` should be word-aligned (on ARM, this is
+ // a requirement).
+ template<typename T>
+ ALWAYS_INLINE void Store(uintptr_t offset, T value) const {
+ T* address = ComputeInternalPointer<T>(offset);
+ DCHECK(IsWordAligned(address));
+ *address = value;
}
// Load value of type `T` at `offset`. The memory address corresponding
// to `offset` does not need to be word-aligned.
- template<typename T> T LoadUnaligned(uintptr_t offset) const {
+ template<typename T>
+ ALWAYS_INLINE T LoadUnaligned(uintptr_t offset) const {
// Equivalent unsigned integer type corresponding to T.
typedef typename UnsignedIntegerType<sizeof(T)>::type U;
U equivalent_unsigned_integer_value = 0;
@@ -88,15 +78,16 @@
equivalent_unsigned_integer_value +=
*ComputeInternalPointer<uint8_t>(offset + i) << (i * kBitsPerByte);
}
- return local_bit_cast<U, T>(equivalent_unsigned_integer_value);
+ return bit_cast<T, U>(equivalent_unsigned_integer_value);
}
// Store `value` (of type `T`) at `offset`. The memory address
// corresponding to `offset` does not need to be word-aligned.
- template<typename T> void StoreUnaligned(uintptr_t offset, T value) const {
+ template<typename T>
+ ALWAYS_INLINE void StoreUnaligned(uintptr_t offset, T value) const {
// Equivalent unsigned integer type corresponding to T.
typedef typename UnsignedIntegerType<sizeof(T)>::type U;
- U equivalent_unsigned_integer_value = local_bit_cast<T, U>(value);
+ U equivalent_unsigned_integer_value = bit_cast<U, T>(value);
// Write the value byte by byte in a little-endian fashion.
for (size_t i = 0; i < sizeof(U); ++i) {
*ComputeInternalPointer<uint8_t>(offset + i) =
@@ -104,19 +95,20 @@
}
}
- template<typename T> T* PointerTo(uintptr_t offset) const {
+ template<typename T>
+ ALWAYS_INLINE T* PointerTo(uintptr_t offset) const {
return ComputeInternalPointer<T>(offset);
}
// Load a single bit in the region. The bit at offset 0 is the least
// significant bit in the first byte.
- bool LoadBit(uintptr_t bit_offset) const {
+ ALWAYS_INLINE bool LoadBit(uintptr_t bit_offset) const {
uint8_t bit_mask;
uint8_t byte = *ComputeBitPointer(bit_offset, &bit_mask);
return byte & bit_mask;
}
- void StoreBit(uintptr_t bit_offset, bool value) const {
+ ALWAYS_INLINE void StoreBit(uintptr_t bit_offset, bool value) const {
uint8_t bit_mask;
uint8_t* byte = ComputeBitPointer(bit_offset, &bit_mask);
if (value) {
@@ -126,6 +118,31 @@
}
}
+ // Load `length` bits from the region starting at bit offset `bit_offset`.
+ // The bit at the smallest offset is the least significant bit in the
+ // loaded value. `length` must not be larger than the number of bits
+ // contained in the return value (32).
+ uint32_t LoadBits(uintptr_t bit_offset, size_t length) const {
+ CHECK_LE(length, sizeof(uint32_t) * kBitsPerByte);
+ uint32_t value = 0u;
+ for (size_t i = 0; i < length; ++i) {
+ value |= LoadBit(bit_offset + i) << i;
+ }
+ return value;
+ }
+
+ // Store `value` in `length` bits of the region, starting at bit offset
+ // `bit_offset`. The bit at the smallest offset is the least significant
+ // bit of the stored `value`. `value` must fit in `length` bits.
+ void StoreBits(uintptr_t bit_offset, uint32_t value, size_t length) {
+ CHECK_LT(value, 2u << length);
+ for (size_t i = 0; i < length; ++i) {
+ bool ith_bit = value & (1 << i);
+ StoreBit(bit_offset + i, ith_bit);
+ }
+ }
+
void CopyFrom(size_t offset, const MemoryRegion& from) const;
// Compute a sub memory region based on an existing one.
@@ -142,7 +159,8 @@
}
private:
- template<typename T> T* ComputeInternalPointer(size_t offset) const {
+ template<typename T>
+ ALWAYS_INLINE T* ComputeInternalPointer(size_t offset) const {
CHECK_GE(size(), sizeof(T));
CHECK_LE(offset, size() - sizeof(T));
return reinterpret_cast<T*>(start() + offset);
@@ -150,13 +168,20 @@
// Locate the bit with the given offset. Returns a pointer to the byte
// containing the bit, and sets bit_mask to the bit within that byte.
- uint8_t* ComputeBitPointer(uintptr_t bit_offset, uint8_t* bit_mask) const {
+ ALWAYS_INLINE uint8_t* ComputeBitPointer(uintptr_t bit_offset, uint8_t* bit_mask) const {
uintptr_t bit_remainder = (bit_offset & (kBitsPerByte - 1));
*bit_mask = (1U << bit_remainder);
uintptr_t byte_offset = (bit_offset >> kBitsPerByteLog2);
return ComputeInternalPointer<uint8_t>(byte_offset);
}
+ // Is `address` aligned on a machine word?
+ template<typename T> static bool IsWordAligned(const T* address) {
+ // Word alignment in bytes.
+ size_t kWordAlignment = GetInstructionSetPointerSize(kRuntimeISA);
+ return IsAlignedParam(address, kWordAlignment);
+ }
+
void* pointer_;
size_t size_;
};
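LoadBits and StoreBits compose the single-bit accessors into little-endian bitfields: bit i of the value lives at bit_offset + i, least-significant bit first. The same semantics over a bare byte buffer, as a standalone sketch (no bounds checks):

    #include <cstddef>
    #include <cstdint>

    uint32_t LoadBits(const uint8_t* buf, size_t bit_offset, size_t length) {
      uint32_t value = 0u;
      for (size_t i = 0; i < length; ++i) {
        size_t bit = bit_offset + i;
        value |= ((buf[bit / 8] >> (bit % 8)) & 1u) << i;
      }
      return value;
    }

    void StoreBits(uint8_t* buf, size_t bit_offset, uint32_t value, size_t length) {
      for (size_t i = 0; i < length; ++i) {
        size_t bit = bit_offset + i;
        uint8_t mask = static_cast<uint8_t>(1u << (bit % 8));
        if ((value >> i) & 1u) {
          buf[bit / 8] |= mask;
        } else {
          buf[bit / 8] &= static_cast<uint8_t>(~mask);
        }
      }
    }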
diff --git a/runtime/mirror/accessible_object.h b/runtime/mirror/accessible_object.h
new file mode 100644
index 0000000..6d4c0f6
--- /dev/null
+++ b/runtime/mirror/accessible_object.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_ACCESSIBLE_OBJECT_H_
+#define ART_RUNTIME_MIRROR_ACCESSIBLE_OBJECT_H_
+
+#include "class.h"
+#include "gc_root.h"
+#include "object.h"
+#include "object_callbacks.h"
+#include "read_barrier_option.h"
+#include "thread.h"
+
+namespace art {
+
+namespace mirror {
+
+// C++ mirror of java.lang.reflect.AccessibleObject
+class MANAGED AccessibleObject : public Object {
+ public:
+ static MemberOffset FlagOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(AccessibleObject, flag_);
+ }
+
+ template<bool kTransactionActive>
+ void SetAccessible(bool value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(padding_);
+ return SetFieldBoolean<kTransactionActive>(FlagOffset(), value ? 1u : 0u);
+ }
+
+ bool IsAccessible() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldBoolean(FlagOffset());
+ }
+
+ private:
+ uint8_t flag_;
+ // Padding required for now since "packed" will cause reflect.Field fields to not be aligned
+ // otherwise.
+ uint8_t padding_[3];
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AccessibleObject);
+};
+
+} // namespace mirror
+} // namespace art
+
+#endif // ART_RUNTIME_MIRROR_ACCESSIBLE_OBJECT_H_
diff --git a/runtime/mirror/art_field-inl.h b/runtime/mirror/art_field-inl.h
index 2b406bd..986852f 100644
--- a/runtime/mirror/art_field-inl.h
+++ b/runtime/mirror/art_field-inl.h
@@ -34,7 +34,7 @@
namespace mirror {
inline uint32_t ArtField::ClassSize() {
- uint32_t vtable_entries = Object::kVTableLength + 6;
+ uint32_t vtable_entries = Object::kVTableLength;
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0);
}
@@ -290,16 +290,19 @@
return GetTypeAsPrimitiveType() != Primitive::kPrimNot;
}
-inline Class* ArtField::GetType(bool resolve) {
- uint32_t field_index = GetDexFieldIndex();
- if (UNLIKELY(GetDeclaringClass()->IsProxyClass())) {
+template <bool kResolve>
+inline Class* ArtField::GetType() {
+ const uint32_t field_index = GetDexFieldIndex();
+ auto* declaring_class = GetDeclaringClass();
+ if (UNLIKELY(declaring_class->IsProxyClass())) {
return Runtime::Current()->GetClassLinker()->FindSystemClass(Thread::Current(),
GetTypeDescriptor());
}
- const DexFile* dex_file = GetDexFile();
+ auto* dex_cache = declaring_class->GetDexCache();
+ const DexFile* const dex_file = dex_cache->GetDexFile();
const DexFile::FieldId& field_id = dex_file->GetFieldId(field_index);
- mirror::Class* type = GetDexCache()->GetResolvedType(field_id.type_idx_);
- if (resolve && (type == nullptr)) {
+ mirror::Class* type = dex_cache->GetResolvedType(field_id.type_idx_);
+ if (kResolve && UNLIKELY(type == nullptr)) {
type = Runtime::Current()->GetClassLinker()->ResolveType(field_id.type_idx_, this);
CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
}
@@ -318,12 +321,19 @@
return GetDexCache()->GetDexFile();
}
-inline ArtField* ArtField::FromReflectedField(const ScopedObjectAccessAlreadyRunnable& soa,
- jobject jlr_field) {
- mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_reflect_Field_artField);
- mirror::ArtField* field = f->GetObject(soa.Decode<mirror::Object*>(jlr_field))->AsArtField();
- DCHECK(field != nullptr);
- return field;
+inline String* ArtField::GetStringName(Thread* self, bool resolve) {
+ auto dex_field_index = GetDexFieldIndex();
+ CHECK_NE(dex_field_index, DexFile::kDexNoIndex);
+ auto* dex_cache = GetDexCache();
+ const auto* dex_file = dex_cache->GetDexFile();
+ const auto& field_id = dex_file->GetFieldId(dex_field_index);
+ auto* name = dex_cache->GetResolvedString(field_id.name_idx_);
+ if (resolve && name == nullptr) {
+ StackHandleScope<1> hs(self);
+ name = Runtime::Current()->GetClassLinker()->ResolveString(
+ *dex_file, field_id.name_idx_, hs.NewHandle(dex_cache));
+ }
+ return name;
}
} // namespace mirror
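GetType(bool resolve) becoming GetType<kResolve>() trades a runtime bool for a template parameter, so each instantiation statically drops the branch it never takes. The general shape, sketched with illustrative names (not ART types):

    #include <cstdint>
    #include <unordered_map>

    struct Type {};
    std::unordered_map<uint32_t, Type*> g_resolved;  // Stand-in for a dex cache.

    Type* Resolve(uint32_t idx) {  // Stand-in for the slow resolution path.
      Type* t = new Type();
      g_resolved[idx] = t;
      return t;
    }

    template <bool kResolve>
    Type* GetType(uint32_t idx) {
      auto it = g_resolved.find(idx);
      Type* type = (it != g_resolved.end()) ? it->second : nullptr;
      if (kResolve && type == nullptr) {  // Folded away when kResolve is false.
        type = Resolve(idx);
      }
      return type;
    }

    // GetType<true>(7) resolves on a cache miss; GetType<false>(7) never does.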
diff --git a/runtime/mirror/art_field.cc b/runtime/mirror/art_field.cc
index 3cea4a1..4c36753 100644
--- a/runtime/mirror/art_field.cc
+++ b/runtime/mirror/art_field.cc
@@ -45,7 +45,7 @@
void ArtField::SetOffset(MemberOffset num_bytes) {
DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
if (kIsDebugBuild && Runtime::Current()->IsAotCompiler() &&
- !Runtime::Current()->UseCompileTimeClassPath()) {
+ Runtime::Current()->IsCompilingBootImage()) {
Primitive::Type type = GetTypeAsPrimitiveType();
if (type == Primitive::kPrimDouble || type == Primitive::kPrimLong) {
DCHECK_ALIGNED(num_bytes.Uint32Value(), 8);
diff --git a/runtime/mirror/art_field.h b/runtime/mirror/art_field.h
index a1d8844..d640165 100644
--- a/runtime/mirror/art_field.h
+++ b/runtime/mirror/art_field.h
@@ -47,10 +47,6 @@
return sizeof(ArtField);
}
- ALWAYS_INLINE static ArtField* FromReflectedField(const ScopedObjectAccessAlreadyRunnable& soa,
- jobject jlr_field)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetDeclaringClass(Class *new_declaring_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -155,13 +151,17 @@
const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Resolves / returns the name from the dex cache.
+ String* GetStringName(Thread* self, bool resolve) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
const char* GetTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Primitive::Type GetTypeAsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- Class* GetType(bool resolve) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template <bool kResolve>
+ Class* GetType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
size_t FieldSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index c27c6e9..0ccf5db 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -230,10 +230,6 @@
return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
}
-inline StackMap ArtMethod::GetStackMap(uint32_t native_pc_offset) {
- return GetOptimizedCodeInfo().GetStackMapForNativePcOffset(native_pc_offset);
-}
-
inline CodeInfo ArtMethod::GetOptimizedCodeInfo() {
DCHECK(IsOptimized(sizeof(void*)));
const void* code_pointer = GetQuickOatCodePointer(sizeof(void*));
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index bc58709..c1f7594 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -202,8 +202,8 @@
const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
if (IsOptimized(sizeof(void*))) {
- uint32_t ret = GetStackMap(sought_offset).GetDexPc();
- return ret;
+ CodeInfo code_info = GetOptimizedCodeInfo();
+ return code_info.GetStackMapForNativePcOffset(sought_offset).GetDexPc(code_info);
}
MappingTable table(entry_point != nullptr ?
@@ -401,7 +401,9 @@
Runtime* runtime = Runtime::Current();
// Call the invoke stub, passing everything as arguments.
- if (UNLIKELY(!runtime->IsStarted())) {
+ // If the runtime is not yet started, or if the debugger requires it, then perform the
+ // invocation with the interpreter.
+ if (UNLIKELY(!runtime->IsStarted() || Dbg::IsForcedInterpreterNeededForCalling(self, this))) {
if (IsStatic()) {
art::interpreter::EnterInterpreterFromInvoke(self, this, nullptr, args, result);
} else {
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index d878f25..82e5d00 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -125,6 +125,14 @@
return (GetAccessFlags() & kAccNative) != 0;
}
+ bool ShouldNotInline() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return (GetAccessFlags() & kAccDontInline) != 0;
+ }
+
+ void SetShouldNotInline() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetAccessFlags(GetAccessFlags() | kAccDontInline);
+ }
+
bool IsFastNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint32_t mask = kAccFastNative | kAccNative;
return (GetAccessFlags() & mask) == mask;
@@ -348,7 +356,6 @@
const uint8_t* GetVmapTable(const void* code_pointer, size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- StackMap GetStackMap(uint32_t native_pc_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
CodeInfo GetOptimizedCodeInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Callers should wrap the uint8_t* in a GcMap instance for convenient access.
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 97052f1..c368dc6 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -45,7 +45,7 @@
bool is_variable_size = IsVariableSize<kVerifyFlags, kReadBarrierOption>();
CHECK(!is_variable_size) << " class=" << PrettyTypeOf(this);
}
- return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, object_size_));
+ return GetField32(ObjectSizeOffset());
}
inline Class* Class::GetSuperClass() {
@@ -523,7 +523,7 @@
<< " IsArtField=" << (this == ArtField::GetJavaLangReflectArtField())
<< " IsArtMethod=" << (this == ArtMethod::GetJavaLangReflectArtMethod())
<< " descriptor=" << PrettyDescriptor(this);
- return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
+ return GetField32<kVerifyFlags>(AccessFlagsOffset());
}
inline String* Class::GetName() {
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 9fa6073..29851a9 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -228,8 +228,12 @@
os << " interfaces (" << num_direct_interfaces << "):\n";
for (size_t i = 0; i < num_direct_interfaces; ++i) {
Class* interface = GetDirectInterface(self, h_this, i);
- const ClassLoader* cl = interface->GetClassLoader();
- os << StringPrintf(" %2zd: %s (cl=%p)\n", i, PrettyClass(interface).c_str(), cl);
+ if (interface == nullptr) {
+ os << StringPrintf(" %2zd: nullptr!\n", i);
+ } else {
+ const ClassLoader* cl = interface->GetClassLoader();
+ os << StringPrintf(" %2zd: %s (cl=%p)\n", i, PrettyClass(interface).c_str(), cl);
+ }
}
}
if (!IsLoaded()) {
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index e7f7c6e..2dff383 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -204,6 +204,9 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static MemberOffset AccessFlagsOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(Class, access_flags_);
+ }
void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -527,6 +530,9 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
uint32_t GetObjectSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static MemberOffset ObjectSizeOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(Class, object_size_);
+ }
void SetObjectSize(uint32_t new_object_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(!IsVariableSize());
diff --git a/runtime/mirror/field-inl.h b/runtime/mirror/field-inl.h
new file mode 100644
index 0000000..24ebc48
--- /dev/null
+++ b/runtime/mirror/field-inl.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_FIELD_INL_H_
+#define ART_RUNTIME_MIRROR_FIELD_INL_H_
+
+#include "field.h"
+
+#include "art_field-inl.h"
+#include "runtime-inl.h"
+
+namespace art {
+
+namespace mirror {
+
+template <bool kTransactionActive>
+inline mirror::Field* Field::CreateFromArtField(Thread* self, mirror::ArtField* field,
+ bool force_resolve) {
+ CHECK(!kMovingFields);
+ // Try to resolve type before allocating since this is a thread suspension point.
+ mirror::Class* type = field->GetType<true>();
+
+ if (type == nullptr) {
+ if (force_resolve) {
+ if (kIsDebugBuild) {
+ self->AssertPendingException();
+ }
+ return nullptr;
+ } else {
+ // Can't resolve, clear the exception if it isn't OOME and continue with a null type.
+ mirror::Throwable* exception = self->GetException();
+ if (exception->GetClass()->DescriptorEquals("Ljava/lang/OutOfMemoryError;")) {
+ return nullptr;
+ }
+ self->ClearException();
+ }
+ }
+ StackHandleScope<1> hs(self);
+ auto ret = hs.NewHandle(static_cast<Field*>(StaticClass()->AllocObject(self)));
+ if (ret.Get() == nullptr) {
+ if (kIsDebugBuild) {
+ self->AssertPendingException();
+ }
+ return nullptr;
+ }
+ auto dex_field_index = field->GetDexFieldIndex();
+ auto* resolved_field = field->GetDexCache()->GetResolvedField(dex_field_index);
+ if (resolved_field != nullptr) {
+ DCHECK_EQ(resolved_field, field);
+ } else {
+ // We rely on the field being resolved so that we can get back to the ArtField
+ // (i.e. FromReflectedField).
+ field->GetDexCache()->SetResolvedField(dex_field_index, field);
+ }
+ ret->SetType<kTransactionActive>(type);
+ ret->SetDeclaringClass<kTransactionActive>(field->GetDeclaringClass());
+ ret->SetAccessFlags<kTransactionActive>(field->GetAccessFlags());
+ ret->SetDexFieldIndex<kTransactionActive>(dex_field_index);
+ ret->SetOffset<kTransactionActive>(field->GetOffset().Int32Value());
+ return ret.Get();
+}
+
+} // namespace mirror
+} // namespace art
+
+#endif // ART_RUNTIME_MIRROR_FIELD_INL_H_
diff --git a/runtime/mirror/field.cc b/runtime/mirror/field.cc
new file mode 100644
index 0000000..1724682
--- /dev/null
+++ b/runtime/mirror/field.cc
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "field-inl.h"
+
+#include "dex_cache-inl.h"
+#include "object_array-inl.h"
+#include "object-inl.h"
+
+namespace art {
+namespace mirror {
+
+GcRoot<Class> Field::static_class_;
+GcRoot<Class> Field::array_class_;
+
+void Field::SetClass(Class* klass) {
+ CHECK(static_class_.IsNull()) << static_class_.Read() << " " << klass;
+ CHECK(klass != nullptr);
+ static_class_ = GcRoot<Class>(klass);
+}
+
+void Field::ResetClass() {
+ CHECK(!static_class_.IsNull());
+ static_class_ = GcRoot<Class>(nullptr);
+}
+
+void Field::SetArrayClass(Class* klass) {
+ CHECK(array_class_.IsNull()) << array_class_.Read() << " " << klass;
+ CHECK(klass != nullptr);
+ array_class_ = GcRoot<Class>(klass);
+}
+
+void Field::ResetArrayClass() {
+ CHECK(!array_class_.IsNull());
+ array_class_ = GcRoot<Class>(nullptr);
+}
+
+void Field::VisitRoots(RootCallback* callback, void* arg) {
+ static_class_.VisitRootIfNonNull(callback, arg, RootInfo(kRootStickyClass));
+ array_class_.VisitRootIfNonNull(callback, arg, RootInfo(kRootStickyClass));
+}
+
+ArtField* Field::GetArtField() {
+ mirror::DexCache* const dex_cache = GetDeclaringClass()->GetDexCache();
+ mirror::ArtField* const art_field = dex_cache->GetResolvedField(GetDexFieldIndex());
+ CHECK(art_field != nullptr);
+ return art_field;
+}
+
+} // namespace mirror
+} // namespace art
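
The Set/Reset pairs above enforce one-time registration of the cached class roots: SetClass requires the slot to be empty, ResetClass requires it to be filled. A standalone sketch of the same invariant, with assert standing in for ART's CHECK and a plain pointer standing in for GcRoot<Class> (both simplifications):

#include <cassert>

struct Class {};  // stand-in for mirror::Class

static Class* static_class_ = nullptr;  // stand-in for GcRoot<Class>

void SetClass(Class* klass) {
  assert(static_class_ == nullptr);  // must not already be registered
  assert(klass != nullptr);
  static_class_ = klass;
}

void ResetClass() {
  assert(static_class_ != nullptr);  // must have been registered
  static_class_ = nullptr;
}

int main() {
  Class c;
  SetClass(&c);  // ok: first registration
  ResetClass();  // ok: paired teardown
  SetClass(&c);  // ok again after reset
}
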
diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h
new file mode 100644
index 0000000..f54340a
--- /dev/null
+++ b/runtime/mirror/field.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_FIELD_H_
+#define ART_RUNTIME_MIRROR_FIELD_H_
+
+#include "accessible_object.h"
+#include "gc_root.h"
+#include "object.h"
+#include "object_callbacks.h"
+#include "read_barrier_option.h"
+
+namespace art {
+
+struct FieldOffsets;
+
+namespace mirror {
+
+class ArtField;
+class Class;
+class String;
+
+// C++ mirror of java.lang.reflect.Field.
+class MANAGED Field : public AccessibleObject {
+ public:
+ static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return static_class_.Read();
+ }
+
+ static mirror::Class* ArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return array_class_.Read();
+ }
+
+ ALWAYS_INLINE uint32_t GetDexFieldIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, dex_field_index_));
+ }
+
+ mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_));
+ }
+
+ uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_));
+ }
+
+ bool IsStatic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return (GetAccessFlags() & kAccStatic) != 0;
+ }
+
+ bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return (GetAccessFlags() & kAccFinal) != 0;
+ }
+
+ bool IsVolatile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return (GetAccessFlags() & kAccVolatile) != 0;
+ }
+
+ ALWAYS_INLINE Primitive::Type GetTypeAsPrimitiveType()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetType()->GetPrimitiveType();
+ }
+
+ mirror::Class* GetType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldObject<mirror::Class>(OFFSET_OF_OBJECT_MEMBER(Field, type_));
+ }
+
+ int32_t GetOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, offset_));
+ }
+
+ static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void VisitRoots(RootCallback* callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Slow, try to use only for PrettyField and such.
+ mirror::ArtField* GetArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template <bool kTransactionActive = false>
+ static mirror::Field* CreateFromArtField(Thread* self, mirror::ArtField* field,
+ bool force_resolve)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ private:
+ HeapReference<mirror::Class> declaring_class_;
+ HeapReference<mirror::Class> type_;
+ int32_t access_flags_;
+ int32_t dex_field_index_;
+ int32_t offset_;
+
+ template<bool kTransactionActive>
+ void SetDeclaringClass(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), c);
+ }
+
+ template<bool kTransactionActive>
+ void SetType(mirror::Class* type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, type_), type);
+ }
+
+ template<bool kTransactionActive>
+ void SetAccessFlags(uint32_t flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_), flags);
+ }
+
+ template<bool kTransactionActive>
+ void SetDexFieldIndex(uint32_t idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, dex_field_index_), idx);
+ }
+
+ template<bool kTransactionActive>
+ void SetOffset(uint32_t offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, offset_), offset);
+ }
+
+ static GcRoot<Class> static_class_; // java.lang.reflect.Field.class.
+ static GcRoot<Class> array_class_; // array of java.lang.reflect.Field.
+
+ friend struct art::FieldOffsets; // for verifying offset information
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Field);
+};
+
+} // namespace mirror
+} // namespace art
+
+#endif // ART_RUNTIME_MIRROR_FIELD_H_
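
The accessors in mirror::Field never touch the members directly; they go through byte offsets via OFFSET_OF_OBJECT_MEMBER so the same paths work for managed heap objects. A rough standalone illustration of offset-based 32-bit reads, with offsetof standing in for ART's offset machinery (a simplification; the real GetField32 also handles verification and memory-order concerns):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

struct FakeField {
  int32_t access_flags_;
  int32_t dex_field_index_;
  int32_t offset_;
};

// Read a 32-bit member through its byte offset, as GetField32 does.
int32_t GetField32(const void* obj, size_t offset) {
  int32_t v;
  std::memcpy(&v, static_cast<const char*>(obj) + offset, sizeof(v));
  return v;
}

int main() {
  FakeField f{0x0008 /* kAccStatic */, 42, 16};
  assert(GetField32(&f, offsetof(FakeField, dex_field_index_)) == 42);
  assert(GetField32(&f, offsetof(FakeField, offset_)) == 16);
}
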
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index bbbdf98..57ac46f 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -214,7 +214,7 @@
if (field->GetOffset().Int32Value() == field_offset.Int32Value()) {
CHECK_NE(field->GetTypeAsPrimitiveType(), Primitive::kPrimNot);
// TODO: resolve the field type for moving GC.
- mirror::Class* field_type = field->GetType(!kMovingCollector);
+ mirror::Class* field_type = field->GetType<!kMovingCollector>();
if (field_type != nullptr) {
CHECK(field_type->IsAssignableFrom(new_value->GetClass()));
}
@@ -236,7 +236,7 @@
if (field->GetOffset().Int32Value() == field_offset.Int32Value()) {
CHECK_NE(field->GetTypeAsPrimitiveType(), Primitive::kPrimNot);
// TODO: resolve the field type for moving GC.
- mirror::Class* field_type = field->GetType(!kMovingCollector);
+ mirror::Class* field_type = field->GetType<!kMovingCollector>();
if (field_type != nullptr) {
CHECK(field_type->IsAssignableFrom(new_value->GetClass()));
}
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 780c5ae..b730670 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -100,7 +100,7 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool VerifierInstanceOf(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool InstanceOf(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE bool InstanceOf(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 21972a1..1ce298d 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -360,8 +360,7 @@
// pretend we are trying to access 'Static.s0' from StaticsFromCode.<clinit>
ScopedObjectAccess soa(Thread::Current());
jobject class_loader = LoadDex("StaticsFromCode");
- const DexFile* dex_file = Runtime::Current()->GetCompileTimeClassPath(class_loader)[0];
- CHECK(dex_file != NULL);
+ const DexFile* dex_file = GetFirstDexFile(class_loader);
StackHandleScope<2> hs(soa.Self());
Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<ClassLoader*>(class_loader)));
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 7345448..69ef69c 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -99,7 +99,7 @@
return java_lang_ref_Reference_.Read<kReadBarrierOption>();
}
static void SetClass(Class* klass);
- static void ResetClass(void);
+ static void ResetClass();
static void VisitRoots(RootCallback* callback, void* arg);
private:
diff --git a/runtime/modifiers.h b/runtime/modifiers.h
index 09dc78a..e7bd207 100644
--- a/runtime/modifiers.h
+++ b/runtime/modifiers.h
@@ -48,6 +48,10 @@
static constexpr uint32_t kAccFastNative = 0x00080000; // method (dex only)
static constexpr uint32_t kAccMiranda = 0x00200000; // method (dex only)
+// Set if the compiler decides it is not worth trying to inline the method.
+// This stops other callers from attempting it again and again.
+static constexpr uint32_t kAccDontInline = 0x00400000; // method (dex only)
+
// Special runtime-only flags.
// Note: if only kAccClassIsReference is set, we have a soft reference.
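
Like the other access flags in this header, kAccDontInline is meant to be tested and set with plain bitwise operations. A small sketch (the flag values are copied from this header; the access_flags variable is a hypothetical stand-in for a method's flags word):

#include <cassert>
#include <cstdint>

static constexpr uint32_t kAccMiranda    = 0x00200000;
static constexpr uint32_t kAccDontInline = 0x00400000;

int main() {
  uint32_t access_flags = 0;
  access_flags |= kAccDontInline;                // compiler gave up on inlining
  assert((access_flags & kAccDontInline) != 0);  // later callers skip the attempt
  assert((access_flags & kAccMiranda) == 0);
}
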
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 57ca2b1..2724d91 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -79,7 +79,9 @@
static void VMDebug_startMethodTracingDdmsImpl(JNIEnv*, jclass, jint bufferSize, jint flags,
jboolean samplingEnabled, jint intervalUs) {
- Trace::Start("[DDMS]", -1, bufferSize, flags, true, samplingEnabled, intervalUs);
+ Trace::Start("[DDMS]", -1, bufferSize, flags, Trace::TraceOutputMode::kDDMS,
+ samplingEnabled ? Trace::TraceMode::kSampling : Trace::TraceMode::kMethodTracing,
+ intervalUs);
}
static void VMDebug_startMethodTracingFd(JNIEnv* env, jclass, jstring javaTraceFilename,
@@ -102,7 +104,9 @@
if (traceFilename.c_str() == NULL) {
return;
}
- Trace::Start(traceFilename.c_str(), fd, bufferSize, flags, false, samplingEnabled, intervalUs);
+ Trace::Start(traceFilename.c_str(), fd, bufferSize, flags, Trace::TraceOutputMode::kFile,
+ samplingEnabled ? Trace::TraceMode::kSampling : Trace::TraceMode::kMethodTracing,
+ intervalUs);
}
static void VMDebug_startMethodTracingFilename(JNIEnv* env, jclass, jstring javaTraceFilename,
@@ -112,7 +116,9 @@
if (traceFilename.c_str() == NULL) {
return;
}
- Trace::Start(traceFilename.c_str(), -1, bufferSize, flags, false, samplingEnabled, intervalUs);
+ Trace::Start(traceFilename.c_str(), -1, bufferSize, flags, Trace::TraceOutputMode::kFile,
+ samplingEnabled ? Trace::TraceMode::kSampling : Trace::TraceMode::kMethodTracing,
+ intervalUs);
}
static jint VMDebug_getMethodTracingMode(JNIEnv*, jclass) {
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 60d14e9..0ca9d24 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -17,21 +17,28 @@
#include "java_lang_Class.h"
#include "class_linker.h"
+#include "common_throws.h"
#include "dex_file-inl.h"
#include "jni_internal.h"
#include "nth_caller_visitor.h"
+#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
+#include "mirror/field.h"
#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/string-inl.h"
#include "scoped_thread_state_change.h"
#include "scoped_fast_native_object_access.h"
#include "ScopedLocalRef.h"
#include "ScopedUtfChars.h"
+#include "utf.h"
#include "well_known_classes.h"
namespace art {
-static mirror::Class* DecodeClass(const ScopedFastNativeObjectAccess& soa, jobject java_class)
+ALWAYS_INLINE static inline mirror::Class* DecodeClass(
+ const ScopedFastNativeObjectAccess& soa, jobject java_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
DCHECK(c != NULL);
@@ -97,10 +104,173 @@
return soa.AddLocalReference<jobjectArray>(c->GetInterfaces()->Clone(soa.Self()));
}
+static mirror::ObjectArray<mirror::Field>* GetDeclaredFields(
+ Thread* self, mirror::Class* klass, bool public_only, bool force_resolve)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ StackHandleScope<3> hs(self);
+ auto h_ifields = hs.NewHandle(klass->GetIFields());
+ auto h_sfields = hs.NewHandle(klass->GetSFields());
+ const int32_t num_ifields = h_ifields.Get() != nullptr ? h_ifields->GetLength() : 0;
+ const int32_t num_sfields = h_sfields.Get() != nullptr ? h_sfields->GetLength() : 0;
+ int32_t array_size = num_ifields + num_sfields;
+ if (public_only) {
+ // Subtract all the non-public fields.
+ for (int32_t i = 0; i < num_ifields; ++i) {
+ if (!h_ifields->GetWithoutChecks(i)->IsPublic()) {
+ --array_size;
+ }
+ }
+ for (int32_t i = 0; i < num_sfields; ++i) {
+ if (!h_sfields->GetWithoutChecks(i)->IsPublic()) {
+ --array_size;
+ }
+ }
+ }
+ int32_t array_idx = 0;
+ auto object_array = hs.NewHandle(mirror::ObjectArray<mirror::Field>::Alloc(
+ self, mirror::Field::ArrayClass(), array_size));
+ if (object_array.Get() == nullptr) {
+ return nullptr;
+ }
+ for (int32_t i = 0; i < num_ifields; ++i) {
+ auto* art_field = h_ifields->GetWithoutChecks(i);
+ if (!public_only || art_field->IsPublic()) {
+ auto* field = mirror::Field::CreateFromArtField(self, art_field, force_resolve);
+ if (field == nullptr) {
+ if (kIsDebugBuild) {
+ self->AssertPendingException();
+ }
+ // Maybe null due to OOME or type resolving exception.
+ return nullptr;
+ }
+ object_array->SetWithoutChecks<false>(array_idx++, field);
+ }
+ }
+ for (int32_t i = 0; i < num_sfields; ++i) {
+ auto* art_field = h_sfields->GetWithoutChecks(i);
+ if (!public_only || art_field->IsPublic()) {
+ auto* field = mirror::Field::CreateFromArtField(self, art_field, force_resolve);
+ if (field == nullptr) {
+ if (kIsDebugBuild) {
+ self->AssertPendingException();
+ }
+ return nullptr;
+ }
+ object_array->SetWithoutChecks<false>(array_idx++, field);
+ }
+ }
+ CHECK_EQ(array_idx, array_size);
+ return object_array.Get();
+}
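
GetDeclaredFields above sizes the result array with a counting pass before the fill pass, so no trailing nulls or reallocation are needed. A standalone sketch of the same two-pass shape over plain vectors (the FieldRec records are stand-ins for ArtField entries):

#include <cassert>
#include <vector>

struct FieldRec { bool is_public; int id; };

std::vector<int> CollectIds(const std::vector<FieldRec>& fields, bool public_only) {
  // Pass 1: count matching entries so the output is sized exactly once.
  size_t n = 0;
  for (const auto& f : fields) {
    if (!public_only || f.is_public) ++n;
  }
  std::vector<int> out;
  out.reserve(n);
  // Pass 2: fill.
  for (const auto& f : fields) {
    if (!public_only || f.is_public) out.push_back(f.id);
  }
  assert(out.size() == n);
  return out;
}

int main() {
  std::vector<FieldRec> fields = {{true, 1}, {false, 2}, {true, 3}};
  assert(CollectIds(fields, true).size() == 2);
  assert(CollectIds(fields, false).size() == 3);
}
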
+
+static jobjectArray Class_getDeclaredFieldsUnchecked(JNIEnv* env, jobject javaThis,
+ jboolean publicOnly) {
+ ScopedFastNativeObjectAccess soa(env);
+ return soa.AddLocalReference<jobjectArray>(
+ GetDeclaredFields(soa.Self(), DecodeClass(soa, javaThis), publicOnly != JNI_FALSE, false));
+}
+
+static jobjectArray Class_getDeclaredFields(JNIEnv* env, jobject javaThis) {
+ ScopedFastNativeObjectAccess soa(env);
+ return soa.AddLocalReference<jobjectArray>(
+ GetDeclaredFields(soa.Self(), DecodeClass(soa, javaThis), false, true));
+}
+
+static jobjectArray Class_getPublicDeclaredFields(JNIEnv* env, jobject javaThis) {
+ ScopedFastNativeObjectAccess soa(env);
+ return soa.AddLocalReference<jobjectArray>(
+ GetDeclaredFields(soa.Self(), DecodeClass(soa, javaThis), true, true));
+}
+
+// Performs a binary search through an array of fields sorted by name.
+// TODO: Is this fast enough without the dex cache for lookups?
+// CompareModifiedUtf8ToUtf16AsCodePointValues should be fairly fast.
+ALWAYS_INLINE static inline mirror::ArtField* FindFieldByName(
+ Thread* self ATTRIBUTE_UNUSED, mirror::String* name,
+ mirror::ObjectArray<mirror::ArtField>* fields)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t low = 0;
+ uint32_t high = fields->GetLength();
+ const uint16_t* const data = name->GetCharArray()->GetData() + name->GetOffset();
+ const size_t length = name->GetLength();
+ while (low < high) {
+ auto mid = (low + high) / 2;
+ mirror::ArtField* const field = fields->GetWithoutChecks(mid);
+ int result = CompareModifiedUtf8ToUtf16AsCodePointValues(field->GetName(), data, length);
+ // Alternate approach, only a few % faster at the cost of more allocations.
+ // int result = field->GetStringName(self, true)->CompareTo(name);
+ if (result < 0) {
+ low = mid + 1;
+ } else if (result > 0) {
+ high = mid;
+ } else {
+ return field;
+ }
+ }
+ if (kIsDebugBuild) {
+ for (int32_t i = 0; i < fields->GetLength(); ++i) {
+ CHECK_NE(fields->GetWithoutChecks(i)->GetName(), name->ToModifiedUtf8());
+ }
+ }
+ return nullptr;
+}
+
+ALWAYS_INLINE static inline mirror::Field* GetDeclaredField(
+ Thread* self, mirror::Class* c, mirror::String* name)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ auto* instance_fields = c->GetIFields();
+ if (instance_fields != nullptr) {
+ auto* art_field = FindFieldByName(self, name, instance_fields);
+ if (art_field != nullptr) {
+ return mirror::Field::CreateFromArtField(self, art_field, true);
+ }
+ }
+ auto* static_fields = c->GetSFields();
+ if (static_fields != nullptr) {
+ auto* art_field = FindFieldByName(self, name, static_fields);
+ if (art_field != nullptr) {
+ return mirror::Field::CreateFromArtField(self, art_field, true);
+ }
+ }
+ return nullptr;
+}
+
+static jobject Class_getDeclaredFieldInternal(JNIEnv* env, jobject javaThis, jstring name) {
+ ScopedFastNativeObjectAccess soa(env);
+ auto* name_string = soa.Decode<mirror::String*>(name);
+ return soa.AddLocalReference<jobject>(
+ GetDeclaredField(soa.Self(), DecodeClass(soa, javaThis), name_string));
+}
+
+static jobject Class_getDeclaredField(JNIEnv* env, jobject javaThis, jstring name) {
+ ScopedFastNativeObjectAccess soa(env);
+ if (name == nullptr) {
+ ThrowNullPointerException("name == null");
+ return nullptr;
+ }
+ auto* name_string = soa.Decode<mirror::String*>(name);
+ auto* klass = DecodeClass(soa, javaThis);
+ mirror::Field* result = GetDeclaredField(soa.Self(), klass, name_string);
+ if (result == nullptr) {
+ std::string name_str = name_string->ToModifiedUtf8();
+ // We may have a pending exception if we failed to resolve.
+ if (!soa.Self()->IsExceptionPending()) {
+ soa.Self()->ThrowNewException("Ljava/lang/NoSuchFieldException;", name_str.c_str());
+ }
+ return nullptr;
+ }
+ return soa.AddLocalReference<jobject>(result);
+}
+
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(Class, classForName, "!(Ljava/lang/String;ZLjava/lang/ClassLoader;)Ljava/lang/Class;"),
NATIVE_METHOD(Class, getNameNative, "!()Ljava/lang/String;"),
NATIVE_METHOD(Class, getProxyInterfaces, "!()[Ljava/lang/Class;"),
+ NATIVE_METHOD(Class, getDeclaredFields, "!()[Ljava/lang/reflect/Field;"),
+ NATIVE_METHOD(Class, getPublicDeclaredFields, "!()[Ljava/lang/reflect/Field;"),
+ NATIVE_METHOD(Class, getDeclaredFieldsUnchecked, "!(Z)[Ljava/lang/reflect/Field;"),
+ NATIVE_METHOD(Class, getDeclaredFieldInternal, "!(Ljava/lang/String;)Ljava/lang/reflect/Field;"),
+ NATIVE_METHOD(Class, getDeclaredField, "!(Ljava/lang/String;)Ljava/lang/reflect/Field;"),
};
void register_java_lang_Class(JNIEnv* env) {
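
FindFieldByName above relies on the field arrays being sorted by name and narrows with a three-way comparison instead of consulting the dex cache. A standalone sketch of the same search shape over a sorted array of C strings, with std::strcmp standing in for CompareModifiedUtf8ToUtf16AsCodePointValues (which three-way-compares modified UTF-8 against UTF-16 code points):

#include <cassert>
#include <cstring>

// Returns the index of name in the sorted array, or -1 if absent.
int FindByName(const char* const* fields, int count, const char* name) {
  int low = 0;
  int high = count;
  while (low < high) {
    int mid = (low + high) / 2;
    int result = std::strcmp(fields[mid], name);
    if (result < 0) {
      low = mid + 1;   // candidate sorts before the target: search upper half
    } else if (result > 0) {
      high = mid;      // candidate sorts after the target: search lower half
    } else {
      return mid;
    }
  }
  return -1;
}

int main() {
  const char* fields[] = {"count", "name", "offset", "type"};
  assert(FindByName(fields, 4, "offset") == 2);
  assert(FindByName(fields, 4, "missing") == -1);
}
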
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
index 84b18ab..bd043a8 100644
--- a/runtime/native/java_lang_Runtime.cc
+++ b/runtime/native/java_lang_Runtime.cc
@@ -30,6 +30,12 @@
#include "ScopedUtfChars.h"
#include "verify_object-inl.h"
+#include <sstream>
+#ifdef HAVE_ANDROID_OS
+// This function is provided by the Android linker.
+extern "C" void android_update_LD_LIBRARY_PATH(const char* ld_library_path);
+#endif // HAVE_ANDROID_OS
+
namespace art {
static void Runtime_gc(JNIEnv*, jclass) {
@@ -46,30 +52,53 @@
exit(status);
}
-static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, jobject javaLoader,
- jstring javaLdLibraryPath) {
- // TODO: returns NULL on success or an error message describing the failure on failure. This
- // should be refactored in terms of suppressed exceptions.
- ScopedUtfChars filename(env, javaFilename);
- if (filename.c_str() == NULL) {
- return NULL;
+static void SetLdLibraryPath(JNIEnv* env, jstring javaLdLibraryPathJstr, jstring javaDexPathJstr) {
+#ifdef HAVE_ANDROID_OS
+ std::stringstream ss;
+ if (javaLdLibraryPathJstr != nullptr) {
+ ScopedUtfChars javaLdLibraryPath(env, javaLdLibraryPathJstr);
+ if (javaLdLibraryPath.c_str() != nullptr) {
+ ss << javaLdLibraryPath.c_str();
+ }
}
- if (javaLdLibraryPath != NULL) {
- ScopedUtfChars ldLibraryPath(env, javaLdLibraryPath);
- if (ldLibraryPath.c_str() == NULL) {
- return NULL;
- }
- void* sym = dlsym(RTLD_DEFAULT, "android_update_LD_LIBRARY_PATH");
- if (sym != NULL) {
- typedef void (*Fn)(const char*);
- Fn android_update_LD_LIBRARY_PATH = reinterpret_cast<Fn>(sym);
- (*android_update_LD_LIBRARY_PATH)(ldLibraryPath.c_str());
- } else {
- LOG(WARNING) << "android_update_LD_LIBRARY_PATH not found; .so dependencies will not work!";
+ if (javaDexPathJstr != nullptr) {
+ ScopedUtfChars javaDexPath(env, javaDexPathJstr);
+ if (javaDexPath.c_str() != nullptr) {
+ std::vector<std::string> dexPathVector;
+ Split(javaDexPath.c_str(), ':', &dexPathVector);
+
+ for (const auto& abi : art::Runtime::Current()->GetCpuAbilist()) {
+ for (const auto& zip_path : dexPathVector) {
+ // Native libraries live under lib/<abi>/ inside .apk file.
+ ss << ":" << zip_path << "!" << "lib/" << abi;
+ }
+ }
}
}
+ std::string ldLibraryPathStr = ss.str();
+ const char* ldLibraryPath = ldLibraryPathStr.c_str();
+ if (*ldLibraryPath == ':') {
+ ++ldLibraryPath;
+ }
+
+ android_update_LD_LIBRARY_PATH(ldLibraryPath);
+#else
+ LOG(WARNING) << "android_update_LD_LIBRARY_PATH not found; .so dependencies will not work!";
+ UNUSED(javaLdLibraryPathJstr, javaDexPathJstr, env);
+#endif
+}
+
+static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, jobject javaLoader,
+ jstring javaLdLibraryPathJstr, jstring javaDexPathJstr) {
+ ScopedUtfChars filename(env, javaFilename);
+ if (filename.c_str() == nullptr) {
+ return nullptr;
+ }
+
+ SetLdLibraryPath(env, javaLdLibraryPathJstr, javaDexPathJstr);
+
std::string error_msg;
{
JavaVMExt* vm = Runtime::Current()->GetJavaVM();
@@ -101,7 +130,7 @@
NATIVE_METHOD(Runtime, gc, "()V"),
NATIVE_METHOD(Runtime, maxMemory, "!()J"),
NATIVE_METHOD(Runtime, nativeExit, "(I)V"),
- NATIVE_METHOD(Runtime, nativeLoad, "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/String;)Ljava/lang/String;"),
+ NATIVE_METHOD(Runtime, nativeLoad, "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;"),
NATIVE_METHOD(Runtime, totalMemory, "!()J"),
};
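
For each supported ABI, SetLdLibraryPath appends an apk-internal search entry of the form <zip>!lib/<abi> to the colon-separated library path. A standalone sketch of the string assembly (the ABI list and dex path vector are hard-coded stand-ins for the runtime's values):

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main() {
  std::string ld_library_path = "/vendor/lib";
  std::vector<std::string> dex_path = {"/data/app/foo/base.apk",
                                       "/data/app/foo/split.apk"};
  std::vector<std::string> abis = {"arm64-v8a", "armeabi-v7a"};

  std::stringstream ss;
  ss << ld_library_path;
  for (const auto& abi : abis) {
    for (const auto& zip_path : dex_path) {
      // Native libraries live under lib/<abi>/ inside the .apk file.
      ss << ":" << zip_path << "!" << "lib/" << abi;
    }
  }
  // Prints /vendor/lib:/data/app/foo/base.apk!lib/arm64-v8a:... and so on.
  std::cout << ss.str() << "\n";
}
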
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index 9c5bde9..721b7a3 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -21,35 +21,35 @@
#include "common_throws.h"
#include "dex_file-inl.h"
#include "jni_internal.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
+#include "mirror/field.h"
#include "reflection-inl.h"
#include "scoped_fast_native_object_access.h"
namespace art {
template<bool kIsSet>
-ALWAYS_INLINE inline static bool VerifyFieldAccess(Thread* self, mirror::ArtField* field,
+ALWAYS_INLINE inline static bool VerifyFieldAccess(Thread* self, mirror::Field* field,
mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (kIsSet && field->IsFinal()) {
ThrowIllegalAccessException(
StringPrintf("Cannot set %s field %s of class %s",
PrettyJavaAccessFlags(field->GetAccessFlags()).c_str(),
- PrettyField(field).c_str(),
+ PrettyField(field->GetArtField()).c_str(),
field->GetDeclaringClass() == nullptr ? "null" :
PrettyClass(field->GetDeclaringClass()).c_str()).c_str());
return false;
}
mirror::Class* calling_class = nullptr;
if (!VerifyAccess(self, obj, field->GetDeclaringClass(), field->GetAccessFlags(),
- &calling_class)) {
+ &calling_class, 1)) {
ThrowIllegalAccessException(
StringPrintf("Class %s cannot access %s field %s of class %s",
calling_class == nullptr ? "null" : PrettyClass(calling_class).c_str(),
PrettyJavaAccessFlags(field->GetAccessFlags()).c_str(),
- PrettyField(field).c_str(),
+ PrettyField(field->GetArtField()).c_str(),
field->GetDeclaringClass() == nullptr ? "null" :
PrettyClass(field->GetDeclaringClass()).c_str()).c_str());
return false;
@@ -58,38 +58,37 @@
}
template<bool kAllowReferences>
-ALWAYS_INLINE inline static bool GetFieldValue(mirror::Object* o, mirror::ArtField* f,
+ALWAYS_INLINE inline static bool GetFieldValue(mirror::Object* o, mirror::Field* f,
Primitive::Type field_type, JValue* value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK_EQ(value->GetJ(), INT64_C(0));
+ MemberOffset offset(f->GetOffset());
+ const bool is_volatile = f->IsVolatile();
switch (field_type) {
case Primitive::kPrimBoolean:
- value->SetZ(f->GetBoolean(o));
+ value->SetZ(is_volatile ? o->GetFieldBooleanVolatile(offset) : o->GetFieldBoolean(offset));
return true;
case Primitive::kPrimByte:
- value->SetB(f->GetByte(o));
+ value->SetB(is_volatile ? o->GetFieldByteVolatile(offset) : o->GetFieldByte(offset));
return true;
case Primitive::kPrimChar:
- value->SetC(f->GetChar(o));
- return true;
- case Primitive::kPrimDouble:
- value->SetD(f->GetDouble(o));
- return true;
- case Primitive::kPrimFloat:
- value->SetF(f->GetFloat(o));
+ value->SetC(is_volatile ? o->GetFieldCharVolatile(offset) : o->GetFieldChar(offset));
return true;
case Primitive::kPrimInt:
- value->SetI(f->GetInt(o));
+ case Primitive::kPrimFloat:
+ value->SetI(is_volatile ? o->GetField32Volatile(offset) : o->GetField32(offset));
return true;
case Primitive::kPrimLong:
- value->SetJ(f->GetLong(o));
+ case Primitive::kPrimDouble:
+ value->SetJ(is_volatile ? o->GetField64Volatile(offset) : o->GetField64(offset));
return true;
case Primitive::kPrimShort:
- value->SetS(f->GetShort(o));
+ value->SetS(is_volatile ? o->GetFieldShortVolatile(offset) : o->GetFieldShort(offset));
return true;
case Primitive::kPrimNot:
if (kAllowReferences) {
- value->SetL(f->GetObject(o));
+ value->SetL(is_volatile ? o->GetFieldObjectVolatile<mirror::Object>(offset) :
+ o->GetFieldObject<mirror::Object>(offset));
return true;
}
// Else break to report an error.
@@ -98,23 +97,23 @@
// Never okay.
break;
}
- ThrowIllegalArgumentException(StringPrintf("Not a primitive field: %s",
- PrettyField(f).c_str()).c_str());
+ ThrowIllegalArgumentException(
+ StringPrintf("Not a primitive field: %s", PrettyField(f->GetArtField()).c_str()).c_str());
return false;
}
ALWAYS_INLINE inline static bool CheckReceiver(const ScopedFastNativeObjectAccess& soa,
- jobject j_rcvr, mirror::ArtField** f,
+ jobject j_rcvr, mirror::Field** f,
mirror::Object** class_or_rcvr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
soa.Self()->AssertThreadSuspensionIsAllowable();
mirror::Class* declaringClass = (*f)->GetDeclaringClass();
if ((*f)->IsStatic()) {
if (UNLIKELY(!declaringClass->IsInitialized())) {
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
StackHandleScope<2> hs(soa.Self());
- HandleWrapper<mirror::ArtField> h_f(hs.NewHandleWrapper(f));
+ HandleWrapper<mirror::Field> h_f(hs.NewHandleWrapper(f));
HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&declaringClass));
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
if (UNLIKELY(!class_linker->EnsureInitialized(soa.Self(), h_klass, true, true))) {
DCHECK(soa.Self()->IsExceptionPending());
return false;
@@ -131,16 +130,16 @@
return true;
}
-static jobject Field_get(JNIEnv* env, jobject javaField, jobject javaObj, jboolean accessible) {
+static jobject Field_get(JNIEnv* env, jobject javaField, jobject javaObj) {
ScopedFastNativeObjectAccess soa(env);
- mirror::ArtField* f = mirror::ArtField::FromReflectedField(soa, javaField);
+ mirror::Field* f = soa.Decode<mirror::Field*>(javaField);
mirror::Object* o = nullptr;
if (!CheckReceiver(soa, javaObj, &f, &o)) {
DCHECK(soa.Self()->IsExceptionPending());
return nullptr;
}
// If field is not set to be accessible, verify it can be accessed by the caller.
- if ((accessible == JNI_FALSE) && !VerifyFieldAccess<false>(soa.Self(), f, o)) {
+ if (!f->IsAccessible() && !VerifyFieldAccess<false>(soa.Self(), f, o)) {
DCHECK(soa.Self()->IsExceptionPending());
return nullptr;
}
@@ -157,9 +156,9 @@
template<Primitive::Type kPrimitiveType>
ALWAYS_INLINE inline static JValue GetPrimitiveField(JNIEnv* env, jobject javaField,
- jobject javaObj, jboolean accessible) {
+ jobject javaObj) {
ScopedFastNativeObjectAccess soa(env);
- mirror::ArtField* f = mirror::ArtField::FromReflectedField(soa, javaField);
+ mirror::Field* f = soa.Decode<mirror::Field*>(javaField);
mirror::Object* o = nullptr;
if (!CheckReceiver(soa, javaObj, &f, &o)) {
DCHECK(soa.Self()->IsExceptionPending());
@@ -167,7 +166,7 @@
}
// If field is not set to be accessible, verify it can be accessed by the caller.
- if (accessible == JNI_FALSE && !VerifyFieldAccess<false>(soa.Self(), f, o)) {
+ if (!f->IsAccessible() && !VerifyFieldAccess<false>(soa.Self(), f, o)) {
DCHECK(soa.Self()->IsExceptionPending());
return JValue();
}
@@ -198,72 +197,97 @@
return wide_value;
}
-static jboolean Field_getBoolean(JNIEnv* env, jobject javaField, jobject javaObj,
- jboolean accessible) {
- return GetPrimitiveField<Primitive::kPrimBoolean>(env, javaField, javaObj, accessible).GetZ();
+static jboolean Field_getBoolean(JNIEnv* env, jobject javaField, jobject javaObj) {
+ return GetPrimitiveField<Primitive::kPrimBoolean>(env, javaField, javaObj).GetZ();
}
-static jbyte Field_getByte(JNIEnv* env, jobject javaField, jobject javaObj, jboolean accessible) {
- return GetPrimitiveField<Primitive::kPrimByte>(env, javaField, javaObj, accessible).GetB();
+static jbyte Field_getByte(JNIEnv* env, jobject javaField, jobject javaObj) {
+ return GetPrimitiveField<Primitive::kPrimByte>(env, javaField, javaObj).GetB();
}
-static jchar Field_getChar(JNIEnv* env, jobject javaField, jobject javaObj, jboolean accessible) {
- return GetPrimitiveField<Primitive::kPrimChar>(env, javaField, javaObj, accessible).GetC();
+static jchar Field_getChar(JNIEnv* env, jobject javaField, jobject javaObj) {
+ return GetPrimitiveField<Primitive::kPrimChar>(env, javaField, javaObj).GetC();
}
-static jdouble Field_getDouble(JNIEnv* env, jobject javaField, jobject javaObj,
- jboolean accessible) {
- return GetPrimitiveField<Primitive::kPrimDouble>(env, javaField, javaObj, accessible).GetD();
+static jdouble Field_getDouble(JNIEnv* env, jobject javaField, jobject javaObj) {
+ return GetPrimitiveField<Primitive::kPrimDouble>(env, javaField, javaObj).GetD();
}
-static jfloat Field_getFloat(JNIEnv* env, jobject javaField, jobject javaObj, jboolean accessible) {
- return GetPrimitiveField<Primitive::kPrimFloat>(env, javaField, javaObj, accessible).GetF();
+static jfloat Field_getFloat(JNIEnv* env, jobject javaField, jobject javaObj) {
+ return GetPrimitiveField<Primitive::kPrimFloat>(env, javaField, javaObj).GetF();
}
-static jint Field_getInt(JNIEnv* env, jobject javaField, jobject javaObj, jboolean accessible) {
- return GetPrimitiveField<Primitive::kPrimInt>(env, javaField, javaObj, accessible).GetI();
+static jint Field_getInt(JNIEnv* env, jobject javaField, jobject javaObj) {
+ return GetPrimitiveField<Primitive::kPrimInt>(env, javaField, javaObj).GetI();
}
-static jlong Field_getLong(JNIEnv* env, jobject javaField, jobject javaObj, jboolean accessible) {
- return GetPrimitiveField<Primitive::kPrimLong>(env, javaField, javaObj, accessible).GetJ();
+static jlong Field_getLong(JNIEnv* env, jobject javaField, jobject javaObj) {
+ return GetPrimitiveField<Primitive::kPrimLong>(env, javaField, javaObj).GetJ();
}
-static jshort Field_getShort(JNIEnv* env, jobject javaField, jobject javaObj, jboolean accessible) {
- return GetPrimitiveField<Primitive::kPrimShort>(env, javaField, javaObj, accessible).GetS();
+static jshort Field_getShort(JNIEnv* env, jobject javaField, jobject javaObj) {
+ return GetPrimitiveField<Primitive::kPrimShort>(env, javaField, javaObj).GetS();
}
-static void SetFieldValue(mirror::Object* o, mirror::ArtField* f, Primitive::Type field_type,
- bool allow_references, const JValue& new_value)
+ALWAYS_INLINE inline static void SetFieldValue(mirror::Object* o, mirror::Field* f,
+ Primitive::Type field_type, bool allow_references,
+ const JValue& new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(f->GetDeclaringClass()->IsInitialized());
+ MemberOffset offset(f->GetOffset());
+ const bool is_volatile = f->IsVolatile();
switch (field_type) {
case Primitive::kPrimBoolean:
- f->SetBoolean<false>(o, new_value.GetZ());
+ if (is_volatile) {
+ o->SetFieldBooleanVolatile<false>(offset, new_value.GetZ());
+ } else {
+ o->SetFieldBoolean<false>(offset, new_value.GetZ());
+ }
break;
case Primitive::kPrimByte:
- f->SetByte<false>(o, new_value.GetB());
+ if (is_volatile) {
+ o->SetFieldByteVolatile<false>(offset, new_value.GetB());
+ } else {
+ o->SetFieldByte<false>(offset, new_value.GetB());
+ }
break;
case Primitive::kPrimChar:
- f->SetChar<false>(o, new_value.GetC());
- break;
- case Primitive::kPrimDouble:
- f->SetDouble<false>(o, new_value.GetD());
- break;
- case Primitive::kPrimFloat:
- f->SetFloat<false>(o, new_value.GetF());
+ if (is_volatile) {
+ o->SetFieldCharVolatile<false>(offset, new_value.GetC());
+ } else {
+ o->SetFieldChar<false>(offset, new_value.GetC());
+ }
break;
case Primitive::kPrimInt:
- f->SetInt<false>(o, new_value.GetI());
+ case Primitive::kPrimFloat:
+ if (is_volatile) {
+ o->SetField32Volatile<false>(offset, new_value.GetI());
+ } else {
+ o->SetField32<false>(offset, new_value.GetI());
+ }
break;
case Primitive::kPrimLong:
- f->SetLong<false>(o, new_value.GetJ());
+ case Primitive::kPrimDouble:
+ if (is_volatile) {
+ o->SetField64Volatile<false>(offset, new_value.GetJ());
+ } else {
+ o->SetField64<false>(offset, new_value.GetJ());
+ }
break;
case Primitive::kPrimShort:
- f->SetShort<false>(o, new_value.GetS());
+ if (is_volatile) {
+ o->SetFieldShortVolatile<false>(offset, new_value.GetS());
+ } else {
+ o->SetFieldShort<false>(offset, new_value.GetS());
+ }
break;
case Primitive::kPrimNot:
if (allow_references) {
- f->SetObject<false>(o, new_value.GetL());
+ if (is_volatile) {
+ o->SetFieldObjectVolatile<false>(offset, new_value.GetL());
+ } else {
+ o->SetFieldObject<false>(offset, new_value.GetL());
+ }
break;
}
// Else fall through to report an error.
@@ -271,15 +295,14 @@
case Primitive::kPrimVoid:
// Never okay.
ThrowIllegalArgumentException(StringPrintf("Not a primitive field: %s",
- PrettyField(f).c_str()).c_str());
+ PrettyField(f->GetArtField()).c_str()).c_str());
return;
}
}
-static void Field_set(JNIEnv* env, jobject javaField, jobject javaObj, jobject javaValue,
- jboolean accessible) {
+static void Field_set(JNIEnv* env, jobject javaField, jobject javaObj, jobject javaValue) {
ScopedFastNativeObjectAccess soa(env);
- mirror::ArtField* f = mirror::ArtField::FromReflectedField(soa, javaField);
+ mirror::Field* f = soa.Decode<mirror::Field*>(javaField);
// Check that the receiver is non-null and an instance of the field's declaring class.
mirror::Object* o = nullptr;
if (!CheckReceiver(soa, javaObj, &f, &o)) {
@@ -287,18 +310,11 @@
return;
}
mirror::Class* field_type;
- const char* field_type_desciptor = f->GetTypeDescriptor();
+ const char* field_type_desciptor = f->GetArtField()->GetTypeDescriptor();
Primitive::Type field_prim_type = Primitive::GetType(field_type_desciptor[0]);
if (field_prim_type == Primitive::kPrimNot) {
- StackHandleScope<2> hs(soa.Self());
- HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
- HandleWrapper<mirror::ArtField> h_f(hs.NewHandleWrapper(&f));
- // May cause resolution.
- field_type = h_f->GetType(true);
- if (field_type == nullptr) {
- DCHECK(soa.Self()->IsExceptionPending());
- return;
- }
+ field_type = f->GetType();
+ DCHECK(field_type != nullptr);
} else {
field_type = Runtime::Current()->GetClassLinker()->FindPrimitiveClass(field_type_desciptor[0]);
}
@@ -306,12 +322,12 @@
// Unbox the value, if necessary.
mirror::Object* boxed_value = soa.Decode<mirror::Object*>(javaValue);
JValue unboxed_value;
- if (!UnboxPrimitiveForField(boxed_value, field_type, f, &unboxed_value)) {
+ if (!UnboxPrimitiveForField(boxed_value, field_type, f->GetArtField(), &unboxed_value)) {
DCHECK(soa.Self()->IsExceptionPending());
return;
}
// If field is not set to be accessible, verify it can be accessed by the caller.
- if ((accessible == JNI_FALSE) && !VerifyFieldAccess<true>(soa.Self(), f, o)) {
+ if (!f->IsAccessible() && !VerifyFieldAccess<true>(soa.Self(), f, o)) {
DCHECK(soa.Self()->IsExceptionPending());
return;
}
@@ -320,9 +336,9 @@
template<Primitive::Type kPrimitiveType>
static void SetPrimitiveField(JNIEnv* env, jobject javaField, jobject javaObj,
- const JValue& new_value, jboolean accessible) {
+ const JValue& new_value) {
ScopedFastNativeObjectAccess soa(env);
- mirror::ArtField* f = mirror::ArtField::FromReflectedField(soa, javaField);
+ mirror::Field* f = soa.Decode<mirror::Field*>(javaField);
mirror::Object* o = nullptr;
if (!CheckReceiver(soa, javaObj, &f, &o)) {
return;
@@ -330,7 +346,7 @@
Primitive::Type field_type = f->GetTypeAsPrimitiveType();
if (UNLIKELY(field_type == Primitive::kPrimNot)) {
ThrowIllegalArgumentException(StringPrintf("Not a primitive field: %s",
- PrettyField(f).c_str()).c_str());
+ PrettyField(f->GetArtField()).c_str()).c_str());
return;
}
@@ -342,7 +358,7 @@
}
// If field is not set to be accessible, verify it can be accessed by the caller.
- if ((accessible == JNI_FALSE) && !VerifyFieldAccess<true>(soa.Self(), f, o)) {
+ if (!f->IsAccessible() && !VerifyFieldAccess<true>(soa.Self(), f, o)) {
DCHECK(soa.Self()->IsExceptionPending());
return;
}
@@ -351,81 +367,73 @@
SetFieldValue(o, f, field_type, false, wide_value);
}
-static void Field_setBoolean(JNIEnv* env, jobject javaField, jobject javaObj, jboolean z,
- jboolean accessible) {
+static void Field_setBoolean(JNIEnv* env, jobject javaField, jobject javaObj, jboolean z) {
JValue value;
value.SetZ(z);
- SetPrimitiveField<Primitive::kPrimBoolean>(env, javaField, javaObj, value, accessible);
+ SetPrimitiveField<Primitive::kPrimBoolean>(env, javaField, javaObj, value);
}
-static void Field_setByte(JNIEnv* env, jobject javaField, jobject javaObj, jbyte b,
- jboolean accessible) {
+static void Field_setByte(JNIEnv* env, jobject javaField, jobject javaObj, jbyte b) {
JValue value;
value.SetB(b);
- SetPrimitiveField<Primitive::kPrimByte>(env, javaField, javaObj, value, accessible);
+ SetPrimitiveField<Primitive::kPrimByte>(env, javaField, javaObj, value);
}
-static void Field_setChar(JNIEnv* env, jobject javaField, jobject javaObj, jchar c,
- jboolean accessible) {
+static void Field_setChar(JNIEnv* env, jobject javaField, jobject javaObj, jchar c) {
JValue value;
value.SetC(c);
- SetPrimitiveField<Primitive::kPrimChar>(env, javaField, javaObj, value, accessible);
+ SetPrimitiveField<Primitive::kPrimChar>(env, javaField, javaObj, value);
}
-static void Field_setDouble(JNIEnv* env, jobject javaField, jobject javaObj, jdouble d,
- jboolean accessible) {
+static void Field_setDouble(JNIEnv* env, jobject javaField, jobject javaObj, jdouble d) {
JValue value;
value.SetD(d);
- SetPrimitiveField<Primitive::kPrimDouble>(env, javaField, javaObj, value, accessible);
+ SetPrimitiveField<Primitive::kPrimDouble>(env, javaField, javaObj, value);
}
-static void Field_setFloat(JNIEnv* env, jobject javaField, jobject javaObj, jfloat f,
- jboolean accessible) {
+static void Field_setFloat(JNIEnv* env, jobject javaField, jobject javaObj, jfloat f) {
JValue value;
value.SetF(f);
- SetPrimitiveField<Primitive::kPrimFloat>(env, javaField, javaObj, value, accessible);
+ SetPrimitiveField<Primitive::kPrimFloat>(env, javaField, javaObj, value);
}
-static void Field_setInt(JNIEnv* env, jobject javaField, jobject javaObj, jint i,
- jboolean accessible) {
+static void Field_setInt(JNIEnv* env, jobject javaField, jobject javaObj, jint i) {
JValue value;
value.SetI(i);
- SetPrimitiveField<Primitive::kPrimInt>(env, javaField, javaObj, value, accessible);
+ SetPrimitiveField<Primitive::kPrimInt>(env, javaField, javaObj, value);
}
-static void Field_setLong(JNIEnv* env, jobject javaField, jobject javaObj, jlong j,
- jboolean accessible) {
+static void Field_setLong(JNIEnv* env, jobject javaField, jobject javaObj, jlong j) {
JValue value;
value.SetJ(j);
- SetPrimitiveField<Primitive::kPrimLong>(env, javaField, javaObj, value, accessible);
+ SetPrimitiveField<Primitive::kPrimLong>(env, javaField, javaObj, value);
}
-static void Field_setShort(JNIEnv* env, jobject javaField, jobject javaObj, jshort s,
- jboolean accessible) {
+static void Field_setShort(JNIEnv* env, jobject javaField, jobject javaObj, jshort s) {
JValue value;
value.SetS(s);
- SetPrimitiveField<Primitive::kPrimShort>(env, javaField, javaObj, value, accessible);
+ SetPrimitiveField<Primitive::kPrimShort>(env, javaField, javaObj, value);
}
static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(Field, get, "!(Ljava/lang/Object;Z)Ljava/lang/Object;"),
- NATIVE_METHOD(Field, getBoolean, "!(Ljava/lang/Object;Z)Z"),
- NATIVE_METHOD(Field, getByte, "!(Ljava/lang/Object;Z)B"),
- NATIVE_METHOD(Field, getChar, "!(Ljava/lang/Object;Z)C"),
- NATIVE_METHOD(Field, getDouble, "!(Ljava/lang/Object;Z)D"),
- NATIVE_METHOD(Field, getFloat, "!(Ljava/lang/Object;Z)F"),
- NATIVE_METHOD(Field, getInt, "!(Ljava/lang/Object;Z)I"),
- NATIVE_METHOD(Field, getLong, "!(Ljava/lang/Object;Z)J"),
- NATIVE_METHOD(Field, getShort, "!(Ljava/lang/Object;Z)S"),
- NATIVE_METHOD(Field, set, "!(Ljava/lang/Object;Ljava/lang/Object;Z)V"),
- NATIVE_METHOD(Field, setBoolean, "!(Ljava/lang/Object;ZZ)V"),
- NATIVE_METHOD(Field, setByte, "!(Ljava/lang/Object;BZ)V"),
- NATIVE_METHOD(Field, setChar, "!(Ljava/lang/Object;CZ)V"),
- NATIVE_METHOD(Field, setDouble, "!(Ljava/lang/Object;DZ)V"),
- NATIVE_METHOD(Field, setFloat, "!(Ljava/lang/Object;FZ)V"),
- NATIVE_METHOD(Field, setInt, "!(Ljava/lang/Object;IZ)V"),
- NATIVE_METHOD(Field, setLong, "!(Ljava/lang/Object;JZ)V"),
- NATIVE_METHOD(Field, setShort, "!(Ljava/lang/Object;SZ)V"),
+ NATIVE_METHOD(Field, get, "!(Ljava/lang/Object;)Ljava/lang/Object;"),
+ NATIVE_METHOD(Field, getBoolean, "!(Ljava/lang/Object;)Z"),
+ NATIVE_METHOD(Field, getByte, "!(Ljava/lang/Object;)B"),
+ NATIVE_METHOD(Field, getChar, "!(Ljava/lang/Object;)C"),
+ NATIVE_METHOD(Field, getDouble, "!(Ljava/lang/Object;)D"),
+ NATIVE_METHOD(Field, getFloat, "!(Ljava/lang/Object;)F"),
+ NATIVE_METHOD(Field, getInt, "!(Ljava/lang/Object;)I"),
+ NATIVE_METHOD(Field, getLong, "!(Ljava/lang/Object;)J"),
+ NATIVE_METHOD(Field, getShort, "!(Ljava/lang/Object;)S"),
+ NATIVE_METHOD(Field, set, "!(Ljava/lang/Object;Ljava/lang/Object;)V"),
+ NATIVE_METHOD(Field, setBoolean, "!(Ljava/lang/Object;Z)V"),
+ NATIVE_METHOD(Field, setByte, "!(Ljava/lang/Object;B)V"),
+ NATIVE_METHOD(Field, setChar, "!(Ljava/lang/Object;C)V"),
+ NATIVE_METHOD(Field, setDouble, "!(Ljava/lang/Object;D)V"),
+ NATIVE_METHOD(Field, setFloat, "!(Ljava/lang/Object;F)V"),
+ NATIVE_METHOD(Field, setInt, "!(Ljava/lang/Object;I)V"),
+ NATIVE_METHOD(Field, setLong, "!(Ljava/lang/Object;J)V"),
+ NATIVE_METHOD(Field, setShort, "!(Ljava/lang/Object;S)V"),
};
void register_java_lang_reflect_Field(JNIEnv* env) {
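
GetFieldValue and SetFieldValue above fold kPrimFloat into the kPrimInt case and kPrimDouble into kPrimLong: reflection moves the raw bit pattern through GetField32/GetField64 and lets JValue reinterpret it. A standalone sketch of why that round trip is lossless (memcpy is the portable bit-cast here):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  float f = 3.5f;

  // Store the float through the 32-bit integer path, as SetField32 would.
  int32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));

  // Load it back through the same path and reinterpret, as JValue::GetF would.
  float out;
  std::memcpy(&out, &bits, sizeof(out));
  assert(out == 3.5f);
}
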
diff --git a/runtime/noop_compiler_callbacks.h b/runtime/noop_compiler_callbacks.h
index 300abc9..1cbf2bb 100644
--- a/runtime/noop_compiler_callbacks.h
+++ b/runtime/noop_compiler_callbacks.h
@@ -23,7 +23,7 @@
class NoopCompilerCallbacks FINAL : public CompilerCallbacks {
public:
- NoopCompilerCallbacks() {}
+ NoopCompilerCallbacks() : CompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileApp) {}
~NoopCompilerCallbacks() {}
bool MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) OVERRIDE {
diff --git a/runtime/oat.h b/runtime/oat.h
index 79cb024..120de6d 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '5', '8', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '6', '1', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 356e3d2..69cb22d 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -17,10 +17,11 @@
#include "oat_file.h"
#include <dlfcn.h>
-#include <sstream>
#include <string.h>
#include <unistd.h>
+#include <sstream>
+
#include "base/bit_vector.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
@@ -38,12 +39,33 @@
namespace art {
+std::string OatFile::ResolveRelativeEncodedDexLocation(
+ const char* abs_dex_location, const std::string& rel_dex_location) {
+ if (abs_dex_location != nullptr && rel_dex_location[0] != '/') {
+ // Strip :classes<N>.dex used for secondary multidex files.
+ std::string base = DexFile::GetBaseLocation(rel_dex_location);
+ std::string multidex_suffix = DexFile::GetMultiDexSuffix(rel_dex_location);
+
+ // Check if the base is a suffix of the provided abs_dex_location.
+ std::string target_suffix = "/" + base;
+ std::string abs_location(abs_dex_location);
+ if (abs_location.size() > target_suffix.size()) {
+ size_t pos = abs_location.size() - target_suffix.size();
+ if (abs_location.compare(pos, std::string::npos, target_suffix) == 0) {
+ return abs_location + multidex_suffix;
+ }
+ }
+ }
+ return rel_dex_location;
+}
+
void OatFile::CheckLocation(const std::string& location) {
CHECK(!location.empty());
}
OatFile* OatFile::OpenWithElfFile(ElfFile* elf_file,
const std::string& location,
+ const char* abs_dex_location,
std::string* error_msg) {
std::unique_ptr<OatFile> oat_file(new OatFile(location, false));
oat_file->elf_file_.reset(elf_file);
@@ -53,7 +75,7 @@
oat_file->begin_ = elf_file->Begin() + offset;
oat_file->end_ = elf_file->Begin() + size + offset;
// Ignore the optional .bss section when opening non-executable.
- return oat_file->Setup(error_msg) ? oat_file.release() : nullptr;
+ return oat_file->Setup(abs_dex_location, error_msg) ? oat_file.release() : nullptr;
}
OatFile* OatFile::Open(const std::string& filename,
@@ -61,6 +83,7 @@
uint8_t* requested_base,
uint8_t* oat_file_begin,
bool executable,
+ const char* abs_dex_location,
std::string* error_msg) {
CHECK(!filename.empty()) << location;
CheckLocation(location);
@@ -80,7 +103,7 @@
return nullptr;
}
ret.reset(OpenElfFile(file.get(), location, requested_base, oat_file_begin, false, executable,
- error_msg));
+ abs_dex_location, error_msg));
// It would be nice to unlink here. But we might have opened the file created by the
// ScopedLock, which we better not delete to avoid races. TODO: Investigate how to fix the API
@@ -88,14 +111,18 @@
return ret.release();
}
-OatFile* OatFile::OpenWritable(File* file, const std::string& location, std::string* error_msg) {
+OatFile* OatFile::OpenWritable(File* file, const std::string& location,
+ const char* abs_dex_location,
+ std::string* error_msg) {
CheckLocation(location);
- return OpenElfFile(file, location, nullptr, nullptr, true, false, error_msg);
+ return OpenElfFile(file, location, nullptr, nullptr, true, false, abs_dex_location, error_msg);
}
-OatFile* OatFile::OpenReadable(File* file, const std::string& location, std::string* error_msg) {
+OatFile* OatFile::OpenReadable(File* file, const std::string& location,
+ const char* abs_dex_location,
+ std::string* error_msg) {
CheckLocation(location);
- return OpenElfFile(file, location, nullptr, nullptr, false, false, error_msg);
+ return OpenElfFile(file, location, nullptr, nullptr, false, false, abs_dex_location, error_msg);
}
OatFile* OatFile::OpenElfFile(File* file,
@@ -104,10 +131,11 @@
uint8_t* oat_file_begin,
bool writable,
bool executable,
+ const char* abs_dex_location,
std::string* error_msg) {
std::unique_ptr<OatFile> oat_file(new OatFile(location, executable));
bool success = oat_file->ElfFileOpen(file, requested_base, oat_file_begin, writable, executable,
- error_msg);
+ abs_dex_location, error_msg);
if (!success) {
CHECK(!error_msg->empty());
return nullptr;
@@ -131,6 +159,7 @@
bool OatFile::ElfFileOpen(File* file, uint8_t* requested_base, uint8_t* oat_file_begin,
bool writable, bool executable,
+ const char* abs_dex_location,
std::string* error_msg) {
// TODO: rename requested_base to oat_data_begin
elf_file_.reset(ElfFile::Open(file, writable, /*program_header_only*/true, error_msg,
@@ -180,10 +209,10 @@
bss_end_ += sizeof(uint32_t);
}
- return Setup(error_msg);
+ return Setup(abs_dex_location, error_msg);
}
-bool OatFile::Setup(std::string* error_msg) {
+bool OatFile::Setup(const char* abs_dex_location, std::string* error_msg) {
if (!GetOatHeader().IsValid()) {
std::string cause = GetOatHeader().GetValidationErrorMessage();
*error_msg = StringPrintf("Invalid oat header for '%s': %s", GetLocation().c_str(),
@@ -230,7 +259,9 @@
return false;
}
- std::string dex_file_location(dex_file_location_data, dex_file_location_size);
+ std::string dex_file_location = ResolveRelativeEncodedDexLocation(
+ abs_dex_location,
+ std::string(dex_file_location_data, dex_file_location_size));
uint32_t dex_file_checksum = *reinterpret_cast<const uint32_t*>(oat);
oat += sizeof(dex_file_checksum);
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 564185c..51952f3 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -43,14 +43,18 @@
// Opens an oat file contained within the given elf file. This is always opened as
// non-executable at the moment.
static OatFile* OpenWithElfFile(ElfFile* elf_file, const std::string& location,
+ const char* abs_dex_location,
std::string* error_msg);
// Open an oat file. Returns NULL on failure. Requested base can
// optionally be used to request where the file should be loaded.
+ // See ResolveRelativeEncodedDexLocation for a description of how the
+ // abs_dex_location argument is used.
static OatFile* Open(const std::string& filename,
const std::string& location,
uint8_t* requested_base,
uint8_t* oat_file_begin,
bool executable,
+ const char* abs_dex_location,
std::string* error_msg);
// Open an oat file from an already opened File.
@@ -58,9 +62,13 @@
// where relocations may be required. Currently used from
// ImageWriter which wants to open a writable version from an existing
// file descriptor for patching.
- static OatFile* OpenWritable(File* file, const std::string& location, std::string* error_msg);
+ static OatFile* OpenWritable(File* file, const std::string& location,
+ const char* abs_dex_location,
+ std::string* error_msg);
// Opens an oat file from an already opened File. Maps it PROT_READ, MAP_PRIVATE.
- static OatFile* OpenReadable(File* file, const std::string& location, std::string* error_msg);
+ static OatFile* OpenReadable(File* file, const std::string& location,
+ const char* abs_dex_location,
+ std::string* error_msg);
~OatFile();
@@ -279,6 +287,18 @@
const uint8_t* BssBegin() const;
const uint8_t* BssEnd() const;
+ // Returns the absolute dex location for the encoded relative dex location.
+ //
+ // If not nullptr, abs_dex_location is used to resolve the absolute dex
+ // location of relative dex locations encoded in the oat file.
+ // For example, given absolute location "/data/app/foo/base.apk", encoded
+ // dex locations "base.apk", "base.apk:classes2.dex", etc. would be resolved
+ // to "/data/app/foo/base.apk", "/data/app/foo/base.apk:classes2.dex", etc.
+ // Relative encoded dex locations that don't match the given abs_dex_location
+ // are left unchanged.
+ static std::string ResolveRelativeEncodedDexLocation(
+ const char* abs_dex_location, const std::string& rel_dex_location);
+
private:
static void CheckLocation(const std::string& location);
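
A standalone sketch of the resolution rule documented above, with inline stand-ins for DexFile::GetBaseLocation and DexFile::GetMultiDexSuffix (both split a location at the ':' multidex separator):

#include <cassert>
#include <string>

std::string Resolve(const char* abs_dex_location, const std::string& rel) {
  if (abs_dex_location != nullptr && rel[0] != '/') {
    size_t colon = rel.find(':');
    std::string base = rel.substr(0, colon);  // e.g. "base.apk"
    std::string multidex_suffix =
        colon == std::string::npos ? "" : rel.substr(colon);  // e.g. ":classes2.dex"
    // Check if "/" + base is a suffix of the provided absolute location.
    std::string target_suffix = "/" + base;
    std::string abs_location(abs_dex_location);
    if (abs_location.size() > target_suffix.size()) {
      size_t pos = abs_location.size() - target_suffix.size();
      if (abs_location.compare(pos, std::string::npos, target_suffix) == 0) {
        return abs_location + multidex_suffix;
      }
    }
  }
  return rel;  // absolute or non-matching locations are left unchanged
}

int main() {
  assert(Resolve("/data/app/foo/base.apk", "base.apk:classes2.dex")
         == "/data/app/foo/base.apk:classes2.dex");
  assert(Resolve("/data/app/foo/base.apk", "other.apk") == "other.apk");
  assert(Resolve(nullptr, "base.apk") == "base.apk");
}
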
@@ -288,14 +308,17 @@
uint8_t* oat_file_begin, // Override base if not null
bool writable,
bool executable,
+ const char* abs_dex_location,
std::string* error_msg);
explicit OatFile(const std::string& filename, bool executable);
bool ElfFileOpen(File* file, uint8_t* requested_base,
uint8_t* oat_file_begin, // Override where the file is loaded to if not null
bool writable, bool executable,
+ const char* abs_dex_location,
std::string* error_msg);
- bool Setup(std::string* error_msg);
+
+ bool Setup(const char* abs_dex_location, std::string* error_msg);
// The oat file name.
//
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index f87fa4f..d92f59b 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -737,18 +737,19 @@
CHECK(error_msg != nullptr);
// The odex file name is formed by replacing the dex_location extension with
- // .odex and inserting an isa directory. For example:
+ // .odex and inserting an oat/<isa> directory. For example:
// location = /foo/bar/baz.jar
- // odex_location = /foo/bar/<isa>/baz.odex
+ // odex_location = /foo/bar/oat/<isa>/baz.odex
- // Find the directory portion of the dex location and add the isa directory.
+ // Find the directory portion of the dex location and add the oat/<isa>
+ // directory.
size_t pos = location.rfind('/');
if (pos == std::string::npos) {
*error_msg = "Dex location " + location + " has no directory.";
return false;
}
std::string dir = location.substr(0, pos+1);
- dir += std::string(GetInstructionSetString(isa));
+ dir += "oat/" + std::string(GetInstructionSetString(isa));
// Find the file portion of the dex location.
std::string file;
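
A standalone sketch of the path derivation in this hunk: /foo/bar/baz.jar with isa "arm" becomes /foo/bar/oat/arm/baz.odex (the extension replacement is simplified here to "drop everything after the last dot"):

#include <cassert>
#include <string>

bool OdexFilename(const std::string& location, const std::string& isa,
                  std::string* out) {
  // Find the directory portion and add the oat/<isa> directory.
  size_t pos = location.rfind('/');
  if (pos == std::string::npos) {
    return false;  // dex location has no directory
  }
  std::string dir = location.substr(0, pos + 1) + "oat/" + isa;

  // Replace the file extension with .odex.
  std::string file = location.substr(pos + 1);
  size_t dot = file.rfind('.');
  if (dot != std::string::npos) {
    file = file.substr(0, dot);
  }
  *out = dir + "/" + file + ".odex";
  return true;
}

int main() {
  std::string odex;
  assert(OdexFilename("/foo/bar/baz.jar", "arm", &odex));
  assert(odex == "/foo/bar/oat/arm/baz.odex");
}
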
@@ -850,7 +851,7 @@
std::string error_msg;
cached_odex_file_.reset(OatFile::Open(odex_file_name.c_str(),
odex_file_name.c_str(), nullptr, nullptr, load_executable_,
- &error_msg));
+ dex_location_, &error_msg));
if (cached_odex_file_.get() == nullptr) {
VLOG(oat) << "OatFileAssistant test for existing pre-compiled oat file "
<< odex_file_name << ": " << error_msg;
@@ -875,7 +876,8 @@
const std::string& oat_file_name = *OatFileName();
std::string error_msg;
cached_oat_file_.reset(OatFile::Open(oat_file_name.c_str(),
- oat_file_name.c_str(), nullptr, nullptr, load_executable_, &error_msg));
+ oat_file_name.c_str(), nullptr, nullptr, load_executable_,
+ dex_location_, &error_msg));
if (cached_oat_file_.get() == nullptr) {
VLOG(oat) << "OatFileAssistant test for existing oat file "
<< oat_file_name << ": " << error_msg;
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 958b440..f2abcf9 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -151,11 +151,12 @@
static std::vector<std::unique_ptr<const DexFile>> LoadDexFiles(
const OatFile& oat_file, const char* dex_location);
- // If the dex file has been pre-compiled on the host, the compiled oat file
- // will have the extension .odex, and is referred to as the odex file.
- // It is called odex for legacy reasons; the file is really an oat file. The
- // odex file will typically have a patch delta of 0 and need to be relocated
- // before use for the purposes of ASLR.
+ // If the dex file has been installed with a compiled oat file alongside
+ // it, the compiled oat file will have the extension .odex, and is referred
+ // to as the odex file. It is called odex for legacy reasons; the file is
+ // really an oat file. The odex file will often, but not always, have a
+ // patch delta of 0 and need to be relocated before use for the purposes of
+ // ASLR. The odex file is treated as if it were read-only.
// These methods return the location and status of the odex file for the dex
// location.
// Notes:
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index be8652c..a8b0876 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -27,6 +27,7 @@
#include "class_linker.h"
#include "common_runtime_test.h"
+#include "compiler_callbacks.h"
#include "mem_map.h"
#include "os.h"
#include "thread-inl.h"
@@ -44,10 +45,13 @@
scratch_dir_ = android_data_ + "/OatFileAssistantTest";
ASSERT_EQ(0, mkdir(scratch_dir_.c_str(), 0700));
- // Create a subdirectory in scratch for the current isa.
- // This is the location that will be used for odex files in the tests.
- isa_dir_ = scratch_dir_ + "/" + GetInstructionSetString(kRuntimeISA);
- ASSERT_EQ(0, mkdir(isa_dir_.c_str(), 0700));
+ // Create a subdirectory in scratch for odex files.
+ odex_oat_dir_ = scratch_dir_ + "/oat";
+ ASSERT_EQ(0, mkdir(odex_oat_dir_.c_str(), 0700));
+
+ odex_dir_ = odex_oat_dir_ + "/" + std::string(GetInstructionSetString(kRuntimeISA));
+ ASSERT_EQ(0, mkdir(odex_dir_.c_str(), 0700));
+
// Verify the environment is as we expect
uint32_t checksum;
@@ -74,11 +78,7 @@
nullptr));
// Make sure compilercallbacks are not set so that relocation will be
// enabled.
- for (std::pair<std::string, const void*>& pair : *options) {
- if (pair.first == "compilercallbacks") {
- pair.second = nullptr;
- }
- }
+ callbacks_.reset();
}
virtual void PreRuntimeCreate() {
@@ -90,8 +90,11 @@
}
virtual void TearDown() {
- ClearDirectory(isa_dir_.c_str());
- ASSERT_EQ(0, rmdir(isa_dir_.c_str()));
+ ClearDirectory(odex_dir_.c_str());
+ ASSERT_EQ(0, rmdir(odex_dir_.c_str()));
+
+ ClearDirectory(odex_oat_dir_.c_str());
+ ASSERT_EQ(0, rmdir(odex_oat_dir_.c_str()));
ClearDirectory(scratch_dir_.c_str());
ASSERT_EQ(0, rmdir(scratch_dir_.c_str()));
@@ -153,10 +156,10 @@
return scratch_dir_;
}
- // ISA directory is the subdirectory in the scratch directory where odex
+ // Odex directory is the subdirectory in the scratch directory where odex
// files should be located.
- std::string GetISADir() {
- return isa_dir_;
+ std::string GetOdexDir() {
+ return odex_dir_;
}
// Generate an odex file for the purposes of test.
@@ -207,29 +210,29 @@
// image in case of the GSS collector.
+ 384 * MB;
- std::string error_msg;
std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
ASSERT_TRUE(map.get() != nullptr) << "Failed to build process map";
for (BacktraceMap::const_iterator it = map->begin();
reservation_start < reservation_end && it != map->end(); ++it) {
- if (it->end <= reservation_start) {
- continue;
- }
+ ReserveImageSpaceChunk(reservation_start, std::min(it->start, reservation_end));
+ reservation_start = std::max(reservation_start, it->end);
+ }
+ ReserveImageSpaceChunk(reservation_start, reservation_end);
+ }
- if (it->start < reservation_start) {
- reservation_start = std::min(reservation_end, it->end);
- }
-
+ // Reserve a chunk of memory for the image space in the given range.
+ // Only has effect for chunks with a positive number of bytes.
+ void ReserveImageSpaceChunk(uintptr_t start, uintptr_t end) {
+ if (start < end) {
+ std::string error_msg;
image_reservation_.push_back(std::unique_ptr<MemMap>(
MemMap::MapAnonymous("image reservation",
- reinterpret_cast<uint8_t*>(reservation_start),
- std::min(it->start, reservation_end) - reservation_start,
+ reinterpret_cast<uint8_t*>(start), end - start,
PROT_NONE, false, false, &error_msg)));
ASSERT_TRUE(image_reservation_.back().get() != nullptr) << error_msg;
LOG(INFO) << "Reserved space for image " <<
reinterpret_cast<void*>(image_reservation_.back()->Begin()) << "-" <<
reinterpret_cast<void*>(image_reservation_.back()->End());
- reservation_start = it->end;
}
}
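The refactored loop above walks the process map in address order and reserves every hole: for each existing mapping it reserves [reservation_start, min(mapping.start, reservation_end)), then advances reservation_start past the mapping, with a final call covering the tail of the range. A minimal standalone sketch of that gap-walking pattern, with BacktraceMap replaced by a plain sorted vector of intervals (the names here are illustrative, not ART's):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

struct Interval { uintptr_t start, end; };  // An existing mapping.

// Collect every hole in [lo, hi) not covered by the sorted, disjoint
// mappings, mirroring the ReserveImageSpaceChunk calls above.
static std::vector<Interval> Gaps(const std::vector<Interval>& mappings,
                                  uintptr_t lo, uintptr_t hi) {
  std::vector<Interval> gaps;
  for (const Interval& m : mappings) {
    if (lo >= hi) break;
    uintptr_t gap_end = std::min(m.start, hi);
    if (lo < gap_end) gaps.push_back({lo, gap_end});  // Positive size only.
    lo = std::max(lo, m.end);
  }
  if (lo < hi) gaps.push_back({lo, hi});  // Tail chunk after the last mapping.
  return gaps;
}

int main() {
  std::vector<Interval> gaps = Gaps({{10, 20}, {30, 40}}, 0, 50);
  assert(gaps.size() == 3u);
  assert(gaps[0].start == 0u && gaps[0].end == 10u);
  assert(gaps[1].start == 20u && gaps[1].end == 30u);
  assert(gaps[2].start == 40u && gaps[2].end == 50u);
  return 0;
}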
@@ -241,7 +244,8 @@
}
std::string scratch_dir_;
- std::string isa_dir_;
+ std::string odex_oat_dir_;
+ std::string odex_dir_;
std::vector<std::unique_ptr<MemMap>> image_reservation_;
};
@@ -326,12 +330,43 @@
GenerateOatForTest(dex_location.c_str());
// Verify we can load both dex files.
- OatFileAssistant executable_oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
- std::unique_ptr<OatFile> oat_file = executable_oat_file_assistant.GetBestOatFile();
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
ASSERT_TRUE(oat_file.get() != nullptr);
EXPECT_TRUE(oat_file->IsExecutable());
std::vector<std::unique_ptr<const DexFile>> dex_files;
- dex_files = executable_oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+ dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+ EXPECT_EQ(2u, dex_files.size());
+}
+
+// Case: We have a MultiDEX file and up-to-date OAT file for it with relative
+// encoded dex locations.
+// Expect: The oat file status is kUpToDate.
+TEST_F(OatFileAssistantTest, RelativeEncodedDexLocation) {
+ std::string dex_location = GetScratchDir() + "/RelativeEncodedDexLocation.jar";
+ std::string oat_location = GetOdexDir() + "/RelativeEncodedDexLocation.oat";
+
+ // Create the dex file
+ Copy(GetMultiDexSrc1(), dex_location);
+
+ // Create the oat file with relative encoded dex location.
+ std::vector<std::string> args;
+ args.push_back("--dex-file=" + dex_location);
+ args.push_back("--dex-location=" + std::string("RelativeEncodedDexLocation.jar"));
+ args.push_back("--oat-file=" + oat_location);
+
+ std::string error_msg;
+ ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg;
+
+ // Verify we can load both dex files.
+ OatFileAssistant oat_file_assistant(dex_location.c_str(),
+ oat_location.c_str(),
+ kRuntimeISA, true);
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() != nullptr);
+ EXPECT_TRUE(oat_file->IsExecutable());
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
EXPECT_EQ(2u, dex_files.size());
}
@@ -362,7 +397,7 @@
// Expect: The oat file status is kNeedsRelocation.
TEST_F(OatFileAssistantTest, DexOdexNoOat) {
std::string dex_location = GetScratchDir() + "/DexOdexNoOat.jar";
- std::string odex_location = GetISADir() + "/DexOdexNoOat.odex";
+ std::string odex_location = GetOdexDir() + "/DexOdexNoOat.odex";
// Create the dex and odex files
Copy(GetDexSrc1(), dex_location);
@@ -388,7 +423,7 @@
// Expect: The oat file status is kNeedsRelocation.
TEST_F(OatFileAssistantTest, StrippedDexOdexNoOat) {
std::string dex_location = GetScratchDir() + "/StrippedDexOdexNoOat.jar";
- std::string odex_location = GetISADir() + "/StrippedDexOdexNoOat.odex";
+ std::string odex_location = GetOdexDir() + "/StrippedDexOdexNoOat.odex";
// Create the dex and odex files
Copy(GetDexSrc1(), dex_location);
@@ -437,7 +472,7 @@
// Expect: The oat file status is kNeedsRelocation.
TEST_F(OatFileAssistantTest, StrippedDexOdexOat) {
std::string dex_location = GetScratchDir() + "/StrippedDexOdexOat.jar";
- std::string odex_location = GetISADir() + "/StrippedDexOdexOat.odex";
+ std::string odex_location = GetOdexDir() + "/StrippedDexOdexOat.odex";
// Create the oat file from a different dex file so it looks out of date.
Copy(GetDexSrc2(), dex_location);
@@ -494,8 +529,8 @@
// Expect: It shouldn't crash.
TEST_F(OatFileAssistantTest, OdexOatOverlap) {
std::string dex_location = GetScratchDir() + "/OdexOatOverlap.jar";
- std::string odex_location = GetISADir() + "/OdexOatOverlap.odex";
- std::string oat_location = GetISADir() + "/OdexOatOverlap.oat";
+ std::string odex_location = GetOdexDir() + "/OdexOatOverlap.odex";
+ std::string oat_location = GetOdexDir() + "/OdexOatOverlap.oat";
// Create the dex and odex files
Copy(GetDexSrc1(), dex_location);
@@ -532,7 +567,7 @@
// Expect: The oat file status is kUpToDate, because PIC needs no relocation.
TEST_F(OatFileAssistantTest, DexPicOdexNoOat) {
std::string dex_location = GetScratchDir() + "/DexPicOdexNoOat.jar";
- std::string odex_location = GetISADir() + "/DexPicOdexNoOat.odex";
+ std::string odex_location = GetOdexDir() + "/DexPicOdexNoOat.odex";
// Create the dex and odex files
Copy(GetDexSrc1(), dex_location);
@@ -772,7 +807,7 @@
// avoid using up the virtual memory address space.
TEST_F(OatFileAssistantTest, RaceToGenerate) {
std::string dex_location = GetScratchDir() + "/RaceToGenerate.jar";
- std::string oat_location = GetISADir() + "/RaceToGenerate.oat";
+ std::string oat_location = GetOdexDir() + "/RaceToGenerate.oat";
// We use the lib core dex file, because it's large, and hopefully should
// take a while to generate.
@@ -802,7 +837,7 @@
// Expect: We should load the odex file non-executable.
TEST_F(OatFileAssistantNoDex2OatTest, LoadDexOdexNoOat) {
std::string dex_location = GetScratchDir() + "/LoadDexOdexNoOat.jar";
- std::string odex_location = GetISADir() + "/LoadDexOdexNoOat.odex";
+ std::string odex_location = GetOdexDir() + "/LoadDexOdexNoOat.odex";
// Create the dex and odex files
Copy(GetDexSrc1(), dex_location);
@@ -824,7 +859,7 @@
// Expect: We should load the odex file non-executable.
TEST_F(OatFileAssistantNoDex2OatTest, LoadMultiDexOdexNoOat) {
std::string dex_location = GetScratchDir() + "/LoadMultiDexOdexNoOat.jar";
- std::string odex_location = GetISADir() + "/LoadMultiDexOdexNoOat.odex";
+ std::string odex_location = GetOdexDir() + "/LoadMultiDexOdexNoOat.odex";
// Create the dex and odex files
Copy(GetMultiDexSrc1(), dex_location);
@@ -847,11 +882,11 @@
EXPECT_TRUE(OatFileAssistant::DexFilenameToOdexFilename(
"/foo/bar/baz.jar", kArm, &odex_file, &error_msg)) << error_msg;
- EXPECT_EQ("/foo/bar/arm/baz.odex", odex_file);
+ EXPECT_EQ("/foo/bar/oat/arm/baz.odex", odex_file);
EXPECT_TRUE(OatFileAssistant::DexFilenameToOdexFilename(
"/foo/bar/baz.funnyext", kArm, &odex_file, &error_msg)) << error_msg;
- EXPECT_EQ("/foo/bar/arm/baz.odex", odex_file);
+ EXPECT_EQ("/foo/bar/oat/arm/baz.odex", odex_file);
EXPECT_FALSE(OatFileAssistant::DexFilenameToOdexFilename(
"nopath.jar", kArm, &odex_file, &error_msg));
diff --git a/runtime/oat_file_test.cc b/runtime/oat_file_test.cc
new file mode 100644
index 0000000..f2213e9
--- /dev/null
+++ b/runtime/oat_file_test.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "oat_file.h"
+
+#include <string>
+
+#include <gtest/gtest.h>
+
+namespace art {
+
+TEST(OatFileTest, ResolveRelativeEncodedDexLocation) {
+ EXPECT_EQ(std::string("/data/app/foo/base.apk"),
+ OatFile::ResolveRelativeEncodedDexLocation(
+ nullptr, "/data/app/foo/base.apk"));
+
+ EXPECT_EQ(std::string("/system/framework/base.apk"),
+ OatFile::ResolveRelativeEncodedDexLocation(
+ "/data/app/foo/base.apk", "/system/framework/base.apk"));
+
+ EXPECT_EQ(std::string("/data/app/foo/base.apk"),
+ OatFile::ResolveRelativeEncodedDexLocation(
+ "/data/app/foo/base.apk", "base.apk"));
+
+ EXPECT_EQ(std::string("/data/app/foo/base.apk"),
+ OatFile::ResolveRelativeEncodedDexLocation(
+ "/data/app/foo/base.apk", "foo/base.apk"));
+
+ EXPECT_EQ(std::string("/data/app/foo/base.apk:classes2.dex"),
+ OatFile::ResolveRelativeEncodedDexLocation(
+ "/data/app/foo/base.apk", "base.apk:classes2.dex"));
+
+ EXPECT_EQ(std::string("/data/app/foo/base.apk:classes11.dex"),
+ OatFile::ResolveRelativeEncodedDexLocation(
+ "/data/app/foo/base.apk", "base.apk:classes11.dex"));
+
+ EXPECT_EQ(std::string("base.apk"),
+ OatFile::ResolveRelativeEncodedDexLocation(
+ "/data/app/foo/sludge.apk", "base.apk"));
+
+ EXPECT_EQ(std::string("o/base.apk"),
+ OatFile::ResolveRelativeEncodedDexLocation(
+ "/data/app/foo/base.apk", "o/base.apk"));
+}
+
+} // namespace art
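The cases above pin down the resolution rule: a relative encoded location is rebased onto the absolute dex location only when "/" plus its base name is a path suffix of that location, any :classesN.dex multidex suffix is carried over, and everything else is returned unchanged. A sketch of that rule, with a hypothetical SplitMultiDexSuffix standing in for ART's DexFile helpers:

#include <cassert>
#include <string>

// Hypothetical stand-in for the DexFile utilities that split off a
// ":classesN.dex" multidex suffix.
static void SplitMultiDexSuffix(const std::string& loc,
                                std::string* base, std::string* suffix) {
  size_t pos = loc.find(':');
  *base = loc.substr(0, pos);
  *suffix = (pos == std::string::npos) ? "" : loc.substr(pos);
}

// Sketch of the resolution rule the test above pins down.
static std::string ResolveRelativeEncodedDexLocation(
    const char* abs_dex_location, const std::string& rel_dex_location) {
  if (abs_dex_location != nullptr &&
      !rel_dex_location.empty() && rel_dex_location[0] != '/') {
    std::string base, multidex_suffix;
    SplitMultiDexSuffix(rel_dex_location, &base, &multidex_suffix);
    // Rebase only if "/<base>" is a suffix of the absolute location.
    std::string target_suffix = "/" + base;
    std::string abs(abs_dex_location);
    if (abs.size() > target_suffix.size() &&
        abs.compare(abs.size() - target_suffix.size(),
                    target_suffix.size(), target_suffix) == 0) {
      return abs + multidex_suffix;
    }
  }
  return rel_dex_location;
}

int main() {
  assert(ResolveRelativeEncodedDexLocation(
      "/data/app/foo/base.apk", "base.apk:classes2.dex")
      == "/data/app/foo/base.apk:classes2.dex");
  assert(ResolveRelativeEncodedDexLocation(
      "/data/app/foo/base.apk", "o/base.apk") == "o/base.apk");
  return 0;
}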
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index a53aeaa..337c5df 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -255,6 +255,9 @@
.IntoKey(M::ZygoteMaxFailedBoots)
.Define("-Xno-dex-file-fallback")
.IntoKey(M::NoDexFileFallback)
+ .Define("--cpu-abilist=_")
+ .WithType<std::string>()
+ .IntoKey(M::CpuAbiList)
.Ignore({
"-ea", "-da", "-enableassertions", "-disableassertions", "--runtime-arg", "-esa",
"-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:_",
diff --git a/runtime/primitive.h b/runtime/primitive.h
index 2d6b6b3..32bfdaf 100644
--- a/runtime/primitive.h
+++ b/runtime/primitive.h
@@ -153,7 +153,10 @@
}
static bool IsIntegralType(Type type) {
+ // The Java language does not allow treating boolean as an integral type but
+ // our bit representation makes it safe.
switch (type) {
+ case kPrimBoolean:
case kPrimByte:
case kPrimChar:
case kPrimShort:
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 3260992..cb97049 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -185,7 +185,7 @@
MutableHandle<mirror::ArtField> fhandle = hs.NewHandle(static_fields->Get(0));
EXPECT_EQ("interfaces", std::string(fhandle->GetName()));
EXPECT_EQ("[Ljava/lang/Class;", std::string(fhandle->GetTypeDescriptor()));
- EXPECT_EQ(interfacesFieldClass.Get(), fhandle->GetType(true));
+ EXPECT_EQ(interfacesFieldClass.Get(), fhandle->GetType<true>());
std::string temp;
EXPECT_EQ("L$Proxy1234;", std::string(fhandle->GetDeclaringClass()->GetDescriptor(&temp)));
EXPECT_FALSE(fhandle->IsPrimitiveType());
@@ -194,7 +194,7 @@
fhandle.Assign(static_fields->Get(1));
EXPECT_EQ("throws", std::string(fhandle->GetName()));
EXPECT_EQ("[[Ljava/lang/Class;", std::string(fhandle->GetTypeDescriptor()));
- EXPECT_EQ(throwsFieldClass.Get(), fhandle->GetType(true));
+ EXPECT_EQ(throwsFieldClass.Get(), fhandle->GetType<true>());
EXPECT_EQ("L$Proxy1234;", std::string(fhandle->GetDeclaringClass()->GetDescriptor(&temp)));
EXPECT_FALSE(fhandle->IsPrimitiveType());
}
diff --git a/runtime/reflection-inl.h b/runtime/reflection-inl.h
index f21c1a0..f54d4ca 100644
--- a/runtime/reflection-inl.h
+++ b/runtime/reflection-inl.h
@@ -22,6 +22,7 @@
#include "base/stringprintf.h"
#include "common_throws.h"
#include "jvalue.h"
+#include "mirror/object-inl.h"
#include "primitive.h"
#include "utils.h"
@@ -99,6 +100,17 @@
return false;
}
+inline bool VerifyObjectIsClass(mirror::Object* o, mirror::Class* c) {
+ if (UNLIKELY(o == nullptr)) {
+ ThrowNullPointerException("null receiver");
+ return false;
+ } else if (UNLIKELY(!o->InstanceOf(c))) {
+ InvalidReceiverError(o, c);
+ return false;
+ }
+ return true;
+}
+
} // namespace art
#endif // ART_RUNTIME_REFLECTION_INL_H_
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index a54a39d..4e94de4 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -24,7 +24,6 @@
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
-#include "mirror/class.h"
#include "mirror/object_array-inl.h"
#include "mirror/object_array.h"
#include "nth_caller_visitor.h"
@@ -588,7 +587,7 @@
// If method is not set to be accessible, verify it can be accessed by the caller.
mirror::Class* calling_class = nullptr;
if (!accessible && !VerifyAccess(soa.Self(), receiver, declaring_class, m->GetAccessFlags(),
- &calling_class)) {
+ &calling_class, 2)) {
ThrowIllegalAccessException(
StringPrintf("Class %s cannot access %s method %s of class %s",
calling_class == nullptr ? "null" : PrettyClass(calling_class).c_str(),
@@ -628,21 +627,6 @@
return soa.AddLocalReference<jobject>(BoxPrimitive(Primitive::GetType(shorty[0]), result));
}
-bool VerifyObjectIsClass(mirror::Object* o, mirror::Class* c) {
- if (o == nullptr) {
- ThrowNullPointerException("null receiver");
- return false;
- } else if (!o->InstanceOf(c)) {
- std::string expected_class_name(PrettyDescriptor(c));
- std::string actual_class_name(PrettyTypeOf(o));
- ThrowIllegalArgumentException(StringPrintf("Expected receiver of type %s, but got %s",
- expected_class_name.c_str(),
- actual_class_name.c_str()).c_str());
- return false;
- }
- return true;
-}
-
mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value) {
if (src_class == Primitive::kPrimNot) {
return value.GetL();
@@ -810,11 +794,11 @@
}
bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_class,
- uint32_t access_flags, mirror::Class** calling_class) {
+ uint32_t access_flags, mirror::Class** calling_class, size_t num_frames) {
if ((access_flags & kAccPublic) != 0) {
return true;
}
- NthCallerVisitor visitor(self, 2);
+ NthCallerVisitor visitor(self, num_frames);
visitor.WalkStack();
if (UNLIKELY(visitor.caller == nullptr)) {
// The caller is an attached native thread.
@@ -840,4 +824,12 @@
return declaring_class->IsInSamePackage(caller_class);
}
+void InvalidReceiverError(mirror::Object* o, mirror::Class* c) {
+ std::string expected_class_name(PrettyDescriptor(c));
+ std::string actual_class_name(PrettyTypeOf(o));
+ ThrowIllegalArgumentException(StringPrintf("Expected receiver of type %s, but got %s",
+ expected_class_name.c_str(),
+ actual_class_name.c_str()).c_str());
+}
+
} // namespace art
diff --git a/runtime/reflection.h b/runtime/reflection.h
index 857d63b..ff970e5 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -69,11 +69,14 @@
jobject args, bool accessible)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-bool VerifyObjectIsClass(mirror::Object* o, mirror::Class* c)
+ALWAYS_INLINE bool VerifyObjectIsClass(mirror::Object* o, mirror::Class* c)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_class,
- uint32_t access_flags, mirror::Class** calling_class)
+ uint32_t access_flags, mirror::Class** calling_class, size_t num_frames)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+void InvalidReceiverError(mirror::Object* o, mirror::Class* c)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
} // namespace art
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 0f0c327..23a7db6 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -50,9 +50,11 @@
#include "arch/x86_64/registers_x86_64.h"
#include "asm_support.h"
#include "atomic.h"
+#include "base/arena_allocator.h"
#include "base/dumpable.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
+#include "compiler_callbacks.h"
#include "debugger.h"
#include "elf_file.h"
#include "entrypoints/runtime_asm_entrypoints.h"
@@ -73,6 +75,7 @@
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
+#include "mirror/field.h"
#include "mirror/stack_trace_element.h"
#include "mirror/throwable.h"
#include "monitor.h"
@@ -163,7 +166,6 @@
method_trace_(false),
method_trace_file_size_(0),
instrumentation_(),
- use_compile_time_class_path_(false),
main_thread_group_(nullptr),
system_thread_group_(nullptr),
system_class_loader_(nullptr),
@@ -405,9 +407,9 @@
return true;
}
-static jobject CreateSystemClassLoader() {
- if (Runtime::Current()->UseCompileTimeClassPath()) {
- return NULL;
+static jobject CreateSystemClassLoader(Runtime* runtime) {
+ if (runtime->IsAotCompiler() && !runtime->GetCompilerCallbacks()->IsBootImage()) {
+ return nullptr;
}
ScopedObjectAccess soa(Thread::Current());
@@ -505,7 +507,7 @@
Thread::FinishStartup();
- system_class_loader_ = CreateSystemClassLoader();
+ system_class_loader_ = CreateSystemClassLoader(this);
if (is_zygote_) {
if (!InitZygote()) {
@@ -688,7 +690,7 @@
return false;
}
std::unique_ptr<OatFile> oat_file(OatFile::OpenWithElfFile(elf_file.release(), oat_location,
- &error_msg));
+ nullptr, &error_msg));
if (oat_file.get() == nullptr) {
LOG(INFO) << "Unable to use '" << oat_filename << "' because " << error_msg;
return false;
@@ -792,6 +794,8 @@
verify_ = runtime_options.GetOrDefault(Opt::Verify);
allow_dex_file_fallback_ = !runtime_options.Exists(Opt::NoDexFileFallback);
+ Split(runtime_options.GetOrDefault(Opt::CpuAbiList), ',', &cpu_abilist_);
+
if (runtime_options.GetOrDefault(Opt::Interpret)) {
GetInstrumentation()->ForceInterpretOnly();
}
@@ -1009,8 +1013,8 @@
-1,
static_cast<int>(method_trace_file_size_),
0,
- false,
- false,
+ Trace::TraceOutputMode::kFile,
+ Trace::TraceMode::kMethodTracing,
0);
}
@@ -1288,6 +1292,7 @@
mirror::StackTraceElement::VisitRoots(callback, arg);
mirror::String::VisitRoots(callback, arg);
mirror::Throwable::VisitRoots(callback, arg);
+ mirror::Field::VisitRoots(callback, arg);
// Visit all the primitive array types classes.
mirror::PrimitiveArray<uint8_t>::VisitRoots(callback, arg); // BooleanArray
mirror::PrimitiveArray<int8_t>::VisitRoots(callback, arg); // ByteArray
@@ -1481,23 +1486,6 @@
callee_save_methods_[type] = GcRoot<mirror::ArtMethod>(method);
}
-const std::vector<const DexFile*>& Runtime::GetCompileTimeClassPath(jobject class_loader) {
- if (class_loader == NULL) {
- return GetClassLinker()->GetBootClassPath();
- }
- CHECK(UseCompileTimeClassPath());
- CompileTimeClassPaths::const_iterator it = compile_time_class_paths_.find(class_loader);
- CHECK(it != compile_time_class_paths_.end());
- return it->second;
-}
-
-void Runtime::SetCompileTimeClassPath(jobject class_loader,
- std::vector<const DexFile*>& class_path) {
- CHECK(!IsStarted());
- use_compile_time_class_path_ = true;
- compile_time_class_paths_.Put(class_loader, class_path);
-}
-
void Runtime::StartProfiler(const char* profile_output_filename) {
profile_output_filename_ = profile_output_filename;
profiler_started_ =
@@ -1669,4 +1657,12 @@
}
}
+bool Runtime::CanRelocate() const {
+ return !IsAotCompiler() || compiler_callbacks_->IsRelocationPossible();
+}
+
+bool Runtime::IsCompilingBootImage() const {
+ return IsCompiler() && compiler_callbacks_->IsBootImage();
+}
+
} // namespace art
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 7f33547..085335f 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -27,10 +27,7 @@
#include <vector>
#include "arch/instruction_set.h"
-#include "base/allocator.h"
-#include "base/arena_allocator.h"
#include "base/macros.h"
-#include "compiler_callbacks.h"
#include "gc_root.h"
#include "instrumentation.h"
#include "jobject_comparator.h"
@@ -43,6 +40,9 @@
namespace art {
+class ArenaPool;
+class CompilerCallbacks;
+
namespace gc {
class Heap;
namespace collector {
@@ -112,9 +112,10 @@
return compiler_callbacks_ != nullptr;
}
- bool CanRelocate() const {
- return !IsAotCompiler() || compiler_callbacks_->IsRelocationPossible();
- }
+ // If a compiler, are we compiling a boot image?
+ bool IsCompilingBootImage() const;
+
+ bool CanRelocate() const;
bool ShouldRelocate() const {
return must_relocate_ && CanRelocate();
@@ -452,16 +453,6 @@
return &instrumentation_;
}
- bool UseCompileTimeClassPath() const {
- return use_compile_time_class_path_;
- }
-
- const std::vector<const DexFile*>& GetCompileTimeClassPath(jobject class_loader);
-
- // The caller is responsible for ensuring the class_path DexFiles remain
- // valid as long as the Runtime object remains valid.
- void SetCompileTimeClassPath(jobject class_loader, std::vector<const DexFile*>& class_path);
-
void StartProfiler(const char* profile_output_filename);
void UpdateProfilerState(int state);
@@ -524,6 +515,10 @@
return allow_dex_file_fallback_;
}
+ const std::vector<std::string>& GetCpuAbilist() const {
+ return cpu_abilist_;
+ }
+
bool RunningOnValgrind() const {
return running_on_valgrind_;
}
@@ -681,12 +676,6 @@
size_t method_trace_file_size_;
instrumentation::Instrumentation instrumentation_;
- typedef AllocationTrackingSafeMap<jobject, std::vector<const DexFile*>,
- kAllocatorTagCompileTimeClassPath, JobjectComparator>
- CompileTimeClassPaths;
- CompileTimeClassPaths compile_time_class_paths_;
- bool use_compile_time_class_path_;
-
jobject main_thread_group_;
jobject system_thread_group_;
@@ -706,6 +695,9 @@
// available/usable.
bool allow_dex_file_fallback_;
+ // List of supported CPU ABIs.
+ std::vector<std::string> cpu_abilist_;
+
// Specifies target SDK version to allow workarounds for certain API levels.
int32_t target_sdk_version_;
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 8775f8d..1f273cf 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -62,7 +62,7 @@
RUNTIME_OPTIONS_KEY (Unit, DumpJITInfoOnShutdown)
RUNTIME_OPTIONS_KEY (Unit, IgnoreMaxFootprint)
RUNTIME_OPTIONS_KEY (Unit, LowMemoryMode)
-RUNTIME_OPTIONS_KEY (bool, UseTLAB, false)
+RUNTIME_OPTIONS_KEY (bool, UseTLAB, kUseTlab)
RUNTIME_OPTIONS_KEY (bool, EnableHSpaceCompactForOOM, true)
RUNTIME_OPTIONS_KEY (bool, UseJIT, false)
RUNTIME_OPTIONS_KEY (unsigned int, JITCompileThreshold, jit::Jit::kDefaultCompileThreshold)
@@ -104,6 +104,7 @@
ImageCompilerOptions) // -Ximage-compiler-option ...
RUNTIME_OPTIONS_KEY (bool, Verify, true)
RUNTIME_OPTIONS_KEY (std::string, NativeBridge)
+RUNTIME_OPTIONS_KEY (std::string, CpuAbiList)
// Not parse-able from command line, but can be provided explicitly.
RUNTIME_OPTIONS_KEY (const std::vector<const DexFile*>*, \
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 2d688ee..4ae49dd 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -206,21 +206,22 @@
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
DexRegisterLocation::Kind location_kind =
- dex_register_map.GetLocationKind(vreg, number_of_dex_registers);
+ dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info);
switch (location_kind) {
case DexRegisterLocation::Kind::kInStack: {
- const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg, number_of_dex_registers);
+ const int32_t offset =
+ dex_register_map.GetStackOffsetInBytes(vreg, number_of_dex_registers, code_info);
const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
*val = *reinterpret_cast<const uint32_t*>(addr);
return true;
}
case DexRegisterLocation::Kind::kInRegister:
case DexRegisterLocation::Kind::kInFpuRegister: {
- uint32_t reg = dex_register_map.GetMachineRegister(vreg, number_of_dex_registers);
+ uint32_t reg = dex_register_map.GetMachineRegister(vreg, number_of_dex_registers, code_info);
return GetRegisterIfAccessible(reg, kind, val);
}
case DexRegisterLocation::Kind::kConstant:
- *val = dex_register_map.GetConstant(vreg, number_of_dex_registers);
+ *val = dex_register_map.GetConstant(vreg, number_of_dex_registers, code_info);
return true;
case DexRegisterLocation::Kind::kNone:
return false;
@@ -228,7 +229,7 @@
LOG(FATAL)
<< "Unexpected location kind"
<< DexRegisterLocation::PrettyDescriptor(
- dex_register_map.GetLocationInternalKind(vreg, number_of_dex_registers));
+ dex_register_map.GetLocationInternalKind(vreg, number_of_dex_registers, code_info));
UNREACHABLE();
}
}
@@ -396,18 +397,19 @@
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
DexRegisterLocation::Kind location_kind =
- dex_register_map.GetLocationKind(vreg, number_of_dex_registers);
+ dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info);
uint32_t dex_pc = m->ToDexPc(cur_quick_frame_pc_, false);
switch (location_kind) {
case DexRegisterLocation::Kind::kInStack: {
- const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg, number_of_dex_registers);
+ const int32_t offset =
+ dex_register_map.GetStackOffsetInBytes(vreg, number_of_dex_registers, code_info);
uint8_t* addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + offset;
*reinterpret_cast<uint32_t*>(addr) = new_value;
return true;
}
case DexRegisterLocation::Kind::kInRegister:
case DexRegisterLocation::Kind::kInFpuRegister: {
- uint32_t reg = dex_register_map.GetMachineRegister(vreg, number_of_dex_registers);
+ uint32_t reg = dex_register_map.GetMachineRegister(vreg, number_of_dex_registers, code_info);
return SetRegisterIfAccessible(reg, new_value, kind);
}
case DexRegisterLocation::Kind::kConstant:
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
new file mode 100644
index 0000000..11e7e44
--- /dev/null
+++ b/runtime/stack_map.cc
@@ -0,0 +1,277 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "stack_map.h"
+
+#include <stdint.h>
+
+namespace art {
+
+constexpr size_t DexRegisterLocationCatalog::kNoLocationEntryIndex;
+constexpr uint32_t StackMap::kNoDexRegisterMap;
+constexpr uint32_t StackMap::kNoInlineInfo;
+
+DexRegisterLocation::Kind DexRegisterMap::GetLocationInternalKind(uint16_t dex_register_number,
+ uint16_t number_of_dex_registers,
+ const CodeInfo& code_info) const {
+ DexRegisterLocationCatalog dex_register_location_catalog =
+ code_info.GetDexRegisterLocationCatalog();
+ size_t location_catalog_entry_index = GetLocationCatalogEntryIndex(
+ dex_register_number,
+ number_of_dex_registers,
+ code_info.GetNumberOfDexRegisterLocationCatalogEntries());
+ return dex_register_location_catalog.GetLocationInternalKind(location_catalog_entry_index);
+}
+
+DexRegisterLocation DexRegisterMap::GetDexRegisterLocation(uint16_t dex_register_number,
+ uint16_t number_of_dex_registers,
+ const CodeInfo& code_info) const {
+ DexRegisterLocationCatalog dex_register_location_catalog =
+ code_info.GetDexRegisterLocationCatalog();
+ size_t location_catalog_entry_index = GetLocationCatalogEntryIndex(
+ dex_register_number,
+ number_of_dex_registers,
+ code_info.GetNumberOfDexRegisterLocationCatalogEntries());
+ return dex_register_location_catalog.GetDexRegisterLocation(location_catalog_entry_index);
+}
+
+// Loads `number_of_bytes` at the given `offset` and assembles them into a uint32_t. If
+// `check_max` is true, this method converts the maximum value representable in
+// `number_of_bytes` into the uint32_t 0xFFFFFFFF.
+static uint32_t LoadAt(MemoryRegion region,
+ size_t number_of_bytes,
+ size_t offset,
+ bool check_max = false) {
+ if (number_of_bytes == 0u) {
+ DCHECK(!check_max);
+ return 0;
+ } else if (number_of_bytes == 1u) {
+ uint8_t value = region.LoadUnaligned<uint8_t>(offset);
+ if (check_max && value == 0xFF) {
+ return -1;
+ } else {
+ return value;
+ }
+ } else if (number_of_bytes == 2u) {
+ uint16_t value = region.LoadUnaligned<uint16_t>(offset);
+ if (check_max && value == 0xFFFF) {
+ return -1;
+ } else {
+ return value;
+ }
+ } else if (number_of_bytes == 3u) {
+ uint16_t low = region.LoadUnaligned<uint16_t>(offset);
+ uint16_t high = region.LoadUnaligned<uint8_t>(offset + sizeof(uint16_t));
+ uint32_t value = (high << 16) + low;
+ if (check_max && value == 0xFFFFFF) {
+ return -1;
+ } else {
+ return value;
+ }
+ } else {
+ DCHECK_EQ(number_of_bytes, 4u);
+ return region.LoadUnaligned<uint32_t>(offset);
+ }
+}
+
+static void StoreAt(MemoryRegion region, size_t number_of_bytes, size_t offset, uint32_t value) {
+ if (number_of_bytes == 0u) {
+ DCHECK_EQ(value, 0u);
+ } else if (number_of_bytes == 1u) {
+ region.StoreUnaligned<uint8_t>(offset, value);
+ } else if (number_of_bytes == 2u) {
+ region.StoreUnaligned<uint16_t>(offset, value);
+ } else if (number_of_bytes == 3u) {
+ region.StoreUnaligned<uint16_t>(offset, Low16Bits(value));
+ region.StoreUnaligned<uint8_t>(offset + sizeof(uint16_t), High16Bits(value));
+ } else {
+ region.StoreUnaligned<uint32_t>(offset, value);
+ DCHECK_EQ(number_of_bytes, 4u);
+ }
+}
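Together, LoadAt and StoreAt implement an unaligned variable-width integer encoding, with the all-ones bit pattern reserved as a sentinel (decoded to 0xFFFFFFFF) when check_max is set, matching kNoDexRegisterMap and kNoInlineInfo. A standalone round-trip sketch of the 3-byte case, with a raw byte array standing in for MemoryRegion:

#include <cassert>
#include <cstdint>
#include <cstring>

// Model of the 3-byte case above: a native-endian uint16_t low half
// followed by a uint8_t high byte, with 0xFFFFFF decoding to 0xFFFFFFFF
// when the caller asks for sentinel (check_max) handling.
static void Store3(uint8_t* p, uint32_t value) {
  uint16_t low = static_cast<uint16_t>(value & 0xFFFF);
  uint8_t high = static_cast<uint8_t>(value >> 16);
  std::memcpy(p, &low, sizeof(low));
  p[2] = high;
}

static uint32_t Load3(const uint8_t* p, bool check_max) {
  uint16_t low;
  std::memcpy(&low, p, sizeof(low));
  uint32_t value = (static_cast<uint32_t>(p[2]) << 16) + low;
  return (check_max && value == 0xFFFFFF) ? 0xFFFFFFFFu : value;
}

int main() {
  uint8_t buf[3];
  Store3(buf, 0x123456);
  assert(Load3(buf, /* check_max */ false) == 0x123456u);
  Store3(buf, 0xFFFFFF);
  assert(Load3(buf, /* check_max */ true) == 0xFFFFFFFFu);
  return 0;
}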
+
+uint32_t StackMap::GetDexPc(const CodeInfo& info) const {
+ return LoadAt(region_, info.NumberOfBytesForDexPc(), info.ComputeStackMapDexPcOffset());
+}
+
+void StackMap::SetDexPc(const CodeInfo& info, uint32_t dex_pc) {
+ StoreAt(region_, info.NumberOfBytesForDexPc(), info.ComputeStackMapDexPcOffset(), dex_pc);
+}
+
+uint32_t StackMap::GetNativePcOffset(const CodeInfo& info) const {
+ return LoadAt(region_, info.NumberOfBytesForNativePc(), info.ComputeStackMapNativePcOffset());
+}
+
+void StackMap::SetNativePcOffset(const CodeInfo& info, uint32_t native_pc_offset) {
+ StoreAt(region_,
+ info.NumberOfBytesForNativePc(),
+ info.ComputeStackMapNativePcOffset(),
+ native_pc_offset);
+}
+
+uint32_t StackMap::GetDexRegisterMapOffset(const CodeInfo& info) const {
+ return LoadAt(region_,
+ info.NumberOfBytesForDexRegisterMap(),
+ info.ComputeStackMapDexRegisterMapOffset(),
+ /* check_max */ true);
+}
+
+void StackMap::SetDexRegisterMapOffset(const CodeInfo& info, uint32_t offset) {
+ StoreAt(region_,
+ info.NumberOfBytesForDexRegisterMap(),
+ info.ComputeStackMapDexRegisterMapOffset(),
+ offset);
+}
+
+uint32_t StackMap::GetInlineDescriptorOffset(const CodeInfo& info) const {
+ if (!info.HasInlineInfo()) return kNoInlineInfo;
+ return LoadAt(region_,
+ info.NumberOfBytesForInlineInfo(),
+ info.ComputeStackMapInlineInfoOffset(),
+ /* check_max */ true);
+}
+
+void StackMap::SetInlineDescriptorOffset(const CodeInfo& info, uint32_t offset) {
+ DCHECK(info.HasInlineInfo());
+ StoreAt(region_,
+ info.NumberOfBytesForInlineInfo(),
+ info.ComputeStackMapInlineInfoOffset(),
+ offset);
+}
+
+uint32_t StackMap::GetRegisterMask(const CodeInfo& info) const {
+ return LoadAt(region_,
+ info.NumberOfBytesForRegisterMask(),
+ info.ComputeStackMapRegisterMaskOffset());
+}
+
+void StackMap::SetRegisterMask(const CodeInfo& info, uint32_t mask) {
+ StoreAt(region_,
+ info.NumberOfBytesForRegisterMask(),
+ info.ComputeStackMapRegisterMaskOffset(),
+ mask);
+}
+
+size_t StackMap::ComputeStackMapSizeInternal(size_t stack_mask_size,
+ size_t number_of_bytes_for_inline_info,
+ size_t number_of_bytes_for_dex_map,
+ size_t number_of_bytes_for_dex_pc,
+ size_t number_of_bytes_for_native_pc,
+ size_t number_of_bytes_for_register_mask) {
+ return stack_mask_size
+ + number_of_bytes_for_inline_info
+ + number_of_bytes_for_dex_map
+ + number_of_bytes_for_dex_pc
+ + number_of_bytes_for_native_pc
+ + number_of_bytes_for_register_mask;
+}
+
+size_t StackMap::ComputeStackMapSize(size_t stack_mask_size,
+ size_t inline_info_size,
+ size_t dex_register_map_size,
+ size_t dex_pc_max,
+ size_t native_pc_max,
+ size_t register_mask_max) {
+ return ComputeStackMapSizeInternal(
+ stack_mask_size,
+ inline_info_size == 0
+ ? 0
+ // + 1 to also encode kNoInlineInfo.
+ : CodeInfo::EncodingSizeInBytes(inline_info_size + dex_register_map_size + 1),
+ // + 1 to also encode kNoDexRegisterMap.
+ CodeInfo::EncodingSizeInBytes(dex_register_map_size + 1),
+ CodeInfo::EncodingSizeInBytes(dex_pc_max),
+ CodeInfo::EncodingSizeInBytes(native_pc_max),
+ CodeInfo::EncodingSizeInBytes(register_mask_max));
+}
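CodeInfo::EncodingSizeInBytes is not part of this hunk; consistent with the 0- to 4-byte widths LoadAt and StoreAt handle, it presumably returns the smallest byte count that can represent the given maximum value. A hedged sketch of that shape (an assumption; the real helper is defined in stack_map.h), plus the kind of numbers it yields:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Plausible shape of CodeInfo::EncodingSizeInBytes: the smallest of the
// 0/1/2/3/4-byte widths that can hold max_element.
static size_t EncodingSizeInBytes(uint32_t max_element) {
  return (max_element == 0) ? 0u
       : (max_element <= 0xFF) ? 1u
       : (max_element <= 0xFFFF) ? 2u
       : (max_element <= 0xFFFFFF) ? 3u
       : 4u;
}

int main() {
  // E.g., a method whose largest dex pc is 0x84 needs one byte per stack
  // map for that field, while a register mask of 0x1F0 needs two.
  assert(EncodingSizeInBytes(0x84) == 1u);
  assert(EncodingSizeInBytes(0x1F0) == 2u);
  return 0;
}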
+
+MemoryRegion StackMap::GetStackMask(const CodeInfo& info) const {
+ return region_.Subregion(info.ComputeStackMapStackMaskOffset(), info.GetStackMaskSize());
+}
+
+static void DumpRegisterMapping(std::ostream& os,
+ size_t dex_register_num,
+ DexRegisterLocation location,
+ const std::string& prefix = "v",
+ const std::string& suffix = "") {
+ os << " " << prefix << dex_register_num << ": "
+ << DexRegisterLocation::PrettyDescriptor(location.GetInternalKind())
+ << " (" << location.GetValue() << ")" << suffix << '\n';
+}
+
+void CodeInfo::DumpStackMapHeader(std::ostream& os, size_t stack_map_num) const {
+ StackMap stack_map = GetStackMapAt(stack_map_num);
+ os << " StackMap " << stack_map_num
+ << std::hex
+ << " (dex_pc=0x" << stack_map.GetDexPc(*this)
+ << ", native_pc_offset=0x" << stack_map.GetNativePcOffset(*this)
+ << ", dex_register_map_offset=0x" << stack_map.GetDexRegisterMapOffset(*this)
+ << ", inline_info_offset=0x" << stack_map.GetInlineDescriptorOffset(*this)
+ << ", register_mask=0x" << stack_map.GetRegisterMask(*this)
+ << std::dec
+ << ", stack_mask=0b";
+ MemoryRegion stack_mask = stack_map.GetStackMask(*this);
+ for (size_t i = 0, e = stack_mask.size_in_bits(); i < e; ++i) {
+ os << stack_mask.LoadBit(e - i - 1);
+ }
+ os << ")\n";
+}
+
+void CodeInfo::Dump(std::ostream& os, uint16_t number_of_dex_registers) const {
+ uint32_t code_info_size = GetOverallSize();
+ size_t number_of_stack_maps = GetNumberOfStackMaps();
+ os << " Optimized CodeInfo (size=" << code_info_size
+ << ", number_of_dex_registers=" << number_of_dex_registers
+ << ", number_of_stack_maps=" << number_of_stack_maps
+ << ", has_inline_info=" << HasInlineInfo()
+ << ", number_of_bytes_for_inline_info=" << NumberOfBytesForInlineInfo()
+ << ", number_of_bytes_for_dex_register_map=" << NumberOfBytesForDexRegisterMap()
+ << ", number_of_bytes_for_dex_pc=" << NumberOfBytesForDexPc()
+ << ", number_of_bytes_for_native_pc=" << NumberOfBytesForNativePc()
+ << ", number_of_bytes_for_register_mask=" << NumberOfBytesForRegisterMask()
+ << ")\n";
+
+ // Display the Dex register location catalog.
+ size_t number_of_location_catalog_entries = GetNumberOfDexRegisterLocationCatalogEntries();
+ size_t location_catalog_size_in_bytes = GetDexRegisterLocationCatalogSize();
+ os << " DexRegisterLocationCatalog (number_of_entries=" << number_of_location_catalog_entries
+ << ", size_in_bytes=" << location_catalog_size_in_bytes << ")\n";
+ DexRegisterLocationCatalog dex_register_location_catalog = GetDexRegisterLocationCatalog();
+ for (size_t i = 0; i < number_of_location_catalog_entries; ++i) {
+ DexRegisterLocation location = dex_register_location_catalog.GetDexRegisterLocation(i);
+ DumpRegisterMapping(os, i, location, "entry ");
+ }
+
+ // Display stack maps along with (live) Dex register maps.
+ for (size_t i = 0; i < number_of_stack_maps; ++i) {
+ StackMap stack_map = GetStackMapAt(i);
+ DumpStackMapHeader(os, i);
+ if (stack_map.HasDexRegisterMap(*this)) {
+ DexRegisterMap dex_register_map = GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ // TODO: Display the bit mask of live Dex registers.
+ for (size_t j = 0; j < number_of_dex_registers; ++j) {
+ if (dex_register_map.IsDexRegisterLive(j)) {
+ size_t location_catalog_entry_index = dex_register_map.GetLocationCatalogEntryIndex(
+ j, number_of_dex_registers, number_of_location_catalog_entries);
+ DexRegisterLocation location =
+ dex_register_map.GetDexRegisterLocation(j, number_of_dex_registers, *this);
+ DumpRegisterMapping(
+ os, j, location, "v",
+ "\t[entry " + std::to_string(static_cast<int>(location_catalog_entry_index)) + "]");
+ }
+ }
+ }
+ }
+ // TODO: Dump the stack map's inline information.
+}
+
+} // namespace art
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 961772c..f68cafe 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -26,13 +26,12 @@
// Size of a frame slot, in bytes. This constant is a signed value,
// to please the compiler in arithmetic operations involving int32_t
// (signed) values.
-static ssize_t constexpr kFrameSlotSize = 4;
-
-// Word alignment required on ARM, in bytes.
-static constexpr size_t kWordAlignment = 4;
+static constexpr ssize_t kFrameSlotSize = 4;
// Size of Dex virtual registers.
-static size_t constexpr kVRegSize = 4;
+static constexpr size_t kVRegSize = 4;
+
+class CodeInfo;
/**
* Classes in the following file are wrapper on stack map information backed
@@ -95,9 +94,9 @@
*
* In addition, DexRegisterMap also uses these values:
* - kInStackLargeOffset: value holds a "large" stack offset (greater than
- * 128 bytes);
- * - kConstantLargeValue: value holds a "large" constant (lower than or
- * equal to -16, or greater than 16).
+ * or equal to 128 bytes);
+ * - kConstantLargeValue: value holds a "large" constant (lower than 0, or
+ * greater than or equal to 32).
*/
enum class Kind : uint8_t {
// Short location kinds, for entries fitting on one byte (3 bits
@@ -118,8 +117,7 @@
kInStackLargeOffset = 5, // 0b101
// Large constant, that cannot fit on a 5-bit signed integer (i.e.,
- // lower than -2^(5-1) = -16, or greater than or equal to
- // 2^(5-1) - 1 = 15).
+ // lower than 0, or greater than or equal to 2^5 = 32).
kConstantLargeValue = 6, // 0b110
kLastLocationKind = kConstantLargeValue
@@ -191,8 +189,10 @@
}
}
- DexRegisterLocation(Kind kind, int32_t value)
- : kind_(kind), value_(value) {}
+ // Required by art::StackMapStream::LocationCatalogEntriesIndices.
+ DexRegisterLocation() : kind_(Kind::kNone), value_(0) {}
+
+ DexRegisterLocation(Kind kind, int32_t value) : kind_(kind), value_(value) {}
static DexRegisterLocation None() {
return DexRegisterLocation(Kind::kNone, 0);
@@ -210,36 +210,34 @@
// Get the actual kind of the location.
Kind GetInternalKind() const { return kind_; }
+ bool operator==(DexRegisterLocation other) const {
+ return kind_ == other.kind_ && value_ == other.value_;
+ }
+
+ bool operator!=(DexRegisterLocation other) const {
+ return !(*this == other);
+ }
+
private:
Kind kind_;
int32_t value_;
+
+ friend class DexRegisterLocationHashFn;
};
/**
- * Information on dex register values for a specific PC. The information is
- * of the form:
- * [live_bit_mask, DexRegisterLocation+].
+ * Store information on unique Dex register locations used in a method.
+ * The information is of the form:
+ * [DexRegisterLocation+].
* DexRegisterLocations are either 1- or 5-byte wide (see art::DexRegisterLocation::Kind).
*/
-class DexRegisterMap {
+class DexRegisterLocationCatalog {
public:
- explicit DexRegisterMap(MemoryRegion region) : region_(region) {}
+ explicit DexRegisterLocationCatalog(MemoryRegion region) : region_(region) {}
// Short (compressed) location, fitting on one byte.
typedef uint8_t ShortLocation;
- static size_t LiveBitMaskSize(uint16_t number_of_dex_registers) {
- return RoundUp(number_of_dex_registers, kBitsPerByte) / kBitsPerByte;
- }
-
- void SetLiveBitMask(size_t offset,
- uint16_t number_of_dex_registers,
- const BitVector& live_dex_registers_mask) {
- for (uint16_t i = 0; i < number_of_dex_registers; i++) {
- region_.StoreBit(offset + i, live_dex_registers_mask.IsBitSet(i));
- }
- }
-
void SetRegisterInfo(size_t offset, const DexRegisterLocation& dex_register_location) {
DexRegisterLocation::Kind kind = ComputeCompressedKind(dex_register_location);
int32_t value = dex_register_location.GetValue();
@@ -255,12 +253,12 @@
DCHECK_EQ(value % kFrameSlotSize, 0);
value /= kFrameSlotSize;
}
- DCHECK(IsUint<kValueBits>(value)) << value;
+ DCHECK(IsShortValue(value)) << value;
region_.StoreUnaligned<ShortLocation>(offset, MakeShortLocation(kind, value));
} else {
// Large location. Write the location on one byte and the value
// on 4 bytes.
- DCHECK(!IsUint<kValueBits>(value)) << value;
+ DCHECK(!IsShortValue(value)) << value;
if (kind == DexRegisterLocation::Kind::kInStackLargeOffset) {
// Also divide large stack offsets by 4 for the sake of consistency.
DCHECK_EQ(value % kFrameSlotSize, 0);
@@ -275,63 +273,39 @@
}
}
- bool IsDexRegisterLive(uint16_t dex_register_index) const {
+ // Find the offset of the location catalog entry number `location_catalog_entry_index`.
+ size_t FindLocationOffset(size_t location_catalog_entry_index) const {
size_t offset = kFixedSize;
- return region_.LoadBit(offset + dex_register_index);
- }
-
- static constexpr size_t kNoDexRegisterLocationOffset = -1;
-
- static size_t GetDexRegisterMapLocationsOffset(uint16_t number_of_dex_registers) {
- return kLiveBitMaskOffset + LiveBitMaskSize(number_of_dex_registers);
- }
-
- // Find the offset of the Dex register location number `dex_register_index`.
- size_t FindLocationOffset(uint16_t dex_register_index, uint16_t number_of_dex_registers) const {
- if (!IsDexRegisterLive(dex_register_index)) return kNoDexRegisterLocationOffset;
- size_t offset = GetDexRegisterMapLocationsOffset(number_of_dex_registers);
- // Skip the first `dex_register_index - 1` entries.
- for (uint16_t i = 0; i < dex_register_index; ++i) {
- if (IsDexRegisterLive(i)) {
- // Read the first next byte and inspect its first 3 bits to decide
- // whether it is a short or a large location.
- DexRegisterLocation::Kind kind = ExtractKindAtOffset(offset);
- if (DexRegisterLocation::IsShortLocationKind(kind)) {
- // Short location. Skip the current byte.
- offset += SingleShortEntrySize();
- } else {
- // Large location. Skip the 5 next bytes.
- offset += SingleLargeEntrySize();
- }
+ // Skip the first `location_catalog_entry_index - 1` entries.
+ for (uint16_t i = 0; i < location_catalog_entry_index; ++i) {
+ // Read the first next byte and inspect its first 3 bits to decide
+ // whether it is a short or a large location.
+ DexRegisterLocation::Kind kind = ExtractKindAtOffset(offset);
+ if (DexRegisterLocation::IsShortLocationKind(kind)) {
+ // Short location. Skip the current byte.
+ offset += SingleShortEntrySize();
+ } else {
+ // Large location. Skip the 5 next bytes.
+ offset += SingleLargeEntrySize();
}
}
return offset;
}
- // Get the surface kind.
- DexRegisterLocation::Kind GetLocationKind(uint16_t dex_register_index,
- uint16_t number_of_dex_registers) const {
- return IsDexRegisterLive(dex_register_index)
- ? DexRegisterLocation::ConvertToSurfaceKind(
- GetLocationInternalKind(dex_register_index, number_of_dex_registers))
- : DexRegisterLocation::Kind::kNone;
+ // Get the internal kind of entry at `location_catalog_entry_index`.
+ DexRegisterLocation::Kind GetLocationInternalKind(size_t location_catalog_entry_index) const {
+ if (location_catalog_entry_index == kNoLocationEntryIndex) {
+ return DexRegisterLocation::Kind::kNone;
+ }
+ return ExtractKindAtOffset(FindLocationOffset(location_catalog_entry_index));
}
- // Get the internal kind.
- DexRegisterLocation::Kind GetLocationInternalKind(uint16_t dex_register_index,
- uint16_t number_of_dex_registers) const {
- return IsDexRegisterLive(dex_register_index)
- ? ExtractKindAtOffset(FindLocationOffset(dex_register_index, number_of_dex_registers))
- : DexRegisterLocation::Kind::kNone;
- }
-
- // TODO: Rename as GetDexRegisterLocation?
- DexRegisterLocation GetLocationKindAndValue(uint16_t dex_register_index,
- uint16_t number_of_dex_registers) const {
- if (!IsDexRegisterLive(dex_register_index)) {
+ // Get the (surface) kind and value of entry at `location_catalog_entry_index`.
+ DexRegisterLocation GetDexRegisterLocation(size_t location_catalog_entry_index) const {
+ if (location_catalog_entry_index == kNoLocationEntryIndex) {
return DexRegisterLocation::None();
}
- size_t offset = FindLocationOffset(dex_register_index, number_of_dex_registers);
+ size_t offset = FindLocationOffset(location_catalog_entry_index);
// Read the first byte and inspect its first 3 bits to get the location.
ShortLocation first_byte = region_.LoadUnaligned<ShortLocation>(offset);
DexRegisterLocation::Kind kind = ExtractKindFromShortLocation(first_byte);
@@ -354,31 +328,6 @@
}
}
- int32_t GetStackOffsetInBytes(uint16_t dex_register_index,
- uint16_t number_of_dex_registers) const {
- DexRegisterLocation location =
- GetLocationKindAndValue(dex_register_index, number_of_dex_registers);
- DCHECK(location.GetKind() == DexRegisterLocation::Kind::kInStack);
- // GetLocationKindAndValue returns the offset in bytes.
- return location.GetValue();
- }
-
- int32_t GetConstant(uint16_t dex_register_index, uint16_t number_of_dex_registers) const {
- DexRegisterLocation location =
- GetLocationKindAndValue(dex_register_index, number_of_dex_registers);
- DCHECK(location.GetKind() == DexRegisterLocation::Kind::kConstant);
- return location.GetValue();
- }
-
- int32_t GetMachineRegister(uint16_t dex_register_index, uint16_t number_of_dex_registers) const {
- DexRegisterLocation location =
- GetLocationKindAndValue(dex_register_index, number_of_dex_registers);
- DCHECK(location.GetInternalKind() == DexRegisterLocation::Kind::kInRegister
- || location.GetInternalKind() == DexRegisterLocation::Kind::kInFpuRegister)
- << DexRegisterLocation::PrettyDescriptor(location.GetInternalKind());
- return location.GetValue();
- }
-
// Compute the compressed kind of `location`.
static DexRegisterLocation::Kind ComputeCompressedKind(const DexRegisterLocation& location) {
switch (location.GetInternalKind()) {
@@ -388,22 +337,21 @@
case DexRegisterLocation::Kind::kInRegister:
DCHECK_GE(location.GetValue(), 0);
- DCHECK_LT(location.GetValue(), 1 << DexRegisterMap::kValueBits);
+ DCHECK_LT(location.GetValue(), 1 << kValueBits);
return DexRegisterLocation::Kind::kInRegister;
case DexRegisterLocation::Kind::kInFpuRegister:
DCHECK_GE(location.GetValue(), 0);
- DCHECK_LT(location.GetValue(), 1 << DexRegisterMap::kValueBits);
+ DCHECK_LT(location.GetValue(), 1 << kValueBits);
return DexRegisterLocation::Kind::kInFpuRegister;
case DexRegisterLocation::Kind::kInStack:
- DCHECK_EQ(location.GetValue() % kFrameSlotSize, 0);
- return IsUint<DexRegisterMap::kValueBits>(location.GetValue() / kFrameSlotSize)
+ return IsShortStackOffsetValue(location.GetValue())
? DexRegisterLocation::Kind::kInStack
: DexRegisterLocation::Kind::kInStackLargeOffset;
case DexRegisterLocation::Kind::kConstant:
- return IsUint<DexRegisterMap::kValueBits>(location.GetValue())
+ return IsShortConstantValue(location.GetValue())
? DexRegisterLocation::Kind::kConstant
: DexRegisterLocation::Kind::kConstantLargeValue;
@@ -423,11 +371,10 @@
return true;
case DexRegisterLocation::Kind::kInStack:
- DCHECK_EQ(location.GetValue() % kFrameSlotSize, 0);
- return IsUint<kValueBits>(location.GetValue() / kFrameSlotSize);
+ return IsShortStackOffsetValue(location.GetValue());
case DexRegisterLocation::Kind::kConstant:
- return IsUint<kValueBits>(location.GetValue());
+ return IsShortConstantValue(location.GetValue());
default:
UNREACHABLE();
@@ -435,9 +382,7 @@
}
static size_t EntrySize(const DexRegisterLocation& location) {
- return CanBeEncodedAsShortLocation(location)
- ? DexRegisterMap::SingleShortEntrySize()
- : DexRegisterMap::SingleLargeEntrySize();
+ return CanBeEncodedAsShortLocation(location) ? SingleShortEntrySize() : SingleLargeEntrySize();
}
static size_t SingleShortEntrySize() {
@@ -452,10 +397,14 @@
return region_.size();
}
- static constexpr int kLiveBitMaskOffset = 0;
- static constexpr int kFixedSize = kLiveBitMaskOffset;
+ // Special (invalid) Dex register location catalog entry index meaning
+ // that there is no location for a given Dex register (i.e., it is
+ // mapped to a DexRegisterLocation::Kind::kNone location).
+ static constexpr size_t kNoLocationEntryIndex = -1;
private:
+ static constexpr int kFixedSize = 0;
+
// Width of the kind "field" in a short location, in bits.
static constexpr size_t kKindBits = 3;
// Width of the value "field" in a short location, in bits.
@@ -466,10 +415,24 @@
static constexpr size_t kKindOffset = 0;
static constexpr size_t kValueOffset = kKindBits;
+ static bool IsShortStackOffsetValue(int32_t value) {
+ DCHECK_EQ(value % kFrameSlotSize, 0);
+ return IsShortValue(value / kFrameSlotSize);
+ }
+
+ static bool IsShortConstantValue(int32_t value) {
+ return IsShortValue(value);
+ }
+
+ static bool IsShortValue(int32_t value) {
+ return IsUint<kValueBits>(value);
+ }
+
static ShortLocation MakeShortLocation(DexRegisterLocation::Kind kind, int32_t value) {
- DCHECK(IsUint<kKindBits>(static_cast<uint8_t>(kind))) << static_cast<uint8_t>(kind);
- DCHECK(IsUint<kValueBits>(value)) << value;
- return (static_cast<uint8_t>(kind) & kKindMask) << kKindOffset
+ uint8_t kind_integer_value = static_cast<uint8_t>(kind);
+ DCHECK(IsUint<kKindBits>(kind_integer_value)) << kind_integer_value;
+ DCHECK(IsShortValue(value)) << value;
+ return (kind_integer_value & kKindMask) << kKindOffset
| (value & kValueMask) << kValueOffset;
}
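To make the packing concrete: a short location is a single byte with the 3-bit kind in bits 0-2 (kKindOffset) and the 5-bit value in bits 3-7 (kValueOffset). Assuming kInStack encodes as 0b001 (only the large kinds, 5 and 6, are visible in this diff), a stack slot at byte offset 8 is stored as 8 / kFrameSlotSize = 2, giving the byte (2 << 3) | 1 = 0x11:

#include <cassert>
#include <cstdint>

// Worked example of the short-location byte layout above. The concrete
// encoding 0b001 for kInStack is an assumption.
int main() {
  const uint8_t kInStack = 1;            // Assumed kind encoding, 0b001.
  const int32_t stack_offset_bytes = 8;  // Divided by kFrameSlotSize (4).
  uint8_t packed = static_cast<uint8_t>(
      (kInStack & 0x7) | (((stack_offset_bytes / 4) & 0x1F) << 3));
  assert(packed == 0x11);
  return 0;
}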
@@ -497,6 +460,210 @@
friend class StackMapStream;
};
+/**
+ * Information on Dex register locations for a specific PC, mapping a
+ * stack map's Dex register to a location entry in a DexRegisterLocationCatalog.
+ * The information is of the form:
+ * [live_bit_mask, entries*]
+ * where entries are concatenated unsigned integer values encoded on a number
+ * of bits (fixed per DexRegisterMap instances of a CodeInfo object) depending
+ * on the number of entries in the Dex register location catalog
+ * (see DexRegisterMap::SingleEntrySizeInBits). The map is 1-byte aligned.
+ */
+class DexRegisterMap {
+ public:
+ explicit DexRegisterMap(MemoryRegion region) : region_(region) {}
+
+ // Get the surface kind of Dex register `dex_register_number`.
+ DexRegisterLocation::Kind GetLocationKind(uint16_t dex_register_number,
+ uint16_t number_of_dex_registers,
+ const CodeInfo& code_info) const {
+ return DexRegisterLocation::ConvertToSurfaceKind(
+ GetLocationInternalKind(dex_register_number, number_of_dex_registers, code_info));
+ }
+
+ // Get the internal kind of Dex register `dex_register_number`.
+ DexRegisterLocation::Kind GetLocationInternalKind(uint16_t dex_register_number,
+ uint16_t number_of_dex_registers,
+ const CodeInfo& code_info) const;
+
+ // Get the Dex register location `dex_register_number`.
+ DexRegisterLocation GetDexRegisterLocation(uint16_t dex_register_number,
+ uint16_t number_of_dex_registers,
+ const CodeInfo& code_info) const;
+
+ int32_t GetStackOffsetInBytes(uint16_t dex_register_number,
+ uint16_t number_of_dex_registers,
+ const CodeInfo& code_info) const {
+ DexRegisterLocation location =
+ GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info);
+ DCHECK(location.GetKind() == DexRegisterLocation::Kind::kInStack);
+ // GetDexRegisterLocation returns the offset in bytes.
+ return location.GetValue();
+ }
+
+ int32_t GetConstant(uint16_t dex_register_number,
+ uint16_t number_of_dex_registers,
+ const CodeInfo& code_info) const {
+ DexRegisterLocation location =
+ GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info);
+ DCHECK(location.GetKind() == DexRegisterLocation::Kind::kConstant);
+ return location.GetValue();
+ }
+
+ int32_t GetMachineRegister(uint16_t dex_register_number,
+ uint16_t number_of_dex_registers,
+ const CodeInfo& code_info) const {
+ DexRegisterLocation location =
+ GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info);
+ DCHECK(location.GetInternalKind() == DexRegisterLocation::Kind::kInRegister
+ || location.GetInternalKind() == DexRegisterLocation::Kind::kInFpuRegister)
+ << DexRegisterLocation::PrettyDescriptor(location.GetInternalKind());
+ return location.GetValue();
+ }
+
+ // Get the index of the entry in the Dex register location catalog
+ // corresponding to `dex_register_number`.
+ size_t GetLocationCatalogEntryIndex(uint16_t dex_register_number,
+ uint16_t number_of_dex_registers,
+ size_t number_of_location_catalog_entries) const {
+ if (!IsDexRegisterLive(dex_register_number)) {
+ return DexRegisterLocationCatalog::kNoLocationEntryIndex;
+ }
+
+ if (number_of_location_catalog_entries == 1) {
+ // We do not allocate space for location maps in the case of a
+ // single-entry location catalog, as it is useless. The only valid
+ // entry index is 0;
+ return 0;
+ }
+
+ // The bit offset of the beginning of the map locations.
+ size_t map_locations_offset_in_bits =
+ GetLocationMappingDataOffset(number_of_dex_registers) * kBitsPerByte;
+ size_t index_in_dex_register_map = GetIndexInDexRegisterMap(dex_register_number);
+ DCHECK_LT(index_in_dex_register_map, GetNumberOfLiveDexRegisters(number_of_dex_registers));
+ // The bit size of an entry.
+ size_t map_entry_size_in_bits = SingleEntrySizeInBits(number_of_location_catalog_entries);
+ // The bit offset where `index_in_dex_register_map` is located.
+ size_t entry_offset_in_bits =
+ map_locations_offset_in_bits + index_in_dex_register_map * map_entry_size_in_bits;
+ size_t location_catalog_entry_index =
+ region_.LoadBits(entry_offset_in_bits, map_entry_size_in_bits);
+ DCHECK_LT(location_catalog_entry_index, number_of_location_catalog_entries);
+ return location_catalog_entry_index;
+ }
+
+ // Map entry at `index_in_dex_register_map` to `location_catalog_entry_index`.
+ void SetLocationCatalogEntryIndex(size_t index_in_dex_register_map,
+ size_t location_catalog_entry_index,
+ uint16_t number_of_dex_registers,
+ size_t number_of_location_catalog_entries) {
+ DCHECK_LT(index_in_dex_register_map, GetNumberOfLiveDexRegisters(number_of_dex_registers));
+ DCHECK_LT(location_catalog_entry_index, number_of_location_catalog_entries);
+
+ if (number_of_location_catalog_entries == 1) {
+ // We do not allocate space for location maps in the case of a
+ // single-entry location catalog, as it is useless.
+ return;
+ }
+
+ // The bit offset of the beginning of the map locations.
+ size_t map_locations_offset_in_bits =
+ GetLocationMappingDataOffset(number_of_dex_registers) * kBitsPerByte;
+ // The bit size of an entry.
+ size_t map_entry_size_in_bits = SingleEntrySizeInBits(number_of_location_catalog_entries);
+ // The bit offset where `index_in_dex_register_map` is located.
+ size_t entry_offset_in_bits =
+ map_locations_offset_in_bits + index_in_dex_register_map * map_entry_size_in_bits;
+ region_.StoreBits(entry_offset_in_bits, location_catalog_entry_index, map_entry_size_in_bits);
+ }
+
+ void SetLiveBitMask(uint16_t number_of_dex_registers,
+ const BitVector& live_dex_registers_mask) {
+ size_t live_bit_mask_offset_in_bits = GetLiveBitMaskOffset() * kBitsPerByte;
+ for (uint16_t i = 0; i < number_of_dex_registers; ++i) {
+ region_.StoreBit(live_bit_mask_offset_in_bits + i, live_dex_registers_mask.IsBitSet(i));
+ }
+ }
+
+ bool IsDexRegisterLive(uint16_t dex_register_number) const {
+ size_t live_bit_mask_offset_in_bits = GetLiveBitMaskOffset() * kBitsPerByte;
+ return region_.LoadBit(live_bit_mask_offset_in_bits + dex_register_number);
+ }
+
+ size_t GetNumberOfLiveDexRegisters(uint16_t number_of_dex_registers) const {
+ size_t number_of_live_dex_registers = 0;
+ for (size_t i = 0; i < number_of_dex_registers; ++i) {
+ if (IsDexRegisterLive(i)) {
+ ++number_of_live_dex_registers;
+ }
+ }
+ return number_of_live_dex_registers;
+ }
+
+ static size_t GetLiveBitMaskOffset() {
+ return kFixedSize;
+ }
+
+ // Compute the size of the live register bit mask (in bytes), for a
+ // method having `number_of_dex_registers` Dex registers.
+ static size_t GetLiveBitMaskSize(uint16_t number_of_dex_registers) {
+ return RoundUp(number_of_dex_registers, kBitsPerByte) / kBitsPerByte;
+ }
+
+ static size_t GetLocationMappingDataOffset(uint16_t number_of_dex_registers) {
+ return GetLiveBitMaskOffset() + GetLiveBitMaskSize(number_of_dex_registers);
+ }
+
+ size_t GetLocationMappingDataSize(uint16_t number_of_dex_registers,
+ size_t number_of_location_catalog_entries) const {
+ size_t location_mapping_data_size_in_bits =
+ GetNumberOfLiveDexRegisters(number_of_dex_registers)
+ * SingleEntrySizeInBits(number_of_location_catalog_entries);
+ return RoundUp(location_mapping_data_size_in_bits, kBitsPerByte) / kBitsPerByte;
+ }
+
+ // Return the size of a map entry in bits. Note that if
+ // `number_of_location_catalog_entries` equals 1, this function returns 0,
+ // which is fine, as there is no need to allocate a map for a
+ // single-entry location catalog; the only valid location catalog entry index
+ // for a live register in this case is 0 and there is no need to
+ // store it.
+ static size_t SingleEntrySizeInBits(size_t number_of_location_catalog_entries) {
+ // Handle the case of 0, as we cannot pass 0 to art::WhichPowerOf2.
+ return number_of_location_catalog_entries == 0
+ ? 0u
+ : WhichPowerOf2(RoundUpToPowerOfTwo(number_of_location_catalog_entries));
+ }
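+ // Worked example (illustrative): with 5 catalog entries,
+ // RoundUpToPowerOfTwo(5) == 8 and WhichPowerOf2(8) == 3, so each map
+ // entry needs 3 bits to address any of the 5 catalog entries.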
+
+ // Return the size of the DexRegisterMap object, in bytes.
+ size_t Size() const {
+ return region_.size();
+ }
+
+ private:
+ // Return the index in the Dex register map corresponding to the Dex
+ // register number `dex_register_number`.
+ size_t GetIndexInDexRegisterMap(uint16_t dex_register_number) const {
+ if (!IsDexRegisterLive(dex_register_number)) {
+ return kInvalidIndexInDexRegisterMap;
+ }
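+ // Note: the index of a live register equals the number of live
+ // registers preceding it, which is what GetNumberOfLiveDexRegisters
+ // computes when passed `dex_register_number` as the bound.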
+ return GetNumberOfLiveDexRegisters(dex_register_number);
+ }
+
+ // Special (invalid) Dex register map entry index meaning that there
+ // is no index in the map for a given Dex register (i.e., it must
+ // have been mapped to a DexRegisterLocation::Kind::kNone location).
+ static constexpr size_t kInvalidIndexInDexRegisterMap = -1;
+
+ static constexpr int kFixedSize = 0;
+
+ MemoryRegion region_;
+
+ friend class CodeInfo;
+ friend class StackMapStream;
+};
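+
+// Typical access pattern (a sketch, not part of the original change): a
+// writer calls SetLiveBitMask() once, then SetLocationCatalogEntryIndex()
+// for each live register; a reader checks IsDexRegisterLive() and, when a
+// register is live, uses GetLocationCatalogEntryIndex() to index into the
+// shared DexRegisterLocationCatalog.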
+
/**
* A Stack Map holds compilation information for a specific PC necessary for:
* - Mapping it to a dex PC,
@@ -506,7 +673,8 @@
* - Knowing the values of dex registers.
*
* The information is of the form:
- * [dex_pc, native_pc_offset, dex_register_map_offset, inlining_info_offset, register_mask, stack_mask].
+ * [dex_pc, native_pc_offset, dex_register_map_offset, inlining_info_offset, register_mask,
+ * stack_mask].
*
* Note that register_mask is fixed size, but stack_mask is variable size, depending on the
* stack size of a method.
@@ -515,63 +683,41 @@
public:
explicit StackMap(MemoryRegion region) : region_(region) {}
- uint32_t GetDexPc() const {
- return region_.LoadUnaligned<uint32_t>(kDexPcOffset);
- }
+ uint32_t GetDexPc(const CodeInfo& info) const;
- void SetDexPc(uint32_t dex_pc) {
- region_.StoreUnaligned<uint32_t>(kDexPcOffset, dex_pc);
- }
+ void SetDexPc(const CodeInfo& info, uint32_t dex_pc);
- uint32_t GetNativePcOffset() const {
- return region_.LoadUnaligned<uint32_t>(kNativePcOffsetOffset);
- }
+ uint32_t GetNativePcOffset(const CodeInfo& info) const;
- void SetNativePcOffset(uint32_t native_pc_offset) {
- region_.StoreUnaligned<uint32_t>(kNativePcOffsetOffset, native_pc_offset);
- }
+ void SetNativePcOffset(const CodeInfo& info, uint32_t native_pc_offset);
- uint32_t GetDexRegisterMapOffset() const {
- return region_.LoadUnaligned<uint32_t>(kDexRegisterMapOffsetOffset);
- }
+ uint32_t GetDexRegisterMapOffset(const CodeInfo& info) const;
- void SetDexRegisterMapOffset(uint32_t offset) {
- region_.StoreUnaligned<uint32_t>(kDexRegisterMapOffsetOffset, offset);
- }
+ void SetDexRegisterMapOffset(const CodeInfo& info, uint32_t offset);
- uint32_t GetInlineDescriptorOffset() const {
- return region_.LoadUnaligned<uint32_t>(kInlineDescriptorOffsetOffset);
- }
+ uint32_t GetInlineDescriptorOffset(const CodeInfo& info) const;
- void SetInlineDescriptorOffset(uint32_t offset) {
- region_.StoreUnaligned<uint32_t>(kInlineDescriptorOffsetOffset, offset);
- }
+ void SetInlineDescriptorOffset(const CodeInfo& info, uint32_t offset);
- uint32_t GetRegisterMask() const {
- return region_.LoadUnaligned<uint32_t>(kRegisterMaskOffset);
- }
+ uint32_t GetRegisterMask(const CodeInfo& info) const;
- void SetRegisterMask(uint32_t mask) {
- region_.StoreUnaligned<uint32_t>(kRegisterMaskOffset, mask);
- }
+ void SetRegisterMask(const CodeInfo& info, uint32_t mask);
- MemoryRegion GetStackMask() const {
- return region_.Subregion(kStackMaskOffset, StackMaskSize());
- }
+ MemoryRegion GetStackMask(const CodeInfo& info) const;
- void SetStackMask(const BitVector& sp_map) {
- MemoryRegion region = GetStackMask();
+ void SetStackMask(const CodeInfo& info, const BitVector& sp_map) {
+ MemoryRegion region = GetStackMask(info);
for (size_t i = 0; i < region.size_in_bits(); i++) {
region.StoreBit(i, sp_map.IsBitSet(i));
}
}
- bool HasDexRegisterMap() const {
- return GetDexRegisterMapOffset() != kNoDexRegisterMap;
+ bool HasDexRegisterMap(const CodeInfo& info) const {
+ return GetDexRegisterMapOffset(info) != kNoDexRegisterMap;
}
- bool HasInlineInfo() const {
- return GetInlineDescriptorOffset() != kNoInlineInfo;
+ bool HasInlineInfo(const CodeInfo& info) const {
+ return GetInlineDescriptorOffset(info) != kNoInlineInfo;
}
bool Equals(const StackMap& other) const {
@@ -579,9 +725,12 @@
&& region_.size() == other.region_.size();
}
- static size_t ComputeStackMapSize(size_t stack_mask_size) {
- return StackMap::kFixedSize + stack_mask_size;
- }
+ static size_t ComputeStackMapSize(size_t stack_mask_size,
+ size_t inline_info_size,
+ size_t dex_register_map_size,
+ size_t dex_pc_max,
+ size_t native_pc_max,
+ size_t register_mask_max);
// Special (invalid) offset for the DexRegisterMapOffset field meaning
// that there is no Dex register map for this stack map.
@@ -592,18 +741,17 @@
static constexpr uint32_t kNoInlineInfo = -1;
private:
+ static size_t ComputeStackMapSizeInternal(size_t stack_mask_size,
+ size_t number_of_bytes_for_inline_info,
+ size_t number_of_bytes_for_dex_map,
+ size_t number_of_bytes_for_dex_pc,
+ size_t number_of_bytes_for_native_pc,
+ size_t number_of_bytes_for_register_mask);
+
// TODO: Instead of plain types such as "uint32_t", introduce
// typedefs (and document the memory layout of StackMap).
- static constexpr int kDexPcOffset = 0;
- static constexpr int kNativePcOffsetOffset = kDexPcOffset + sizeof(uint32_t);
- static constexpr int kDexRegisterMapOffsetOffset = kNativePcOffsetOffset + sizeof(uint32_t);
- static constexpr int kInlineDescriptorOffsetOffset =
- kDexRegisterMapOffsetOffset + sizeof(uint32_t);
- static constexpr int kRegisterMaskOffset = kInlineDescriptorOffsetOffset + sizeof(uint32_t);
- static constexpr int kFixedSize = kRegisterMaskOffset + sizeof(uint32_t);
- static constexpr int kStackMaskOffset = kFixedSize;
-
- size_t StackMaskSize() const { return region_.size() - kFixedSize; }
+ static constexpr int kRegisterMaskOffset = 0;
+ static constexpr int kFixedSize = 0;
MemoryRegion region_;
@@ -615,7 +763,8 @@
/**
* Wrapper around all compiler information collected for a method.
* The information is of the form:
- * [overall_size, number_of_stack_maps, stack_mask_size, StackMap+, DexRegisterInfo+, InlineInfo*].
+ * [overall_size, number_of_location_catalog_entries, number_of_stack_maps, stack_mask_size,
+ * DexRegisterLocationCatalog+, StackMap+, DexRegisterMap+, InlineInfo*].
*/
class CodeInfo {
public:
@@ -626,6 +775,118 @@
region_ = MemoryRegion(const_cast<void*>(data), size);
}
+ static size_t EncodingSizeInBytes(size_t max_element) {
+ DCHECK(IsUint<32>(max_element));
+ return (max_element == 0) ? 0
+ : IsUint<8>(max_element) ? 1
+ : IsUint<16>(max_element) ? 2
+ : IsUint<24>(max_element) ? 3
+ : 4;
+ }
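+ // For instance (illustrative values): EncodingSizeInBytes(0) == 0,
+ // EncodingSizeInBytes(255) == 1, EncodingSizeInBytes(256) == 2,
+ // EncodingSizeInBytes(0xFFFFFF) == 3, and anything wider (up to
+ // 32 bits) takes 4 bytes.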
+
+ void SetEncoding(size_t inline_info_size,
+ size_t dex_register_map_size,
+ size_t dex_pc_max,
+ size_t native_pc_max,
+ size_t register_mask_max) {
+ if (inline_info_size != 0) {
+ region_.StoreBit(kHasInlineInfoBitOffset, 1);
+ // + 1 to also encode kNoInlineInfo: if an inline info offset
+ // is at 0xFF, we want to overflow to a larger encoding, because it will
+ // conflict with kNoInlineInfo.
+ // The offset is relative to the dex register map. TODO: Change this.
+ SetEncodingAt(kInlineInfoBitOffset,
+ EncodingSizeInBytes(dex_register_map_size + inline_info_size + 1));
+ } else {
+ region_.StoreBit(kHasInlineInfoBitOffset, 0);
+ SetEncodingAt(kInlineInfoBitOffset, 0);
+ }
+ // + 1 to also encode kNoDexRegisterMap: if a dex register map offset
+ // is at 0xFF, we want to overflow to a larger encoding, because it will
+ // conflict with kNoDexRegisterMap.
+ SetEncodingAt(kDexRegisterMapBitOffset, EncodingSizeInBytes(dex_register_map_size + 1));
+ SetEncodingAt(kDexPcBitOffset, EncodingSizeInBytes(dex_pc_max));
+ SetEncodingAt(kNativePcBitOffset, EncodingSizeInBytes(native_pc_max));
+ SetEncodingAt(kRegisterMaskBitOffset, EncodingSizeInBytes(register_mask_max));
+ }
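+ // Example of the "+ 1" adjustments above (illustrative): with a
+ // dex_register_map_size of 255, an offset at the very end of the region
+ // would be stored as 0xFF and collide with kNoDexRegisterMap in a
+ // one-byte encoding, so we size the encoding for 256 and use two bytes.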
+
+ void SetEncodingAt(size_t bit_offset, size_t number_of_bytes) {
+ // We encode the number of bytes needed for writing a value in 3 bits,
+ // for values that we know fit in at most 32 bits.
+ region_.StoreBit(bit_offset, (number_of_bytes & 1));
+ region_.StoreBit(bit_offset + 1, (number_of_bytes & 2));
+ region_.StoreBit(bit_offset + 2, (number_of_bytes & 4));
+ }
+
+ size_t GetNumberOfBytesForEncoding(size_t bit_offset) const {
+ return region_.LoadBit(bit_offset)
+ + (region_.LoadBit(bit_offset + 1) << 1)
+ + (region_.LoadBit(bit_offset + 2) << 2);
+ }
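+ // Round-trip sketch (assumed usage): SetEncodingAt(off, 3) stores the
+ // bits {1, 1, 0}, and GetNumberOfBytesForEncoding(off) reads them back
+ // as 1 + (1 << 1) + (0 << 2) == 3.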
+
+ bool HasInlineInfo() const {
+ return region_.LoadBit(kHasInlineInfoBitOffset);
+ }
+
+ size_t NumberOfBytesForInlineInfo() const {
+ return GetNumberOfBytesForEncoding(kInlineInfoBitOffset);
+ }
+
+ size_t NumberOfBytesForDexRegisterMap() const {
+ return GetNumberOfBytesForEncoding(kDexRegisterMapBitOffset);
+ }
+
+ size_t NumberOfBytesForRegisterMask() const {
+ return GetNumberOfBytesForEncoding(kRegisterMaskBitOffset);
+ }
+
+ size_t NumberOfBytesForNativePc() const {
+ return GetNumberOfBytesForEncoding(kNativePcBitOffset);
+ }
+
+ size_t NumberOfBytesForDexPc() const {
+ return GetNumberOfBytesForEncoding(kDexPcBitOffset);
+ }
+
+ size_t ComputeStackMapRegisterMaskOffset() const {
+ return StackMap::kRegisterMaskOffset;
+ }
+
+ size_t ComputeStackMapStackMaskOffset() const {
+ return ComputeStackMapRegisterMaskOffset()
+ + (NumberOfBytesForRegisterMask() * sizeof(uint8_t));
+ }
+
+ size_t ComputeStackMapDexPcOffset() const {
+ return ComputeStackMapStackMaskOffset() + GetStackMaskSize();
+ }
+
+ size_t ComputeStackMapNativePcOffset() const {
+ return ComputeStackMapDexPcOffset()
+ + (NumberOfBytesForDexPc() * sizeof(uint8_t));
+ }
+
+ size_t ComputeStackMapDexRegisterMapOffset() const {
+ return ComputeStackMapNativePcOffset()
+ + (NumberOfBytesForNativePc() * sizeof(uint8_t));
+ }
+
+ size_t ComputeStackMapInlineInfoOffset() const {
+ CHECK(HasInlineInfo());
+ return ComputeStackMapDexRegisterMapOffset()
+ + (NumberOfBytesForDexRegisterMap() * sizeof(uint8_t));
+ }
+
+ uint32_t GetDexRegisterLocationCatalogOffset() const {
+ return kFixedSize;
+ }
+
+ DexRegisterLocationCatalog GetDexRegisterLocationCatalog() const {
+ return DexRegisterLocationCatalog(region_.Subregion(
+ GetDexRegisterLocationCatalogOffset(),
+ GetDexRegisterLocationCatalogSize()));
+ }
+
StackMap GetStackMapAt(size_t i) const {
size_t size = StackMapSize();
return StackMap(GetStackMaps().Subregion(i * size, size));
@@ -639,6 +900,19 @@
region_.StoreUnaligned<uint32_t>(kOverallSizeOffset, size);
}
+ uint32_t GetNumberOfDexRegisterLocationCatalogEntries() const {
+ return region_.LoadUnaligned<uint32_t>(kNumberOfDexRegisterLocationCatalogEntriesOffset);
+ }
+
+ void SetNumberOfDexRegisterLocationCatalogEntries(uint32_t num_entries) {
+ region_.StoreUnaligned<uint32_t>(kNumberOfDexRegisterLocationCatalogEntriesOffset, num_entries);
+ }
+
+ uint32_t GetDexRegisterLocationCatalogSize() const {
+ return ComputeDexRegisterLocationCatalogSize(GetDexRegisterLocationCatalogOffset(),
+ GetNumberOfDexRegisterLocationCatalogEntries());
+ }
+
uint32_t GetStackMaskSize() const {
return region_.LoadUnaligned<uint32_t>(kStackMaskSizeOffset);
}
@@ -658,29 +932,38 @@
// Get the size of one stack map of this CodeInfo object, in bytes.
// All stack maps of a CodeInfo have the same size.
size_t StackMapSize() const {
- return StackMap::ComputeStackMapSize(GetStackMaskSize());
+ return StackMap::ComputeStackMapSizeInternal(GetStackMaskSize(),
+ NumberOfBytesForInlineInfo(),
+ NumberOfBytesForDexRegisterMap(),
+ NumberOfBytesForDexPc(),
+ NumberOfBytesForNativePc(),
+ NumberOfBytesForRegisterMask());
}
// Get the size all the stack maps of this CodeInfo object, in bytes.
- size_t StackMapsSize() const {
+ size_t GetStackMapsSize() const {
return StackMapSize() * GetNumberOfStackMaps();
}
+ size_t GetDexRegisterMapsOffset() const {
+ return GetStackMapsOffset() + GetStackMapsSize();
+ }
+
uint32_t GetStackMapsOffset() const {
- return kFixedSize;
+ return GetDexRegisterLocationCatalogOffset() + GetDexRegisterLocationCatalogSize();
}
DexRegisterMap GetDexRegisterMapOf(StackMap stack_map, uint32_t number_of_dex_registers) const {
- DCHECK(stack_map.HasDexRegisterMap());
- uint32_t offset = stack_map.GetDexRegisterMapOffset();
- size_t size = ComputeDexRegisterMapSize(offset, number_of_dex_registers);
+ DCHECK(stack_map.HasDexRegisterMap(*this));
+ uint32_t offset = GetDexRegisterMapsOffset() + stack_map.GetDexRegisterMapOffset(*this);
+ size_t size = ComputeDexRegisterMapSizeOf(offset, number_of_dex_registers);
return DexRegisterMap(region_.Subregion(offset, size));
}
InlineInfo GetInlineInfoOf(StackMap stack_map) const {
- DCHECK(stack_map.HasInlineInfo());
- uint32_t offset = stack_map.GetInlineDescriptorOffset();
- uint8_t depth = region_.Load<uint8_t>(offset);
+ DCHECK(stack_map.HasInlineInfo(*this));
+ uint32_t offset = stack_map.GetInlineDescriptorOffset(*this) + GetDexRegisterMapsOffset();
+ uint8_t depth = region_.LoadUnaligned<uint8_t>(offset);
return InlineInfo(region_.Subregion(offset,
InlineInfo::kFixedSize + depth * InlineInfo::SingleEntrySize()));
}
@@ -688,7 +971,7 @@
StackMap GetStackMapForDexPc(uint32_t dex_pc) const {
for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
StackMap stack_map = GetStackMapAt(i);
- if (stack_map.GetDexPc() == dex_pc) {
+ if (stack_map.GetDexPc(*this) == dex_pc) {
return stack_map;
}
}
@@ -700,7 +983,7 @@
// TODO: stack maps are sorted by native pc, we can do a binary search.
for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
StackMap stack_map = GetStackMapAt(i);
- if (stack_map.GetNativePcOffset() == native_pc_offset) {
+ if (stack_map.GetNativePcOffset(*this) == native_pc_offset) {
return stack_map;
}
}
@@ -708,48 +991,81 @@
UNREACHABLE();
}
+ void Dump(std::ostream& os, uint16_t number_of_dex_registers) const;
+ void DumpStackMapHeader(std::ostream& os, size_t stack_map_num) const;
+
private:
// TODO: Instead of plain types such as "uint32_t", introduce
// typedefs (and document the memory layout of CodeInfo).
static constexpr int kOverallSizeOffset = 0;
- static constexpr int kNumberOfStackMapsOffset = kOverallSizeOffset + sizeof(uint32_t);
+ static constexpr int kEncodingInfoOffset = kOverallSizeOffset + sizeof(uint32_t);
+ static constexpr int kNumberOfDexRegisterLocationCatalogEntriesOffset =
+ kEncodingInfoOffset + sizeof(uint16_t);
+ static constexpr int kNumberOfStackMapsOffset =
+ kNumberOfDexRegisterLocationCatalogEntriesOffset + sizeof(uint32_t);
static constexpr int kStackMaskSizeOffset = kNumberOfStackMapsOffset + sizeof(uint32_t);
static constexpr int kFixedSize = kStackMaskSizeOffset + sizeof(uint32_t);
+ static constexpr int kHasInlineInfoBitOffset = (kEncodingInfoOffset * kBitsPerByte);
+ static constexpr int kInlineInfoBitOffset = kHasInlineInfoBitOffset + 1;
+ static constexpr int kDexRegisterMapBitOffset = kInlineInfoBitOffset + 3;
+ static constexpr int kDexPcBitOffset = kDexRegisterMapBitOffset + 3;
+ static constexpr int kNativePcBitOffset = kDexPcBitOffset + 3;
+ static constexpr int kRegisterMaskBitOffset = kNativePcBitOffset + 3;
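+ // The encoding info thus spans 1 + 5 * 3 == 16 bits, which is why
+ // kNumberOfDexRegisterLocationCatalogEntriesOffset above reserves
+ // sizeof(uint16_t) for it.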
+
MemoryRegion GetStackMaps() const {
return region_.size() == 0
? MemoryRegion()
- : region_.Subregion(kFixedSize, StackMapsSize());
+ : region_.Subregion(GetStackMapsOffset(), GetStackMapsSize());
}
- // Compute the size of a Dex register map starting at offset `origin` in
- // `region_` and containing `number_of_dex_registers` locations.
- size_t ComputeDexRegisterMapSize(uint32_t origin, uint32_t number_of_dex_registers) const {
- // TODO: Ideally, we would like to use art::DexRegisterMap::Size or
- // art::DexRegisterMap::FindLocationOffset, but the DexRegisterMap is not
- // yet built. Try to factor common code.
- size_t offset =
- origin + DexRegisterMap::GetDexRegisterMapLocationsOffset(number_of_dex_registers);
+ // Compute the size of the Dex register map associated with the stack map at
+ // `dex_register_map_offset_in_code_info`.
+ size_t ComputeDexRegisterMapSizeOf(uint32_t dex_register_map_offset_in_code_info,
+ uint16_t number_of_dex_registers) const {
+ // Offset where the actual mapping data starts within art::DexRegisterMap.
+ size_t location_mapping_data_offset_in_dex_register_map =
+ DexRegisterMap::GetLocationMappingDataOffset(number_of_dex_registers);
+ // Create a temporary art::DexRegisterMap to be able to call
+ // art::DexRegisterMap::GetNumberOfLiveDexRegisters on it.
+ DexRegisterMap dex_register_map_without_locations(
+ MemoryRegion(region_.Subregion(dex_register_map_offset_in_code_info,
+ location_mapping_data_offset_in_dex_register_map)));
+ size_t number_of_live_dex_registers =
+ dex_register_map_without_locations.GetNumberOfLiveDexRegisters(number_of_dex_registers);
+ size_t location_mapping_data_size_in_bits =
+ DexRegisterMap::SingleEntrySizeInBits(GetNumberOfDexRegisterLocationCatalogEntries())
+ * number_of_live_dex_registers;
+ size_t location_mapping_data_size_in_bytes =
+ RoundUp(location_mapping_data_size_in_bits, kBitsPerByte) / kBitsPerByte;
+ size_t dex_register_map_size =
+ location_mapping_data_offset_in_dex_register_map + location_mapping_data_size_in_bytes;
+ return dex_register_map_size;
+ }
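+ // Sizing sketch (illustrative numbers): with 16 Dex registers the live
+ // bit mask takes RoundUp(16, 8) / 8 == 2 bytes; if 10 registers are live
+ // and the catalog holds 5 entries (3 bits per map entry), the mapping
+ // data adds RoundUp(10 * 3, 8) / 8 == 4 bytes, i.e., 6 bytes in total.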
- // Create a temporary DexRegisterMap to be able to call DexRegisterMap.IsDexRegisterLive.
- DexRegisterMap only_live_mask(MemoryRegion(region_.Subregion(origin, offset - origin)));
+ // Compute the size of a Dex register location catalog starting at offset `origin`
+ // in `region_` and containing `number_of_dex_locations` entries.
+ size_t ComputeDexRegisterLocationCatalogSize(uint32_t origin,
+ uint32_t number_of_dex_locations) const {
+ // TODO: Ideally, we would like to use art::DexRegisterLocationCatalog::Size or
+ // art::DexRegisterLocationCatalog::FindLocationOffset, but the
+ // DexRegisterLocationCatalog is not yet built. Try to factor common code.
+ size_t offset = origin + DexRegisterLocationCatalog::kFixedSize;
- // Skip the first `number_of_dex_registers - 1` entries.
- for (uint16_t i = 0; i < number_of_dex_registers; ++i) {
- if (only_live_mask.IsDexRegisterLive(i)) {
- // Read the first next byte and inspect its first 3 bits to decide
- // whether it is a short or a large location.
- DexRegisterMap::ShortLocation first_byte =
- region_.LoadUnaligned<DexRegisterMap::ShortLocation>(offset);
- DexRegisterLocation::Kind kind =
- DexRegisterMap::ExtractKindFromShortLocation(first_byte);
- if (DexRegisterLocation::IsShortLocationKind(kind)) {
- // Short location. Skip the current byte.
- offset += DexRegisterMap::SingleShortEntrySize();
- } else {
- // Large location. Skip the 5 next bytes.
- offset += DexRegisterMap::SingleLargeEntrySize();
- }
+ // Walk past all `number_of_dex_locations` entries to accumulate their sizes.
+ for (uint16_t i = 0; i < number_of_dex_locations; ++i) {
+ // Read the next byte and inspect its first 3 bits to decide
+ // whether it is a short or a large location.
+ DexRegisterLocationCatalog::ShortLocation first_byte =
+ region_.LoadUnaligned<DexRegisterLocationCatalog::ShortLocation>(offset);
+ DexRegisterLocation::Kind kind =
+ DexRegisterLocationCatalog::ExtractKindFromShortLocation(first_byte);
+ if (DexRegisterLocation::IsShortLocationKind(kind)) {
+ // Short location. Skip the current byte.
+ offset += DexRegisterLocationCatalog::SingleShortEntrySize();
+ } else {
+ // Large location. Skip the 5 next bytes.
+ offset += DexRegisterLocationCatalog::SingleLargeEntrySize();
}
}
size_t size = offset - origin;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index affb6cd..e1a07e9 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2155,8 +2155,9 @@
Runtime* runtime = Runtime::Current();
const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
- StackMap map = m->GetStackMap(native_pc_offset);
- MemoryRegion mask = map.GetStackMask();
+ CodeInfo code_info = m->GetOptimizedCodeInfo();
+ StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ MemoryRegion mask = map.GetStackMask(code_info);
// Visit stack entries that hold pointers.
for (size_t i = 0; i < mask.size_in_bits(); ++i) {
if (mask.LoadBit(i)) {
@@ -2173,7 +2174,7 @@
}
}
// Visit callee-save registers that hold pointers.
- uint32_t register_mask = map.GetRegisterMask();
+ uint32_t register_mask = map.GetRegisterMask(code_info);
for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
if (register_mask & (1 << i)) {
mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
diff --git a/runtime/thread.h b/runtime/thread.h
index da7af83..9d4d89d 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -573,6 +573,21 @@
OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
}
+ template<size_t pointer_size>
+ static ThreadOffset<pointer_size> ThreadLocalPosOffset() {
+ return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_pos));
+ }
+
+ template<size_t pointer_size>
+ static ThreadOffset<pointer_size> ThreadLocalEndOffset() {
+ return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_end));
+ }
+
+ template<size_t pointer_size>
+ static ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
+ return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_objects));
+ }
+
// Size of stack less any space reserved for stack overflow
size_t GetStackSize() const {
return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 88be23f..ea0a642 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -17,6 +17,7 @@
#include "trace.h"
#include <sys/uio.h>
+#include <unistd.h>
#define ATRACE_TAG ATRACE_TAG_DALVIK
#include "cutils/trace.h"
@@ -327,7 +328,7 @@
}
void Trace::Start(const char* trace_filename, int trace_fd, int buffer_size, int flags,
- bool direct_to_ddms, bool sampling_enabled, int interval_us) {
+ TraceOutputMode output_mode, TraceMode trace_mode, int interval_us) {
Thread* self = Thread::Current();
{
MutexLock mu(self, *Locks::trace_lock_);
@@ -338,7 +339,7 @@
}
// Check interval if sampling is enabled
- if (sampling_enabled && interval_us <= 0) {
+ if (trace_mode == TraceMode::kSampling && interval_us <= 0) {
LOG(ERROR) << "Invalid sampling interval: " << interval_us;
ScopedObjectAccess soa(self);
ThrowRuntimeException("Invalid sampling interval: %d", interval_us);
@@ -347,7 +348,7 @@
// Open trace file if not going directly to ddms.
std::unique_ptr<File> trace_file;
- if (!direct_to_ddms) {
+ if (output_mode != TraceOutputMode::kDDMS) {
if (trace_fd < 0) {
trace_file.reset(OS::CreateEmptyFile(trace_filename));
} else {
@@ -376,8 +377,8 @@
LOG(ERROR) << "Trace already in progress, ignoring this request";
} else {
enable_stats = (flags & kTraceCountAllocs) != 0;
- the_trace_ = new Trace(trace_file.release(), buffer_size, flags, sampling_enabled);
- if (sampling_enabled) {
+ the_trace_ = new Trace(trace_file.release(), buffer_size, flags, trace_mode);
+ if (trace_mode == TraceMode::kSampling) {
CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, NULL, &RunSamplingThread,
reinterpret_cast<void*>(interval_us)),
"Sampling profiler thread");
@@ -426,7 +427,7 @@
stop_alloc_counting = (the_trace->flags_ & kTraceCountAllocs) != 0;
the_trace->FinishTracing();
- if (the_trace->sampling_enabled_) {
+ if (the_trace->trace_mode_ == TraceMode::kSampling) {
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr);
} else {
@@ -464,16 +465,21 @@
MutexLock mu(Thread::Current(), *Locks::trace_lock_);
if (the_trace_ == NULL) {
return kTracingInactive;
- } else if (the_trace_->sampling_enabled_) {
- return kSampleProfilingActive;
} else {
- return kMethodTracingActive;
+ switch (the_trace_->trace_mode_) {
+ case TraceMode::kSampling:
+ return kSampleProfilingActive;
+ case TraceMode::kMethodTracing:
+ return kMethodTracingActive;
+ }
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
}
}
-Trace::Trace(File* trace_file, int buffer_size, int flags, bool sampling_enabled)
+Trace::Trace(File* trace_file, int buffer_size, int flags, TraceMode trace_mode)
: trace_file_(trace_file), buf_(new uint8_t[buffer_size]()), flags_(flags),
- sampling_enabled_(sampling_enabled), clock_source_(default_clock_source_),
+ trace_mode_(trace_mode), clock_source_(default_clock_source_),
buffer_size_(buffer_size), start_time_(MicroTime()),
clock_overhead_ns_(GetClockOverheadNanoSeconds()), cur_offset_(0), overflow_(false) {
// Set up the beginning of the trace.
@@ -534,6 +540,7 @@
os << StringPrintf("num-method-calls=%zd\n", num_records);
os << StringPrintf("clock-call-overhead-nsec=%d\n", clock_overhead_ns_);
os << StringPrintf("vm=art\n");
+ os << StringPrintf("pid=%d\n", getpid());
if ((flags_ & kTraceCountAllocs) != 0) {
os << StringPrintf("alloc-count=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_OBJECTS));
os << StringPrintf("alloc-size=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_BYTES));
diff --git a/runtime/trace.h b/runtime/trace.h
index dd8186a..80f926f 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -51,10 +51,20 @@
kTraceCountAllocs = 1,
};
+ enum class TraceOutputMode {
+ kFile,
+ kDDMS
+ };
+
+ enum class TraceMode {
+ kMethodTracing,
+ kSampling
+ };
+
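+ // A call under the new signature might look like this (illustrative
+ // arguments only):
+ //   Trace::Start("/data/method.trace", -1 /* trace_fd */,
+ //                8 * 1024 * 1024 /* buffer_size */, 0 /* flags */,
+ //                TraceOutputMode::kFile, TraceMode::kSampling,
+ //                10000 /* interval_us */);
+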
static void SetDefaultClockSource(TraceClockSource clock_source);
static void Start(const char* trace_filename, int trace_fd, int buffer_size, int flags,
- bool direct_to_ddms, bool sampling_enabled, int interval_us)
+ TraceOutputMode output_mode, TraceMode trace_mode, int interval_us)
LOCKS_EXCLUDED(Locks::mutator_lock_,
Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_,
@@ -107,7 +117,7 @@
static void StoreExitingThreadInfo(Thread* thread);
private:
- explicit Trace(File* trace_file, int buffer_size, int flags, bool sampling_enabled);
+ explicit Trace(File* trace_file, int buffer_size, int flags, TraceMode trace_mode);
// The sampling interval in microseconds is passed as an argument.
static void* RunSamplingThread(void* arg) LOCKS_EXCLUDED(Locks::trace_lock_);
@@ -148,7 +158,7 @@
const int flags_;
- // True if traceview should sample instead of instrumenting method entry/exit.
- const bool sampling_enabled_;
+ // Whether we trace by sampling or by instrumenting method entry/exit.
+ const TraceMode trace_mode_;
const TraceClockSource clock_source_;
diff --git a/runtime/utf.cc b/runtime/utf.cc
index 39c8d15..3d13c3e 100644
--- a/runtime/utf.cc
+++ b/runtime/utf.cc
@@ -67,15 +67,39 @@
void ConvertUtf16ToModifiedUtf8(char* utf8_out, const uint16_t* utf16_in, size_t char_count) {
while (char_count--) {
- uint16_t ch = *utf16_in++;
+ const uint16_t ch = *utf16_in++;
if (ch > 0 && ch <= 0x7f) {
*utf8_out++ = ch;
} else {
+ // char_count == 0 here implies we've encountered an unpaired
+ // surrogate and we have no choice but to encode it as a 3-byte UTF
+ // sequence. Note that unpaired surrogates can occur as part of
+ // "normal" operation.
+ if ((ch >= 0xd800 && ch <= 0xdbff) && (char_count > 0)) {
+ const uint16_t ch2 = *utf16_in;
+
+ // Check if the other half of the pair is within the expected
+ // range. If it isn't, we will have to emit both "halves" as
+ // separate 3-byte sequences.
+ if (ch2 >= 0xdc00 && ch2 <= 0xdfff) {
+ utf16_in++;
+ char_count--;
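+ // The constant below folds the usual surrogate-pair decoding
+ // ((ch - 0xd800) << 10) + (ch2 - 0xdc00) + 0x10000 into a single
+ // subtraction: (0xd800 << 10) + 0xdc00 - 0x10000 == 0x035fdc00.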
+ const uint32_t code_point = (ch << 10) + ch2 - 0x035fdc00;
+ *utf8_out++ = (code_point >> 18) | 0xf0;
+ *utf8_out++ = ((code_point >> 12) & 0x3f) | 0x80;
+ *utf8_out++ = ((code_point >> 6) & 0x3f) | 0x80;
+ *utf8_out++ = (code_point & 0x3f) | 0x80;
+ continue;
+ }
+ }
+
if (ch > 0x07ff) {
+ // Three byte encoding.
*utf8_out++ = (ch >> 12) | 0xe0;
*utf8_out++ = ((ch >> 6) & 0x3f) | 0x80;
*utf8_out++ = (ch & 0x3f) | 0x80;
} else /*(ch > 0x7f || ch == 0)*/ {
+ // Two byte encoding.
*utf8_out++ = (ch >> 6) | 0xc0;
*utf8_out++ = (ch & 0x3f) | 0x80;
}
@@ -147,15 +171,32 @@
size_t CountUtf8Bytes(const uint16_t* chars, size_t char_count) {
size_t result = 0;
while (char_count--) {
- uint16_t ch = *chars++;
+ const uint16_t ch = *chars++;
if (ch > 0 && ch <= 0x7f) {
++result;
- } else {
- if (ch > 0x7ff) {
- result += 3;
+ } else if (ch >= 0xd800 && ch <= 0xdbff) {
+ if (char_count > 0) {
+ const uint16_t ch2 = *chars;
+ // If we find a properly paired surrogate, we emit it as a 4-byte
+ // UTF sequence. If we find an unpaired leading or trailing surrogate,
+ // we emit it as a 3-byte sequence as we would have done earlier.
+ if (ch2 >= 0xdc00 && ch2 <= 0xdfff) {
+ chars++;
+ char_count--;
+
+ result += 4;
+ } else {
+ result += 3;
+ }
} else {
- result += 2;
+ // This implies we found an unpaired leading surrogate at the end
+ // of the string.
+ result += 3;
}
+ } else if (ch > 0x7ff) {
+ result += 3;
+ } else {
+ result += 2;
}
}
return result;
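// Summary of the counts above (mirrors ConvertUtf16ToModifiedUtf8): code
// units 0x01-0x7f take 1 byte; 0x00 and 0x80-0x7ff take 2; 0x800-0xffff,
// including unpaired surrogates, take 3; valid surrogate pairs take 4.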
diff --git a/runtime/utf_test.cc b/runtime/utf_test.cc
index 8048bbd..94a6ea5 100644
--- a/runtime/utf_test.cc
+++ b/runtime/utf_test.cc
@@ -19,6 +19,8 @@
#include "common_runtime_test.h"
#include "utf-inl.h"
+#include <vector>
+
namespace art {
class UtfTest : public CommonRuntimeTest {};
@@ -110,4 +112,52 @@
EXPECT_EQ(2u, CountModifiedUtf8Chars(reinterpret_cast<const char *>(kSurrogateEncoding)));
}
+static void AssertConversion(const std::vector<uint16_t>& input,
+                             const std::vector<uint8_t>& expected) {
+ ASSERT_EQ(expected.size(), CountUtf8Bytes(&input[0], input.size()));
+
+ std::vector<uint8_t> output(expected.size());
+ ConvertUtf16ToModifiedUtf8(reinterpret_cast<char*>(&output[0]), &input[0], input.size());
+ EXPECT_EQ(expected, output);
+}
+
+TEST_F(UtfTest, CountAndConvertUtf8Bytes) {
+ // Surrogate pairs will be converted into 4 byte sequences.
+ AssertConversion({ 0xd801, 0xdc00 }, { 0xf0, 0x90, 0x90, 0x80 });
+
+ // Unpaired trailing surrogates (both above the leading surrogate
+ // range), each encoded as a three-byte sequence.
+ AssertConversion({ 0xdef0 }, { 0xed, 0xbb, 0xb0 });
+ AssertConversion({ 0xdcff }, { 0xed, 0xb3, 0xbf });
+ // Two byte encoding.
+ AssertConversion({ 0x0101 }, { 0xc4, 0x81 });
+
+ // Two byte special case: 0 must use an overlong encoding.
+ AssertConversion({ 0x0101, 0x0000 }, { 0xc4, 0x81, 0xc0, 0x80 });
+
+ // One byte encoding.
+ AssertConversion({ 'h', 'e', 'l', 'l', 'o' }, { 0x68, 0x65, 0x6c, 0x6c, 0x6f });
+
+ AssertConversion({
+ 0xd802, 0xdc02, // Surrogate pair
+ 0xdef0, 0xdcff, // Three byte encodings
+ 0x0101, 0x0000, // Two byte encodings
+ 'p' , 'p' // One byte encoding
+ }, {
+ 0xf0, 0x90, 0xa0, 0x82,
+ 0xed, 0xbb, 0xb0, 0xed, 0xb3, 0xbf,
+ 0xc4, 0x81, 0xc0, 0x80,
+ 0x70, 0x70
+ });
+}
+
+TEST_F(UtfTest, CountAndConvertUtf8Bytes_UnpairedSurrogate) {
+ // Unpaired trailing surrogate at the end of input.
+ AssertConversion({ 'h', 'e', 0xd801 }, { 'h', 'e', 0xed, 0xa0, 0x81 });
+ // Unpaired (or incorrectly paired) surrogates in the middle of the input.
+ AssertConversion({ 'h', 0xd801, 'e' }, { 'h', 0xed, 0xa0, 0x81, 'e' });
+ AssertConversion({ 'h', 0xd801, 0xd801, 'e' }, { 'h', 0xed, 0xa0, 0x81, 0xed, 0xa0, 0x81, 'e' });
+ AssertConversion({ 'h', 0xdc00, 0xdc00, 'e' }, { 'h', 0xed, 0xb0, 0x80, 0xed, 0xb0, 0x80, 'e' });
+}
+
} // namespace art
diff --git a/runtime/utils.h b/runtime/utils.h
index 9a9f51a..e6a6b1d 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -109,11 +109,17 @@
DCHECK(::art::IsAlignedParam(value, alignment)) << reinterpret_cast<const void*>(value)
// Check whether an N-bit two's-complement representation can hold value.
-static inline bool IsInt(int N, intptr_t value) {
- CHECK_LT(0, N);
- CHECK_LT(N, kBitsPerIntPtrT);
- intptr_t limit = static_cast<intptr_t>(1) << (N - 1);
- return (-limit <= value) && (value < limit);
+template <typename T>
+static inline bool IsInt(int N, T value) {
+ int bitsPerT = sizeof(T) * kBitsPerByte;
+ if (N == bitsPerT) {
+ return true;
+ } else {
+ CHECK_LT(0, N);
+ CHECK_LT(N, bitsPerT);
+ T limit = static_cast<T>(1) << (N - 1);
+ return (-limit <= value) && (value < limit);
+ }
}
template <typename T>
@@ -311,19 +317,6 @@
return (ch < ' ' || ch > '~');
}
-// Interpret the bit pattern of input (type U) as type V. Requires the size
-// of V >= size of U (compile-time checked).
-template<typename U, typename V>
-static inline V bit_cast(U in) {
- static_assert(sizeof(U) <= sizeof(V), "Size of U not <= size of V");
- union {
- U u;
- V v;
- } tmp;
- tmp.u = in;
- return tmp.v;
-}
-
std::string PrintableChar(uint16_t ch);
// Returns an ASCII string corresponding to the given UTF-8 string.
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index 6b36c19..833427e 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -432,4 +432,79 @@
EXPECT_EQ(32u, MinimumBitsToStore(~static_cast<uint32_t>(0)));
}
+static constexpr int64_t INT_MIN_minus1 = static_cast<int64_t>(INT_MIN) - 1;
+static constexpr int64_t INT_MAX_plus1 = static_cast<int64_t>(INT_MAX) + 1;
+static constexpr int64_t UINT_MAX_plus1 = static_cast<int64_t>(UINT_MAX) + 1;
+
+TEST_F(UtilsTest, IsInt) {
+ EXPECT_FALSE(IsInt(1, -2));
+ EXPECT_TRUE(IsInt(1, -1));
+ EXPECT_TRUE(IsInt(1, 0));
+ EXPECT_FALSE(IsInt(1, 1));
+
+ EXPECT_FALSE(IsInt(4, -9));
+ EXPECT_TRUE(IsInt(4, -8));
+ EXPECT_TRUE(IsInt(4, 7));
+ EXPECT_FALSE(IsInt(4, 8));
+
+ EXPECT_FALSE(IsInt(32, INT_MIN_minus1));
+ EXPECT_TRUE(IsInt(32, INT_MIN));
+ EXPECT_TRUE(IsInt(32, INT_MAX));
+ EXPECT_FALSE(IsInt(32, INT_MAX_plus1));
+}
+
+TEST_F(UtilsTest, IsInt_Static) {
+ EXPECT_FALSE(IsInt<1>(-2));
+ EXPECT_TRUE(IsInt<1>(-1));
+ EXPECT_TRUE(IsInt<1>(0));
+ EXPECT_FALSE(IsInt<1>(1));
+
+ EXPECT_FALSE(IsInt<4>(-9));
+ EXPECT_TRUE(IsInt<4>(-8));
+ EXPECT_TRUE(IsInt<4>(7));
+ EXPECT_FALSE(IsInt<4>(8));
+
+ EXPECT_FALSE(IsInt<32>(INT_MIN_minus1));
+ EXPECT_TRUE(IsInt<32>(INT_MIN));
+ EXPECT_TRUE(IsInt<32>(INT_MAX));
+ EXPECT_FALSE(IsInt<32>(INT_MAX_plus1));
+}
+
+TEST_F(UtilsTest, IsUint) {
+ EXPECT_FALSE(IsUint<1>(-1));
+ EXPECT_TRUE(IsUint<1>(0));
+ EXPECT_TRUE(IsUint<1>(1));
+ EXPECT_FALSE(IsUint<1>(2));
+
+ EXPECT_FALSE(IsUint<4>(-1));
+ EXPECT_TRUE(IsUint<4>(0));
+ EXPECT_TRUE(IsUint<4>(15));
+ EXPECT_FALSE(IsUint<4>(16));
+
+ EXPECT_FALSE(IsUint<32>(-1));
+ EXPECT_TRUE(IsUint<32>(0));
+ EXPECT_TRUE(IsUint<32>(UINT_MAX));
+ EXPECT_FALSE(IsUint<32>(UINT_MAX_plus1));
+}
+
+TEST_F(UtilsTest, IsAbsoluteUint) {
+ EXPECT_FALSE(IsAbsoluteUint<1>(-2));
+ EXPECT_TRUE(IsAbsoluteUint<1>(-1));
+ EXPECT_TRUE(IsAbsoluteUint<32>(0));
+ EXPECT_TRUE(IsAbsoluteUint<1>(1));
+ EXPECT_FALSE(IsAbsoluteUint<1>(2));
+
+ EXPECT_FALSE(IsAbsoluteUint<4>(-16));
+ EXPECT_TRUE(IsAbsoluteUint<4>(-15));
+ EXPECT_TRUE(IsAbsoluteUint<32>(0));
+ EXPECT_TRUE(IsAbsoluteUint<4>(15));
+ EXPECT_FALSE(IsAbsoluteUint<4>(16));
+
+ EXPECT_FALSE(IsAbsoluteUint<32>(-UINT_MAX_plus1));
+ EXPECT_TRUE(IsAbsoluteUint<32>(-UINT_MAX));
+ EXPECT_TRUE(IsAbsoluteUint<32>(0));
+ EXPECT_TRUE(IsAbsoluteUint<32>(UINT_MAX));
+ EXPECT_FALSE(IsAbsoluteUint<32>(UINT_MAX_plus1));
+}
+
} // namespace art
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 9ceb6f4..1d04192 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1086,7 +1086,7 @@
const uint16_t* insns = code_item_->insns_ + cur_offset;
/* make sure the start of the switch is in range */
int32_t switch_offset = insns[1] | ((int32_t) insns[2]) << 16;
- if ((int32_t) cur_offset + switch_offset < 0 || cur_offset + switch_offset + 2 >= insn_count) {
+ if ((int32_t) cur_offset + switch_offset < 0 || cur_offset + switch_offset + 2 > insn_count) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch start: at " << cur_offset
<< ", switch offset " << switch_offset
<< ", count " << insn_count;
@@ -1752,8 +1752,21 @@
Fail(VERIFY_ERROR_NO_CLASS) << " can't resolve returned type '" << return_type
<< "' or '" << reg_type << "'";
} else {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "returning '" << reg_type
- << "', but expected from declaration '" << return_type << "'";
+ bool soft_error = false;
+ // Check whether arrays are involved. They will show a valid class status, even
+ // if their components are erroneous.
+ if (reg_type.IsArrayTypes() && return_type.IsArrayTypes()) {
+ return_type.CanAssignArray(reg_type, reg_types_, class_loader_, &soft_error);
+ if (soft_error) {
+ Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "array with erroneous component type: "
+ << reg_type << " vs " << return_type;
+ }
+ }
+
+ if (!soft_error) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "returning '" << reg_type
+ << "', but expected from declaration '" << return_type << "'";
+ }
}
}
}
@@ -2742,9 +2755,17 @@
break;
// Special instructions.
- case Instruction::RETURN_VOID_BARRIER:
- if (!IsConstructor() || IsStatic()) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-void-barrier not expected";
+ case Instruction::RETURN_VOID_NO_BARRIER:
+ if (IsConstructor() && !IsStatic()) {
+ auto& declaring_class = GetDeclaringClass();
+ auto* klass = declaring_class.GetClass();
+ for (uint32_t i = 0, num_fields = klass->NumInstanceFields(); i < num_fields; ++i) {
+ if (klass->GetInstanceField(i)->IsFinal()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-void-no-barrier not expected for "
+ << PrettyField(klass->GetInstanceField(i));
+ break;
+ }
+ }
}
break;
// Note: the following instructions encode offsets derived from class linking.
@@ -3017,7 +3038,7 @@
// For returns we only care about the operand to the return, all other registers are dead.
const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn_idx);
Instruction::Code opcode = ret_inst->Opcode();
- if ((opcode == Instruction::RETURN_VOID) || (opcode == Instruction::RETURN_VOID_BARRIER)) {
+ if (opcode == Instruction::RETURN_VOID || opcode == Instruction::RETURN_VOID_NO_BARRIER) {
SafelyMarkAllRegistersAsConflicts(this, work_line_.get());
} else {
if (opcode == Instruction::RETURN_WIDE) {
@@ -3898,7 +3919,7 @@
{
StackHandleScope<1> hs(self_);
HandleWrapper<mirror::ArtField> h_field(hs.NewHandleWrapper(&field));
- field_type_class = h_field->GetType(can_load_classes_);
+ field_type_class = can_load_classes_ ? h_field->GetType<true>() : h_field->GetType<false>();
}
if (field_type_class != nullptr) {
field_type = ®_types_.FromClass(field->GetTypeDescriptor(), field_type_class,
@@ -4014,7 +4035,7 @@
{
StackHandleScope<1> hs(Thread::Current());
HandleWrapper<mirror::ArtField> h_field(hs.NewHandleWrapper(&field));
- field_type_class = h_field->GetType(can_load_classes_);
+ field_type_class = can_load_classes_ ? h_field->GetType<true>() : h_field->GetType<false>();
}
if (field_type_class != nullptr) {
@@ -4163,7 +4184,7 @@
// Initialize them as conflicts so they don't add to GC and deoptimization information.
const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn);
Instruction::Code opcode = ret_inst->Opcode();
- if ((opcode == Instruction::RETURN_VOID) || (opcode == Instruction::RETURN_VOID_BARRIER)) {
+ if (opcode == Instruction::RETURN_VOID || opcode == Instruction::RETURN_VOID_NO_BARRIER) {
SafelyMarkAllRegistersAsConflicts(this, target_line);
} else {
target_line->CopyFromLine(merge_line);
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 201169f..97d0cbe 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -822,5 +822,42 @@
return os;
}
+bool RegType::CanAssignArray(const RegType& src, RegTypeCache& reg_types,
+ Handle<mirror::ClassLoader> class_loader, bool* soft_error) const {
+ if (!IsArrayTypes() || !src.IsArrayTypes()) {
+ *soft_error = false;
+ return false;
+ }
+
+ const RegType& cmp1 = reg_types.GetComponentType(*this, class_loader.Get());
+ const RegType& cmp2 = reg_types.GetComponentType(src, class_loader.Get());
+
+ if (cmp1.IsAssignableFrom(cmp2)) {
+ return true;
+ }
+ if (cmp1.IsUnresolvedTypes()) {
+ if (cmp2.IsIntegralTypes() || cmp2.IsFloatTypes() || cmp2.IsArrayTypes()) {
+ *soft_error = false;
+ return false;
+ }
+ *soft_error = true;
+ return false;
+ }
+ if (cmp2.IsUnresolvedTypes()) {
+ if (cmp1.IsIntegralTypes() || cmp1.IsFloatTypes() || cmp1.IsArrayTypes()) {
+ *soft_error = false;
+ return false;
+ }
+ *soft_error = true;
+ return false;
+ }
+ if (!cmp1.IsArrayTypes() || !cmp2.IsArrayTypes()) {
+ *soft_error = false;
+ return false;
+ }
+ return cmp1.CanAssignArray(cmp2, reg_types, class_loader, soft_error);
+}
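+
+// Illustrative outcomes (assumed types, not from this change): for a
+// declared return type Number[] and a source UnresolvedType[], the
+// component test cannot be decided, so the check fails with
+// *soft_error == true; for Number[] against int[], a primitive component
+// can never match a reference component, so it fails hard with
+// *soft_error == false.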
+
} // namespace verifier
} // namespace art
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 73e131e..d260650 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -25,6 +25,7 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "gc_root.h"
+#include "handle_scope.h"
#include "object_callbacks.h"
#include "primitive.h"
@@ -205,6 +206,17 @@
bool IsAssignableFrom(const RegType& src) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Can this array type potentially be assigned by src?
+ // This function is necessary as array types are valid even if their component types are not,
+ // e.g., when the component type could not be resolved. The function returns true iff the
+ // types are assignable, and false otherwise. When it returns false, soft_error is set to
+ // true iff the assignment-test failure should be treated as a soft error, i.e., when both
+ // array types have the same 'depth' and the 'final' component types may be assignable (both
+ // are reference types).
+ bool CanAssignArray(const RegType& src, RegTypeCache& reg_types,
+ Handle<mirror::ClassLoader> class_loader, bool* soft_error) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Can this type be assigned by src? Variant of IsAssignableFrom that doesn't
// allow assignment to
// an interface from an Object.
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 78185bf..d389244 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -109,7 +109,6 @@
jfieldID WellKnownClasses::java_lang_Throwable_stackState;
jfieldID WellKnownClasses::java_lang_Throwable_suppressedExceptions;
jfieldID WellKnownClasses::java_lang_reflect_AbstractMethod_artMethod;
-jfieldID WellKnownClasses::java_lang_reflect_Field_artField;
jfieldID WellKnownClasses::java_lang_reflect_Proxy_h;
jfieldID WellKnownClasses::java_nio_DirectByteBuffer_capacity;
jfieldID WellKnownClasses::java_nio_DirectByteBuffer_effectiveDirectAddress;
@@ -244,7 +243,6 @@
java_lang_Throwable_stackState = CacheField(env, java_lang_Throwable, false, "stackState", "Ljava/lang/Object;");
java_lang_Throwable_suppressedExceptions = CacheField(env, java_lang_Throwable, false, "suppressedExceptions", "Ljava/util/List;");
java_lang_reflect_AbstractMethod_artMethod = CacheField(env, java_lang_reflect_AbstractMethod, false, "artMethod", "Ljava/lang/reflect/ArtMethod;");
- java_lang_reflect_Field_artField = CacheField(env, java_lang_reflect_Field, false, "artField", "Ljava/lang/reflect/ArtField;");
java_lang_reflect_Proxy_h = CacheField(env, java_lang_reflect_Proxy, false, "h", "Ljava/lang/reflect/InvocationHandler;");
java_nio_DirectByteBuffer_capacity = CacheField(env, java_nio_DirectByteBuffer, false, "capacity", "I");
java_nio_DirectByteBuffer_effectiveDirectAddress = CacheField(env, java_nio_DirectByteBuffer, false, "effectiveDirectAddress", "J");
@@ -269,7 +267,7 @@
void WellKnownClasses::LateInit(JNIEnv* env) {
ScopedLocalRef<jclass> java_lang_Runtime(env, env->FindClass("java/lang/Runtime"));
- java_lang_Runtime_nativeLoad = CacheMethod(env, java_lang_Runtime.get(), true, "nativeLoad", "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/String;)Ljava/lang/String;");
+ java_lang_Runtime_nativeLoad = CacheMethod(env, java_lang_Runtime.get(), true, "nativeLoad", "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;");
}
mirror::Class* WellKnownClasses::ToClass(jclass global_jclass) {
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 1a4f0f8..2df1c0e 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -103,7 +103,6 @@
static jfieldID dalvik_system_DexPathList__Element_dexFile;
static jfieldID dalvik_system_PathClassLoader_pathList;
static jfieldID java_lang_reflect_AbstractMethod_artMethod;
- static jfieldID java_lang_reflect_Field_artField;
static jfieldID java_lang_reflect_Proxy_h;
static jfieldID java_lang_Thread_daemon;
static jfieldID java_lang_Thread_group;
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 40be56c..76ef4a9 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -57,14 +57,15 @@
// We eliminate the non-live registers at a return, so only v3 is live.
// Note that it is OK for a compiler to not have a dex map at this dex PC because
// a return is not necessarily a safepoint.
- CHECK_REGS_CONTAIN_REFS(0x13U, false); // v3: y
- CHECK_REGS_CONTAIN_REFS(0x18U, true, 8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
- CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 5, 2, 1, 0); // v8: this, v5: x[1], v2: y, v1: x, v0: ex
- CHECK_REGS_CONTAIN_REFS(0x1dU, true, 8, 5, 2, 1, 0); // v8: this, v5: x[1], v2: y, v1: x, v0: ex
+ CHECK_REGS_CONTAIN_REFS(0x13U, false, 3); // v3: y
+ // Note that v0: ex can be eliminated because it's a dead merge of two different exceptions.
+ CHECK_REGS_CONTAIN_REFS(0x18U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
+ CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 5, 2, 1); // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
+ CHECK_REGS_CONTAIN_REFS(0x1dU, true, 8, 5, 2, 1); // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
// v5 is removed from the root set because there is a "merge" operation.
// See 0015: if-nez v2, 001f.
- CHECK_REGS_CONTAIN_REFS(0x1fU, true, 8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
- CHECK_REGS_CONTAIN_REFS(0x21U, true, 8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
+ CHECK_REGS_CONTAIN_REFS(0x1fU, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
+ CHECK_REGS_CONTAIN_REFS(0x21U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
CHECK_REGS_CONTAIN_REFS(0x27U, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
CHECK_REGS_CONTAIN_REFS(0x29U, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
CHECK_REGS_CONTAIN_REFS(0x2cU, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
diff --git a/test/046-reflect/src/Main.java b/test/046-reflect/src/Main.java
index 3fe3881..59f7001 100644
--- a/test/046-reflect/src/Main.java
+++ b/test/046-reflect/src/Main.java
@@ -696,27 +696,34 @@
private static void checkGetDeclaredConstructor() {
try {
Method.class.getDeclaredConstructor().setAccessible(true);
- System.out.print("Didn't get an exception from method getDeclaredConstructor");
+ System.out.print("Didn't get an exception from Method.class.getDeclaredConstructor().setAccessible");
+ } catch (SecurityException e) {
} catch (NoSuchMethodException e) {
} catch (Exception e) {
System.out.print(e);
}
try {
Field.class.getDeclaredConstructor().setAccessible(true);
- System.out.print("Didn't get an exception from field getDeclaredConstructor");
+ System.out.print("Didn't get an exception from Field.class.getDeclaredConstructor().setAccessible");
+ } catch (SecurityException e) {
} catch (NoSuchMethodException e) {
} catch (Exception e) {
System.out.print(e);
}
try {
Class.class.getDeclaredConstructor().setAccessible(true);
- System.out.print("Didn't get an exception from class getDeclaredConstructor()");
+ System.out.print("Didn't get an exception from Class.class.getDeclaredConstructor().setAccessible");
} catch (SecurityException e) {
+ } catch (NoSuchMethodException e) {
} catch (Exception e) {
System.out.print(e);
}
}
+ static void checkPrivateFieldAccess() {
+ (new OtherClass()).test();
+ }
+
public static void main(String[] args) throws Exception {
Main test = new Main();
test.run();
@@ -730,6 +737,7 @@
checkUnique();
checkParametrizedTypeEqualsAndHashCode();
checkGenericArrayTypeEqualsAndHashCode();
+ checkPrivateFieldAccess();
}
}
@@ -801,41 +809,41 @@
}
class FieldNoisyInit {
- static {
- System.out.println("FieldNoisyInit is initializing");
- //Throwable th = new Throwable();
- //th.printStackTrace();
- }
+ static {
+ System.out.println("FieldNoisyInit is initializing");
+ //Throwable th = new Throwable();
+ //th.printStackTrace();
+ }
}
class FieldNoisyInitUser {
- static {
- System.out.println("FieldNoisyInitUser is initializing");
- }
- public static int staticField;
- public static FieldNoisyInit noisy;
+ static {
+ System.out.println("FieldNoisyInitUser is initializing");
+ }
+ public static int staticField;
+ public static FieldNoisyInit noisy;
}
class MethodNoisyInit {
- static {
- System.out.println("MethodNoisyInit is initializing");
- //Throwable th = new Throwable();
- //th.printStackTrace();
- }
+ static {
+ System.out.println("MethodNoisyInit is initializing");
+ //Throwable th = new Throwable();
+ //th.printStackTrace();
+ }
}
class MethodNoisyInitUser {
- static {
- System.out.println("MethodNoisyInitUser is initializing");
- }
- public static void staticMethod() {}
- public void createMethodNoisyInit(MethodNoisyInit ni) {}
+ static {
+ System.out.println("MethodNoisyInitUser is initializing");
+ }
+ public static void staticMethod() {}
+ public void createMethodNoisyInit(MethodNoisyInit ni) {}
}
class Thrower {
- public Thrower() throws UnsupportedOperationException {
- throw new UnsupportedOperationException();
- }
+ public Thrower() throws UnsupportedOperationException {
+ throw new UnsupportedOperationException();
+ }
}
class ParametrizedTypeTest {
@@ -847,3 +855,17 @@
public void aMethod(T[] names) {}
public void aMethodIdentical(T[] names) {}
}
+
+class OtherClass {
+ private static final long LONG = 1234;
+ public void test() {
+ try {
+ Field field = getClass().getDeclaredField("LONG");
+ if (1234 != field.getLong(null)) {
+ System.out.println("ERROR: values don't match");
+ }
+ } catch (Exception e) {
+ System.out.println(e);
+ }
+ }
+}
\ No newline at end of file
diff --git a/test/134-reg-promotion/smali/Test.smali b/test/134-reg-promotion/smali/Test.smali
index 6a35c45..68d29dd 100644
--- a/test/134-reg-promotion/smali/Test.smali
+++ b/test/134-reg-promotion/smali/Test.smali
@@ -36,3 +36,28 @@
:end
return-void
.end method
+
+.method public static run2()V
+ .registers 4
+ new-instance v2, Ljava/lang/String;
+ invoke-direct {v2}, Ljava/lang/String;-><init>()V
+ const/4 v0, 0
+ move v1, v0
+ :start
+ invoke-static {}, LMain;->blowup()V
+ if-ne v1, v0, :end
+ const/4 v2, 1
+ invoke-static {v2}, Ljava/lang/Integer;->toString(I)Ljava/lang/String;
+ move-result-object v3
+ if-nez v3, :skip
+ const/4 v0, 0
+ :skip
+ # The Phi merging 0 with 0 hides the constant from the Quick compiler.
+ move v2, v0
+ # The call makes v2 float type.
+ invoke-static {v2}, Ljava/lang/Float;->isNaN(F)Z
+ const/4 v1, 1
+ goto :start
+ :end
+ return-void
+.end method
diff --git a/test/134-reg-promotion/src/Main.java b/test/134-reg-promotion/src/Main.java
index d45ec66..008ac58 100644
--- a/test/134-reg-promotion/src/Main.java
+++ b/test/134-reg-promotion/src/Main.java
@@ -38,5 +38,11 @@
m.invoke(null, (Object[]) null);
holder = null;
}
+ m = c.getMethod("run2", (Class[]) null);
+ for (int i = 0; i < 10; i++) {
+ holder = new char[128 * 1024][];
+ m.invoke(null, (Object[]) null);
+ holder = null;
+ }
}
}
diff --git a/test/454-get-vreg/get_vreg_jni.cc b/test/454-get-vreg/get_vreg_jni.cc
index 937d2fe..6b4bc11 100644
--- a/test/454-get-vreg/get_vreg_jni.cc
+++ b/test/454-get-vreg/get_vreg_jni.cc
@@ -55,7 +55,7 @@
CHECK_EQ(value, 1u);
CHECK(GetVReg(m, 5, kFloatVReg, &value));
- uint32_t cast = bit_cast<float, uint32_t>(1.0f);
+ uint32_t cast = bit_cast<uint32_t, float>(1.0f);
CHECK_EQ(value, cast);
CHECK(GetVReg(m, 6, kIntVReg, &value));
@@ -95,7 +95,7 @@
CHECK_EQ(value, 0u);
CHECK(GetVRegPair(m, 13, kDoubleLoVReg, kDoubleHiVReg, &value));
- uint64_t cast = bit_cast<double, uint64_t>(2.0);
+ uint64_t cast = bit_cast<uint64_t, double>(2.0);
CHECK_EQ(value, cast);
}
diff --git a/test/455-set-vreg/set_vreg_jni.cc b/test/455-set-vreg/set_vreg_jni.cc
index 24d7832..0a83ac0 100644
--- a/test/455-set-vreg/set_vreg_jni.cc
+++ b/test/455-set-vreg/set_vreg_jni.cc
@@ -60,21 +60,21 @@
CHECK(GetVReg(m, 1, kReferenceVReg, &value));
CHECK_EQ(reinterpret_cast<mirror::Object*>(value), this_value_);
- CHECK(SetVReg(m, 2, bit_cast<float, uint32_t>(5.0f), kFloatVReg));
- CHECK(SetVReg(m, 3, bit_cast<float, uint32_t>(4.0f), kFloatVReg));
- CHECK(SetVReg(m, 4, bit_cast<float, uint32_t>(3.0f), kFloatVReg));
- CHECK(SetVReg(m, 5, bit_cast<float, uint32_t>(2.0f), kFloatVReg));
- CHECK(SetVReg(m, 6, bit_cast<float, uint32_t>(1.0f), kFloatVReg));
+ CHECK(SetVReg(m, 2, bit_cast<uint32_t, float>(5.0f), kFloatVReg));
+ CHECK(SetVReg(m, 3, bit_cast<uint32_t, float>(4.0f), kFloatVReg));
+ CHECK(SetVReg(m, 4, bit_cast<uint32_t, float>(3.0f), kFloatVReg));
+ CHECK(SetVReg(m, 5, bit_cast<uint32_t, float>(2.0f), kFloatVReg));
+ CHECK(SetVReg(m, 6, bit_cast<uint32_t, float>(1.0f), kFloatVReg));
} else if (m_name.compare("testDoubleVReg") == 0) {
uint32_t value = 0;
CHECK(GetVReg(m, 3, kReferenceVReg, &value));
CHECK_EQ(reinterpret_cast<mirror::Object*>(value), this_value_);
- CHECK(SetVRegPair(m, 4, bit_cast<double, uint64_t>(5.0), kDoubleLoVReg, kDoubleHiVReg));
- CHECK(SetVRegPair(m, 6, bit_cast<double, uint64_t>(4.0), kDoubleLoVReg, kDoubleHiVReg));
- CHECK(SetVRegPair(m, 8, bit_cast<double, uint64_t>(3.0), kDoubleLoVReg, kDoubleHiVReg));
- CHECK(SetVRegPair(m, 10, bit_cast<double, uint64_t>(2.0), kDoubleLoVReg, kDoubleHiVReg));
- CHECK(SetVRegPair(m, 12, bit_cast<double, uint64_t>(1.0), kDoubleLoVReg, kDoubleHiVReg));
+ CHECK(SetVRegPair(m, 4, bit_cast<uint64_t, double>(5.0), kDoubleLoVReg, kDoubleHiVReg));
+ CHECK(SetVRegPair(m, 6, bit_cast<uint64_t, double>(4.0), kDoubleLoVReg, kDoubleHiVReg));
+ CHECK(SetVRegPair(m, 8, bit_cast<uint64_t, double>(3.0), kDoubleLoVReg, kDoubleHiVReg));
+ CHECK(SetVRegPair(m, 10, bit_cast<uint64_t, double>(2.0), kDoubleLoVReg, kDoubleHiVReg));
+ CHECK(SetVRegPair(m, 12, bit_cast<uint64_t, double>(1.0), kDoubleLoVReg, kDoubleHiVReg));
}
return true;
diff --git a/test/457-regs/regs_jni.cc b/test/457-regs/regs_jni.cc
index ce701e8..1b32348 100644
--- a/test/457-regs/regs_jni.cc
+++ b/test/457-regs/regs_jni.cc
@@ -69,7 +69,7 @@
CHECK_EQ(value, 1u);
CHECK(GetVReg(m, 4, kFloatVReg, &value));
- uint32_t cast = bit_cast<float, uint32_t>(4.0f);
+ uint32_t cast = bit_cast<uint32_t, float>(4.0f);
CHECK_EQ(value, cast);
did_check_ = true;
} else if (m_name.compare("phiEquivalent") == 0) {
@@ -138,7 +138,7 @@
std::unique_ptr<Context> context(Context::Create());
CHECK(soa.Decode<mirror::Object*>(main) == nullptr);
CHECK_EQ(int_value, 0);
- int32_t cast = bit_cast<float, int32_t>(float_value);
+ int32_t cast = bit_cast<int32_t, float>(float_value);
CHECK_EQ(cast, 0);
TestVisitor visitor(soa.Self(), context.get());
visitor.WalkStack();
diff --git a/test/462-checker-inlining-across-dex-files/expected.txt b/test/462-checker-inlining-across-dex-files/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/462-checker-inlining-across-dex-files/expected.txt
diff --git a/test/462-checker-inlining-across-dex-files/info.txt b/test/462-checker-inlining-across-dex-files/info.txt
new file mode 100644
index 0000000..57008c3
--- /dev/null
+++ b/test/462-checker-inlining-across-dex-files/info.txt
@@ -0,0 +1 @@
+Check our inlining heuristics across dex files in optimizing.
diff --git a/test/462-checker-inlining-across-dex-files/src-multidex/OtherDex.java b/test/462-checker-inlining-across-dex-files/src-multidex/OtherDex.java
new file mode 100644
index 0000000..cee8e0f
--- /dev/null
+++ b/test/462-checker-inlining-across-dex-files/src-multidex/OtherDex.java
@@ -0,0 +1,64 @@
+/*
+* Copyright (C) 2015 The Android Open Source Project
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+public class OtherDex {
+ public static void emptyMethod() {
+ }
+
+ public static int returnIntMethod() {
+ return 38;
+ }
+
+ public static int returnOtherDexStatic() {
+ return myStatic;
+ }
+
+ public static int returnMainStatic() {
+ return Main.myStatic;
+ }
+
+ public static int recursiveCall() {
+ return recursiveCall();
+ }
+
+ public static String returnString() {
+ return "OtherDex";
+ }
+
+ public static Class returnOtherDexClass() {
+ return OtherDex.class;
+ }
+
+ public static Class returnMainClass() {
+ return Main.class;
+ }
+
+ private static Class returnOtherDexClass2() {
+ return OtherDex.class;
+ }
+
+ public static Class returnOtherDexClassStaticCall() {
+ // Do not call returnOtherDexClass, as it may have been flagged
+ // as non-inlineable.
+ return returnOtherDexClass2();
+ }
+
+ public static Class returnOtherDexCallingMain() {
+ return Main.getOtherClass();
+ }
+
+ static int myStatic = 1;
+}
diff --git a/test/462-checker-inlining-across-dex-files/src/Main.java b/test/462-checker-inlining-across-dex-files/src/Main.java
new file mode 100644
index 0000000..d5563b8
--- /dev/null
+++ b/test/462-checker-inlining-across-dex-files/src/Main.java
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Add a class that will be the first entry in the dex cache, to
+// avoid having the OtherDex and Main classes share the same cache index.
+class AAA {
+}
+
+public class Main {
+
+ // CHECK-START: void Main.inlineEmptyMethod() inliner (before)
+ // CHECK-DAG: [[Invoke:v\d+]] InvokeStaticOrDirect
+ // CHECK-DAG: ReturnVoid
+
+ // CHECK-START: void Main.inlineEmptyMethod() inliner (after)
+ // CHECK-NOT: InvokeStaticOrDirect
+
+ public static void inlineEmptyMethod() {
+ OtherDex.emptyMethod();
+ }
+
+ // CHECK-START: int Main.inlineReturnIntMethod() inliner (before)
+ // CHECK-DAG: [[Invoke:i\d+]] InvokeStaticOrDirect
+ // CHECK-DAG: Return [ [[Invoke]] ]
+
+ // CHECK-START: int Main.inlineReturnIntMethod() inliner (after)
+ // CHECK-NOT: InvokeStaticOrDirect
+
+ // CHECK-START: int Main.inlineReturnIntMethod() inliner (after)
+ // CHECK-DAG: [[Const38:i\d+]] IntConstant 38
+ // CHECK-DAG: Return [ [[Const38]] ]
+
+ public static int inlineReturnIntMethod() {
+ return OtherDex.returnIntMethod();
+ }
+
+ // CHECK-START: int Main.dontInlineOtherDexStatic() inliner (before)
+ // CHECK-DAG: [[Invoke:i\d+]] InvokeStaticOrDirect
+ // CHECK-DAG: Return [ [[Invoke]] ]
+
+ // CHECK-START: int Main.dontInlineOtherDexStatic() inliner (after)
+ // CHECK-DAG: [[Invoke:i\d+]] InvokeStaticOrDirect
+ // CHECK-DAG: Return [ [[Invoke]] ]
+
+ public static int dontInlineOtherDexStatic() {
+ return OtherDex.returnOtherDexStatic();
+ }
+
+ // CHECK-START: int Main.inlineMainStatic() inliner (before)
+ // CHECK-DAG: [[Invoke:i\d+]] InvokeStaticOrDirect
+ // CHECK-DAG: Return [ [[Invoke]] ]
+
+ // CHECK-START: int Main.inlineMainStatic() inliner (after)
+ // CHECK-NOT: InvokeStaticOrDirect
+
+ // CHECK-START: int Main.inlineMainStatic() inliner (after)
+ // CHECK-DAG: [[Static:i\d+]] StaticFieldGet
+ // CHECK-DAG: Return [ [[Static]] ]
+
+ public static int inlineMainStatic() {
+ return OtherDex.returnMainStatic();
+ }
+
+ // CHECK-START: int Main.dontInlineRecursiveCall() inliner (before)
+ // CHECK-DAG: [[Invoke:i\d+]] InvokeStaticOrDirect
+ // CHECK-DAG: Return [ [[Invoke]] ]
+
+ // CHECK-START: int Main.dontInlineRecursiveCall() inliner (after)
+ // CHECK-DAG: [[Invoke:i\d+]] InvokeStaticOrDirect
+ // CHECK-DAG: Return [ [[Invoke]] ]
+
+ public static int dontInlineRecursiveCall() {
+ return OtherDex.recursiveCall();
+ }
+
+ // CHECK-START: java.lang.String Main.dontInlineReturnString() inliner (before)
+ // CHECK-DAG: [[Invoke:l\d+]] InvokeStaticOrDirect
+ // CHECK-DAG: Return [ [[Invoke]] ]
+
+ // CHECK-START: java.lang.String Main.dontInlineReturnString() inliner (after)
+ // CHECK-DAG: [[Invoke:l\d+]] InvokeStaticOrDirect
+ // CHECK-DAG: Return [ [[Invoke]] ]
+
+ public static String dontInlineReturnString() {
+ return OtherDex.returnString();
+ }
+
+ // CHECK-START: java.lang.Class Main.dontInlineOtherDexClass() inliner (before)
+ // CHECK-DAG: [[Invoke:l\d+]] InvokeStaticOrDirect
+ // CHECK-DAG: Return [ [[Invoke]] ]
+
+ // CHECK-START: java.lang.Class Main.dontInlineOtherDexClass() inliner (after)
+ // CHECK-DAG: [[Invoke:l\d+]] InvokeStaticOrDirect
+ // CHECK-DAG: Return [ [[Invoke]] ]
+
+ public static Class dontInlineOtherDexClass() {
+ return OtherDex.returnOtherDexClass();
+ }
+
+ // CHECK-START: java.lang.Class Main.inlineMainClass() inliner (before)
+ // CHECK-DAG: [[Invoke:l\d+]] InvokeStaticOrDirect
+ // CHECK-DAG: Return [ [[Invoke]] ]
+
+ // CHECK-START: java.lang.Class Main.inlineMainClass() inliner (after)
+ // CHECK-NOT: InvokeStaticOrDirect
+
+ // CHECK-START: java.lang.Class Main.inlineMainClass() inliner (after)
+ // CHECK-DAG: [[Class:l\d+]] LoadClass
+ // CHECK-DAG: Return [ [[Class]] ]
+
+ public static Class inlineMainClass() {
+ return OtherDex.returnMainClass();
+ }
+
+ // CHECK-START: java.lang.Class Main.dontInlineOtherDexClassStaticCall() inliner (before)
+ // CHECK-DAG: [[Invoke:l\d+]] InvokeStaticOrDirect
+ // CHECK-DAG: Return [ [[Invoke]] ]
+
+ // CHECK-START: java.lang.Class Main.dontInlineOtherDexClassStaticCall() inliner (after)
+ // CHECK-DAG: [[Invoke:l\d+]] InvokeStaticOrDirect
+ // CHECK-DAG: Return [ [[Invoke]] ]
+
+ public static Class dontInlineOtherDexClassStaticCall() {
+ return OtherDex.returnOtherDexClassStaticCall();
+ }
+
+ // CHECK-START: java.lang.Class Main.inlineOtherDexCallingMain() inliner (before)
+ // CHECK-DAG: [[Invoke:l\d+]] InvokeStaticOrDirect
+ // CHECK-DAG: Return [ [[Invoke]] ]
+
+ // CHECK-START: java.lang.Class Main.inlineOtherDexCallingMain() inliner (after)
+ // CHECK-NOT: InvokeStaticOrDirect
+
+ // CHECK-START: java.lang.Class Main.inlineOtherDexCallingMain() inliner (after)
+ // CHECK-DAG: [[Class:l\d+]] LoadClass
+ // CHECK-DAG: Return [ [[Class]] ]
+
+ public static Class inlineOtherDexCallingMain() {
+ return OtherDex.returnOtherDexCallingMain();
+ }
+
+ public static Class getOtherClass() {
+ return Main.class;
+ }
+
+ public static void main(String[] args) {
+ inlineEmptyMethod();
+ if (inlineReturnIntMethod() != 38) {
+ throw new Error("Expected 38");
+ }
+
+ if (dontInlineOtherDexStatic() != 1) {
+ throw new Error("Expected 1");
+ }
+
+ if (inlineMainStatic() != 42) {
+ throw new Error("Expected 42");
+ }
+
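+    // String literals are interned, so the reference comparison against
+    // "OtherDex" below is well-defined.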
+ if (dontInlineReturnString() != "OtherDex") {
+ throw new Error("Expected OtherDex");
+ }
+
+ if (dontInlineOtherDexClass() != OtherDex.class) {
+ throw new Error("Expected " + OtherDex.class);
+ }
+
+ if (dontInlineOtherDexClassStaticCall() != OtherDex.class) {
+ throw new Error("Expected " + OtherDex.class);
+ }
+
+ if (inlineMainClass() != Main.class) {
+ throw new Error("Expected " + Main.class);
+ }
+
+ if (inlineOtherDexCallingMain() != Main.class) {
+ throw new Error("Expected " + Main.class);
+ }
+ }
+
+ // Reference the AAA class to ensure it is in the dex cache.
+ public static Class<?> cls = AAA.class;
+
+ // Add a field that will be the first entry in the dex cache, to
+ // avoid having the OtherDex.myStatic and Main.myStatic fields
+ // share the same cache index.
+ public static int aaa = 32;
+ public static int myStatic = 42;
+}
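For readers new to the Checker harness used throughout these tests: a `// CHECK-START: <method> <pass> (before|after)` header selects one pass's IR dump; `CHECK-DAG` lines may match in any order, plain `CHECK` lines must match in order, and `CHECK-NOT` forbids a match; `[[Name:regex]]` binds a pattern variable that later `[[Name]]` uses must reproduce verbatim. An abridged, illustrative dump fragment for inlineReturnIntMethod after inlining:

    i8 IntConstant 38
    Return [ i8 ]

This satisfies `[[Const38:i\d+]] IntConstant 38` by binding Const38 to `i8`, after which `Return [ [[Const38]] ]` matches the second line.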
diff --git a/test/463-checker-boolean-simplifier/expected.txt b/test/463-checker-boolean-simplifier/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/463-checker-boolean-simplifier/expected.txt
diff --git a/test/463-checker-boolean-simplifier/info.txt b/test/463-checker-boolean-simplifier/info.txt
new file mode 100644
index 0000000..9c0493a
--- /dev/null
+++ b/test/463-checker-boolean-simplifier/info.txt
@@ -0,0 +1 @@
+Tests simplification of boolean NOT in the optimizing compiler.
diff --git a/test/463-checker-boolean-simplifier/src/Main.java b/test/463-checker-boolean-simplifier/src/Main.java
new file mode 100644
index 0000000..efe0d3f
--- /dev/null
+++ b/test/463-checker-boolean-simplifier/src/Main.java
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ // Note #1: `javac` flips the conditions of If statements.
+ // Note #2: In the optimizing compiler, the first input of Phi is always
+ // the fall-through path, i.e. the false branch.
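+  // Illustration (a reading aid, not matched by Checker): for
+  // `return (x < y) ? true : false;` below, javac emits the inverted test
+  // GreaterThanOrEqual(x, y); its fall-through path (x < y) produces the
+  // true constant, so the Phi inputs read [ Const1, Const0 ].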
+
+ public static void assertBoolEquals(boolean expected, boolean result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ /*
+   * Elementary test negating a boolean. Verifies that the condition is replaced,
+   * the blocks are merged, and the empty branches are removed.
+ */
+
+ // CHECK-START: boolean Main.BooleanNot(boolean) boolean_simplifier (before)
+ // CHECK-DAG: [[Param:z\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
+ // CHECK-DAG: [[NotEq:z\d+]] NotEqual [ [[Param]] [[Const0]] ]
+ // CHECK-DAG: If [ [[NotEq]] ]
+ // CHECK-DAG: [[Phi:i\d+]] Phi [ [[Const1]] [[Const0]] ]
+ // CHECK-DAG: Return [ [[Phi]] ]
+
+ // CHECK-START: boolean Main.BooleanNot(boolean) boolean_simplifier (before)
+ // CHECK: Goto
+ // CHECK: Goto
+ // CHECK: Goto
+ // CHECK-NOT: Goto
+
+ // CHECK-START: boolean Main.BooleanNot(boolean) boolean_simplifier (after)
+ // CHECK-DAG: [[Param:z\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: [[Eq:z\d+]] Equal [ [[Param]] [[Const0]] ]
+ // CHECK-DAG: Return [ [[Eq]] ]
+
+ // CHECK-START: boolean Main.BooleanNot(boolean) boolean_simplifier (after)
+ // CHECK-NOT: NotEqual
+ // CHECK-NOT: If
+ // CHECK-NOT: Phi
+
+ // CHECK-START: boolean Main.BooleanNot(boolean) boolean_simplifier (after)
+ // CHECK: Goto
+ // CHECK-NOT: Goto
+
+ public static boolean BooleanNot(boolean x) {
+ return !x;
+ }
+
+ /*
+ * Program which only delegates the condition, i.e. returns 1 when True
+ * and 0 when False.
+ */
+
+ // CHECK-START: boolean Main.GreaterThan(int, int) boolean_simplifier (before)
+ // CHECK-DAG: [[ParamX:i\d+]] ParameterValue
+ // CHECK-DAG: [[ParamY:i\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
+ // CHECK-DAG: [[Cond:z\d+]] GreaterThan [ [[ParamX]] [[ParamY]] ]
+ // CHECK-DAG: If [ [[Cond]] ]
+ // CHECK-DAG: [[Phi:i\d+]] Phi [ [[Const0]] [[Const1]] ]
+ // CHECK-DAG: Return [ [[Phi]] ]
+
+ // CHECK-START: boolean Main.GreaterThan(int, int) boolean_simplifier (after)
+ // CHECK-DAG: [[ParamX:i\d+]] ParameterValue
+ // CHECK-DAG: [[ParamY:i\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
+ // CHECK-DAG: [[Cond:z\d+]] GreaterThan [ [[ParamX]] [[ParamY]] ]
+ // CHECK-DAG: Return [ [[Cond]] ]
+
+ public static boolean GreaterThan(int x, int y) {
+ return (x <= y) ? false : true;
+ }
+
+ /*
+ * Program which negates a condition, i.e. returns 0 when True
+ * and 1 when False.
+ */
+
+ // CHECK-START: boolean Main.LessThan(int, int) boolean_simplifier (before)
+ // CHECK-DAG: [[ParamX:i\d+]] ParameterValue
+ // CHECK-DAG: [[ParamY:i\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
+ // CHECK-DAG: [[Cond:z\d+]] GreaterThanOrEqual [ [[ParamX]] [[ParamY]] ]
+ // CHECK-DAG: If [ [[Cond]] ]
+ // CHECK-DAG: [[Phi:i\d+]] Phi [ [[Const1]] [[Const0]] ]
+ // CHECK-DAG: Return [ [[Phi]] ]
+
+ // CHECK-START: boolean Main.LessThan(int, int) boolean_simplifier (after)
+ // CHECK-DAG: [[ParamX:i\d+]] ParameterValue
+ // CHECK-DAG: [[ParamY:i\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
+ // CHECK-DAG: [[Cond:z\d+]] LessThan [ [[ParamX]] [[ParamY]] ]
+ // CHECK-DAG: Return [ [[Cond]] ]
+
+ public static boolean LessThan(int x, int y) {
+ return (x < y) ? true : false;
+ }
+
+ /*
+ * Program which further uses negated conditions.
+ * Note that Phis are discovered retrospectively.
+ */
+
+ // CHECK-START: boolean Main.ValuesOrdered(int, int, int) boolean_simplifier (before)
+ // CHECK-DAG: [[ParamX:i\d+]] ParameterValue
+ // CHECK-DAG: [[ParamY:i\d+]] ParameterValue
+ // CHECK-DAG: [[ParamZ:i\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
+ // CHECK-DAG: [[CondXY:z\d+]] GreaterThan [ [[ParamX]] [[ParamY]] ]
+ // CHECK-DAG: If [ [[CondXY]] ]
+ // CHECK-DAG: [[CondYZ:z\d+]] GreaterThan [ [[ParamY]] [[ParamZ]] ]
+ // CHECK-DAG: If [ [[CondYZ]] ]
+ // CHECK-DAG: [[CondXYZ:z\d+]] NotEqual [ [[PhiXY:i\d+]] [[PhiYZ:i\d+]] ]
+ // CHECK-DAG: If [ [[CondXYZ]] ]
+ // CHECK-DAG: Return [ [[PhiXYZ:i\d+]] ]
+ // CHECK-DAG: [[PhiXY]] Phi [ [[Const1]] [[Const0]] ]
+ // CHECK-DAG: [[PhiYZ]] Phi [ [[Const1]] [[Const0]] ]
+ // CHECK-DAG: [[PhiXYZ]] Phi [ [[Const1]] [[Const0]] ]
+
+ // CHECK-START: boolean Main.ValuesOrdered(int, int, int) boolean_simplifier (after)
+ // CHECK-DAG: [[ParamX:i\d+]] ParameterValue
+ // CHECK-DAG: [[ParamY:i\d+]] ParameterValue
+ // CHECK-DAG: [[ParamZ:i\d+]] ParameterValue
+ // CHECK-DAG: [[CmpXY:z\d+]] LessThanOrEqual [ [[ParamX]] [[ParamY]] ]
+ // CHECK-DAG: [[CmpYZ:z\d+]] LessThanOrEqual [ [[ParamY]] [[ParamZ]] ]
+ // CHECK-DAG: [[CmpXYZ:z\d+]] Equal [ [[CmpXY]] [[CmpYZ]] ]
+ // CHECK-DAG: Return [ [[CmpXYZ]] ]
+
+ public static boolean ValuesOrdered(int x, int y, int z) {
+ return (x <= y) == (y <= z);
+ }
+
+ public static void main(String[] args) {
+ assertBoolEquals(false, BooleanNot(true));
+ assertBoolEquals(true, BooleanNot(false));
+ assertBoolEquals(true, GreaterThan(10, 5));
+ assertBoolEquals(false, GreaterThan(10, 10));
+ assertBoolEquals(false, GreaterThan(5, 10));
+ assertBoolEquals(true, LessThan(5, 10));
+ assertBoolEquals(false, LessThan(10, 10));
+ assertBoolEquals(false, LessThan(10, 5));
+ assertBoolEquals(true, ValuesOrdered(1, 3, 5));
+ assertBoolEquals(true, ValuesOrdered(5, 3, 1));
+ assertBoolEquals(false, ValuesOrdered(1, 3, 2));
+ assertBoolEquals(false, ValuesOrdered(2, 3, 1));
+ assertBoolEquals(true, ValuesOrdered(3, 3, 3));
+ assertBoolEquals(true, ValuesOrdered(3, 3, 5));
+ assertBoolEquals(false, ValuesOrdered(5, 5, 3));
+ }
+}
diff --git a/test/464-checker-inline-sharpen-calls/expected.txt b/test/464-checker-inline-sharpen-calls/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/464-checker-inline-sharpen-calls/expected.txt
diff --git a/test/464-checker-inline-sharpen-calls/info.txt b/test/464-checker-inline-sharpen-calls/info.txt
new file mode 100644
index 0000000..9e56030
--- /dev/null
+++ b/test/464-checker-inline-sharpen-calls/info.txt
@@ -0,0 +1 @@
+Check that we inline sharpened calls.
diff --git a/test/464-checker-inline-sharpen-calls/src/Main.java b/test/464-checker-inline-sharpen-calls/src/Main.java
new file mode 100644
index 0000000..1b25b42
--- /dev/null
+++ b/test/464-checker-inline-sharpen-calls/src/Main.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public final class Main {
+
+ public void invokeVirtual() {
+ }
+
+ // CHECK-START: void Main.inlineSharpenInvokeVirtual(Main) inliner (before)
+ // CHECK-DAG: [[Invoke:v\d+]] InvokeStaticOrDirect
+ // CHECK-DAG: ReturnVoid
+
+ // CHECK-START: void Main.inlineSharpenInvokeVirtual(Main) inliner (after)
+ // CHECK-NOT: InvokeStaticOrDirect
+
+ public static void inlineSharpenInvokeVirtual(Main m) {
+ m.invokeVirtual();
+ }
+
+ // CHECK-START: int Main.inlineSharpenStringInvoke() inliner (before)
+ // CHECK-DAG: [[Invoke:i\d+]] InvokeStaticOrDirect
+ // CHECK-DAG: Return [ [[Invoke]] ]
+
+ // CHECK-START: int Main.inlineSharpenStringInvoke() inliner (after)
+ // CHECK-NOT: InvokeStaticOrDirect
+
+ // CHECK-START: int Main.inlineSharpenStringInvoke() inliner (after)
+ // CHECK-DAG: [[Field:i\d+]] InstanceFieldGet
+ // CHECK-DAG: Return [ [[Field]] ]
+
+ public static int inlineSharpenStringInvoke() {
+ return "Foo".length();
+ }
+
+ public static void main(String[] args) {
+ inlineSharpenInvokeVirtual(new Main());
+ if (inlineSharpenStringInvoke() != 3) {
+ throw new Error("Expected 3");
+ }
+ }
+}
diff --git a/test/465-checker-clinit-gvn/expected.txt b/test/465-checker-clinit-gvn/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/465-checker-clinit-gvn/expected.txt
diff --git a/test/465-checker-clinit-gvn/info.txt b/test/465-checker-clinit-gvn/info.txt
new file mode 100644
index 0000000..ac28a8f
--- /dev/null
+++ b/test/465-checker-clinit-gvn/info.txt
@@ -0,0 +1 @@
+Check that we GVN HClinitCheck instructions.
diff --git a/test/465-checker-clinit-gvn/src/Main.java b/test/465-checker-clinit-gvn/src/Main.java
new file mode 100644
index 0000000..dcaef6f
--- /dev/null
+++ b/test/465-checker-clinit-gvn/src/Main.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class OtherClass {
+ static {
+ a = 42;
+ b = 54;
+ }
+
+ static int a;
+ static int b;
+}
+
+public final class Main {
+
+ // CHECK-START: int Main.accessTwoStatics() GVN (before)
+ // CHECK-DAG: [[Class1:l\d+]] LoadClass
+ // CHECK-DAG: ClinitCheck [ [[Class1]] ]
+ // CHECK-DAG: [[Class2:l\d+]] LoadClass
+ // CHECK-DAG: ClinitCheck [ [[Class2]] ]
+
+ // CHECK-START: int Main.accessTwoStatics() GVN (after)
+ // CHECK-DAG: [[Class:l\d+]] LoadClass
+ // CHECK-DAG: ClinitCheck [ [[Class]] ]
+ // CHECK-NOT: ClinitCheck
+
+ public static int accessTwoStatics() {
+ return OtherClass.b - OtherClass.a;
+ }
+
+ // CHECK-START: int Main.accessTwoStaticsCallInBetween() GVN (before)
+ // CHECK-DAG: [[Class1:l\d+]] LoadClass
+ // CHECK-DAG: ClinitCheck [ [[Class1]] ]
+ // CHECK-DAG: [[Class2:l\d+]] LoadClass
+ // CHECK-DAG: ClinitCheck [ [[Class2]] ]
+
+ // CHECK-START: int Main.accessTwoStaticsCallInBetween() GVN (after)
+ // CHECK-DAG: [[Class:l\d+]] LoadClass
+ // CHECK-DAG: ClinitCheck [ [[Class]] ]
+ // CHECK-NOT: ClinitCheck
+
+ public static int accessTwoStaticsCallInBetween() {
+ int b = OtherClass.b;
+ foo();
+ return b - OtherClass.a;
+ }
+
+ public static void foo() {
+ try {
+ Thread.sleep(0);
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+
+ public static void main(String[] args) {
+ if (accessTwoStatics() != 12) {
+ throw new Error("Expected 12");
+ }
+
+ if (accessTwoStaticsCallInBetween() != 12) {
+ throw new Error("Expected 12");
+ }
+ }
+}
diff --git a/test/467-regalloc-pair/expected.txt b/test/467-regalloc-pair/expected.txt
new file mode 100644
index 0000000..da39d9d
--- /dev/null
+++ b/test/467-regalloc-pair/expected.txt
@@ -0,0 +1 @@
+In interface
diff --git a/test/467-regalloc-pair/info.txt b/test/467-regalloc-pair/info.txt
new file mode 100644
index 0000000..882a29c
--- /dev/null
+++ b/test/467-regalloc-pair/info.txt
@@ -0,0 +1,2 @@
+Regression test for optimizing's register allocator
+that used to trip when compiling TestCase.testCase on x86.
diff --git a/test/467-regalloc-pair/smali/TestCase.smali b/test/467-regalloc-pair/smali/TestCase.smali
new file mode 100644
index 0000000..a3101fe
--- /dev/null
+++ b/test/467-regalloc-pair/smali/TestCase.smali
@@ -0,0 +1,59 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTestCase;
+
+.super Ljava/lang/Object;
+
+.method public static testCase([BLMain;)V
+ .registers 12
+ const/4 v2, 0
+ array-length v0, v10
+ div-int/lit8 v0, v0, 7
+ invoke-static {v2, v0}, Ljava/lang/Math;->max(II)I
+ move-result v7
+ move v6, v2
+ move v3, v2
+ :label5
+ if-ge v6, v7, :label1
+ const-wide/16 v0, 0
+ move-wide v4, v0
+ move v1, v2
+ move v0, v3
+ :label4
+ const/4 v3, 6
+ if-ge v1, v3, :label2
+ const/16 v3, 8
+ shl-long/2addr v4, v3
+ add-int/lit8 v3, v0, 1
+ aget-byte v0, v10, v0
+ if-gez v0, :label3
+ add-int/lit16 v0, v0, 256
+ :label3
+ int-to-long v8, v0
+ or-long/2addr v4, v8
+ add-int/lit8 v0, v1, 1
+ move v1, v0
+ move v0, v3
+ goto :label4
+ :label2
+ add-int/lit8 v3, v0, 1
+ aget-byte v0, v10, v0
+ invoke-interface {v11, v4, v5, v0}, LItf;->invokeInterface(JI)V
+ add-int/lit8 v0, v6, 1
+ move v6, v0
+ goto :label5
+ :label1
+ return-void
+.end method
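A rough Java equivalent of the smali above, as a reading aid only (the variable names are invented; the test deliberately ships the exact register assignment that used to trip the x86 allocator): each iteration packs six unsigned bytes into a long, which lives in a register pair, then passes it to the interface call together with a seventh byte.

    static void testCase(byte[] data, Main m) {
      int records = Math.max(0, data.length / 7);
      int idx = 0;
      for (int i = 0; i < records; i++) {
        long value = 0;
        for (int j = 0; j < 6; j++) {
          // the if-gez / add-int/lit16 256 pair in the smali is the
          // unsigned-byte conversion spelled (b & 0xff) here
          value = (value << 8) | (data[idx++] & 0xff);
        }
        m.invokeInterface(value, data[idx++]);
      }
    }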
diff --git a/test/467-regalloc-pair/src/Main.java b/test/467-regalloc-pair/src/Main.java
new file mode 100644
index 0000000..aac07fd
--- /dev/null
+++ b/test/467-regalloc-pair/src/Main.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+interface Itf {
+ public void invokeInterface(long l, int i);
+}
+
+public class Main implements Itf {
+
+ // Workaround for b/18051191.
+ class InnerClass {}
+
+ public static void main(String[] args) throws Exception {
+ Class<?> c = Class.forName("TestCase");
+ Method m = c.getMethod("testCase", byte[].class, Main.class);
+ m.invoke(null, new byte[] { 0, 1, 2, 3, 4, 5, 6, 7 }, new Main());
+ }
+
+ public void invokeInterface(long l, int i) {
+ System.out.println("In interface");
+ }
+}
diff --git a/test/468-bool-simplifier-regression/expected.txt b/test/468-bool-simplifier-regression/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/468-bool-simplifier-regression/expected.txt
diff --git a/test/468-bool-simplifier-regression/info.txt b/test/468-bool-simplifier-regression/info.txt
new file mode 100644
index 0000000..0a46584
--- /dev/null
+++ b/test/468-bool-simplifier-regression/info.txt
@@ -0,0 +1,2 @@
+Regression test for optimizing's boolean simplifier
+that used to trip when a boolean value was the input of an If.
diff --git a/test/468-bool-simplifier-regression/smali/TestCase.smali b/test/468-bool-simplifier-regression/smali/TestCase.smali
new file mode 100644
index 0000000..f36304d
--- /dev/null
+++ b/test/468-bool-simplifier-regression/smali/TestCase.smali
@@ -0,0 +1,32 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTestCase;
+
+.super Ljava/lang/Object;
+
+.field public static value:Z
+
+.method public static testCase()Z
+ .registers 2
+ sget-boolean v0, LTestCase;->value:Z
+ const/4 v1, 1
+ if-eq v0, v1, :label1
+ const/4 v1, 1
+ goto :label2
+ :label1
+ const/4 v1, 0
+ :label2
+ return v1
+.end method
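In Java terms the method above is just a negation, written so that the boolean value itself ends up feeding the If, the shape that used to trip the simplifier (a rough rendering for illustration only):

    static boolean testCase() {
      // if-eq tests the boolean field against constant 1, so per info.txt a
      // boolean value, not a comparison, becomes the input of the If.
      return (value == true) ? false : true;  // i.e. !value
    }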
diff --git a/test/468-bool-simplifier-regression/src/Main.java b/test/468-bool-simplifier-regression/src/Main.java
new file mode 100644
index 0000000..1dd27c9
--- /dev/null
+++ b/test/468-bool-simplifier-regression/src/Main.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.*;
+
+public class Main {
+ public static boolean runTest(boolean input) throws Exception {
+ Class<?> c = Class.forName("TestCase");
+ Method m = c.getMethod("testCase");
+ Field f = c.getField("value");
+ f.set(null, (Boolean) input);
+ return (Boolean) m.invoke(null);
+ }
+
+ public static void main(String[] args) throws Exception {
+ if (runTest(true) != false) {
+ throw new Error("Expected false, got true");
+ }
+
+ if (runTest(false) != true) {
+ throw new Error("Expected true, got false");
+ }
+ }
+}
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index 019dc14..5922257 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -14,4 +14,5 @@
b/18800943 (2)
MoveExc
MoveExceptionOnEntry
+EmptySparseSwitch
Done!
diff --git a/test/800-smali/smali/EmptySparseSwitch.smali b/test/800-smali/smali/EmptySparseSwitch.smali
new file mode 100644
index 0000000..29592c1
--- /dev/null
+++ b/test/800-smali/smali/EmptySparseSwitch.smali
@@ -0,0 +1,17 @@
+.class public LEmptySparseSwitch;
+
+.super Ljava/lang/Object;
+
+.method public static run()V
+ .registers 2
+
+ const v0, 0
+
+ sparse-switch v0, :SparseSwitch
+
+ return-void
+
+ :SparseSwitch
+ .sparse-switch
+ .end sparse-switch
+.end method
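Roughly what the smali encodes, as a reading aid (javac would never emit this shape, which is why the test is hand-written smali): a sparse-switch whose payload has zero entries, so no case can match and control falls through to the return-void.

    public static void run() {
      int v = 0;
      switch (v) {
        // intentionally empty: no case labels at all
      }
    }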
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index b23896d..3e0b1f9 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -77,6 +77,8 @@
null));
testCases.add(new TestCase("MoveExceptionOnEntry", "MoveExceptionOnEntry",
"moveExceptionOnEntry", new Object[]{0}, new VerifyError(), null));
+ testCases.add(new TestCase("EmptySparseSwitch", "EmptySparseSwitch", "run", null, null,
+ null));
}
public void runTests() {
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 9755efb..28fbc3e 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -95,9 +95,9 @@
RELOCATE_TYPES += no-relocate
endif
ifeq ($(ART_TEST_RUN_TEST_RELOCATE_NO_PATCHOAT),true)
- RELOCATE_TYPES := relocate-no-patchoat
+ RELOCATE_TYPES := relocate-npatchoat
endif
-TRACE_TYPES := no-trace
+TRACE_TYPES := ntrace
ifeq ($(ART_TEST_TRACE),true)
TRACE_TYPES += trace
endif
@@ -119,7 +119,7 @@
ifeq ($(ART_TEST_PIC_IMAGE),true)
IMAGE_TYPES += picimage
endif
-PICTEST_TYPES := nopictest
+PICTEST_TYPES := npictest
ifeq ($(ART_TEST_PIC_TEST),true)
PICTEST_TYPES += pictest
endif
@@ -130,7 +130,7 @@
ifeq ($(ART_TEST_RUN_TEST_NDEBUG),true)
RUN_TYPES += ndebug
endif
-DEBUGGABLE_TYPES := nondebuggable
+DEBUGGABLE_TYPES := ndebuggable
ifeq ($(ART_TEST_RUN_TEST_DEBUGGABLE),true)
DEBUGGABLE_TYPES += debuggable
endif
@@ -272,9 +272,9 @@
$(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
-ifneq (,$(filter relocate-no-patchoat,$(RELOCATE_TYPES)))
+ifneq (,$(filter relocate-npatchoat,$(RELOCATE_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES), relocate-no-patchoat,$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(COMPILER_TYPES), relocate-npatchoat,$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
@@ -375,7 +375,7 @@
ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),nondebuggable,$(TEST_ART_BROKEN_OPTIMIZING_NONDEBUGGABLE_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+ $(IMAGE_TYPES),$(PICTEST_TYPES),ndebuggable,$(TEST_ART_BROKEN_OPTIMIZING_NONDEBUGGABLE_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
TEST_ART_BROKEN_OPTIMIZING_NONDEBUGGABLE_RUN_TESTS :=
@@ -461,10 +461,10 @@
# Create a rule to build and run tests following the form:
# test-art-{1: host or target}-run-test-{2: debug ndebug}-{3: prebuild no-prebuild no-dex2oat}-
-# {4: interpreter default optimizing jit}-{5: relocate no-relocate relocate-no-patchoat}-
-# {6: trace or no-trace}-{7: gcstress gcverify cms}-{8: forcecopy checkjni jni}-
-# {9: no-image image picimage}-{10: pictest nopictest}-
-# {11: nondebuggable debuggable}-{12: test name}{13: 32 or 64}
+# {4: interpreter default optimizing jit}-{5: relocate nrelocate relocate-npatchoat}-
+# {6: trace or ntrace}-{7: gcstress gcverify cms}-{8: forcecopy checkjni jni}-
+# {9: no-image image picimage}-{10: pictest npictest}-
+# {11: ndebuggable debuggable}-{12: test name}{13: 32 or 64}
define define-test-art-run-test
run_test_options :=
prereq_rule :=
@@ -543,7 +543,7 @@
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_RELOCATE_RULES
run_test_options += --no-relocate
else
- ifeq ($(5),relocate-no-patchoat)
+ ifeq ($(5),relocate-npatchoat)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_RELOCATE_NO_PATCHOAT_RULES
run_test_options += --relocate --no-patchoat
else
@@ -555,7 +555,7 @@
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_TRACE_RULES
run_test_options += --trace
else
- ifeq ($(6),no-trace)
+ ifeq ($(6),ntrace)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_TRACE_RULES
else
$$(error found $(6) expected $(TRACE_TYPES))
@@ -635,7 +635,7 @@
ifeq ($(10),pictest)
run_test_options += --pic-test
else
- ifeq ($(10),nopictest)
+ ifeq ($(10),npictest)
# Nothing to be done.
else
$$(error found $(10) expected $(PICTEST_TYPES))
@@ -645,7 +645,7 @@
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_DEBUGGABLE_RULES
run_test_options += --debuggable
else
- ifeq ($(11),nondebuggable)
+ ifeq ($(11),ndebuggable)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NONDEBUGGABLE_RULES
# Nothing to be done.
else
diff --git a/test/etc/default-build b/test/etc/default-build
index 58c9564..fbe97f9 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -39,7 +39,7 @@
fi
mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
+${JAVAC} -implicit:none -classpath src-multidex -d classes `find src -name '*.java'`
if [ -d src2 ]; then
${JAVAC} -d classes `find src2 -name '*.java'`
@@ -72,6 +72,15 @@
fi
fi
-if [ ${NEED_DEX} = "true" ]; then
+# Create a single jar with two dex files for multidex.
+if [ -d src-multidex ]; then
+ mkdir classes2
+ ${JAVAC} -implicit:none -classpath src -d classes2 `find src-multidex -name '*.java'`
+ if [ ${NEED_DEX} = "true" ]; then
+ ${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex \
+ --dump-width=1000 ${DX_FLAGS} classes2
+ zip $TEST_NAME.jar classes.dex classes2.dex
+ fi
+elif [ ${NEED_DEX} = "true" ]; then
zip $TEST_NAME.jar classes.dex
fi
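A test with both src/ and src-multidex/ now produces one jar carrying two dex files; compiling each tree with -implicit:none and the other tree on the classpath keeps every class in its intended dex file. For test 462 above, the resulting layout looks like this (illustrative):

    # src/Main.java              -> classes/  -> classes.dex   (Main, AAA)
    # src-multidex/OtherDex.java -> classes2/ -> classes2.dex  (OtherDex)
    #
    # 462-checker-inlining-across-dex-files.jar
    #   |- classes.dex
    #   |- classes2.dex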
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 840ff80..414e4df 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -267,7 +267,7 @@
if [ "$JIT" = "y" ]; then
INT_OPTS="-Xusejit:true"
if [ "$VERIFY" = "y" ] ; then
- COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=interpret-only"
+ COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=verify-at-runtime"
else
COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=verify-none"
DEX_VERIFY="${DEX_VERIFY} -Xverify:none"
diff --git a/test/run-all-tests b/test/run-all-tests
index d0b3cf9..13490c4 100755
--- a/test/run-all-tests
+++ b/test/run-all-tests
@@ -112,6 +112,7 @@
shift;
elif [ "x$1" = "x--always-clean" ]; then
run_args="${run_args} --always-clean"
+ shift
elif expr "x$1" : "x--" >/dev/null 2>&1; then
echo "unknown $0 option: $1" 1>&2
usage="yes"
diff --git a/test/run-test b/test/run-test
index df0fce4..2873a35 100755
--- a/test/run-test
+++ b/test/run-test
@@ -441,8 +441,8 @@
echo " --build-only Build test files only (off by default)."
echo " --interpreter Enable interpreter only mode (off by default)."
echo " --jit Enable jit (off by default)."
- echo " --optimizing Enable optimizing compiler (off by default)."
- echo " --quick Use Quick compiler (default)."
+ echo " --optimizing Enable optimizing compiler (default)."
+ echo " --quick Use Quick compiler (off by default)."
echo " --no-verify Turn off verification (on by default)."
echo " --no-optimize Turn off optimization (on by default)."
echo " --no-precise Turn off precise GC (on by default)."
diff --git a/tools/dexfuzz/src/dexfuzz/rawdex/Instruction.java b/tools/dexfuzz/src/dexfuzz/rawdex/Instruction.java
index 2dda78f..adafa62 100644
--- a/tools/dexfuzz/src/dexfuzz/rawdex/Instruction.java
+++ b/tools/dexfuzz/src/dexfuzz/rawdex/Instruction.java
@@ -434,7 +434,7 @@
addOpcodeInfo(Opcode.INVOKE_DIRECT, "invoke-direct", 0x70, new Format35c());
addOpcodeInfo(Opcode.INVOKE_STATIC, "invoke-static", 0x71, new Format35c());
addOpcodeInfo(Opcode.INVOKE_INTERFACE, "invoke-interface", 0x72, new Format35c());
- addOpcodeInfo(Opcode.RETURN_VOID_BARRIER, "return-void-barrier", 0x73, new Format10x());
+ addOpcodeInfo(Opcode.RETURN_VOID_NO_BARRIER, "return-void-no-barrier", 0x73, new Format10x());
addOpcodeInfo(Opcode.INVOKE_VIRTUAL_RANGE, "invoke-virtual/range", 0x74, new Format3rc());
addOpcodeInfo(Opcode.INVOKE_SUPER_RANGE, "invoke-super/range", 0x75, new Format3rc());
addOpcodeInfo(Opcode.INVOKE_DIRECT_RANGE, "invoke-direct/range", 0x76, new Format3rc());
diff --git a/tools/dexfuzz/src/dexfuzz/rawdex/Opcode.java b/tools/dexfuzz/src/dexfuzz/rawdex/Opcode.java
index 312e855..f7c7788 100644
--- a/tools/dexfuzz/src/dexfuzz/rawdex/Opcode.java
+++ b/tools/dexfuzz/src/dexfuzz/rawdex/Opcode.java
@@ -132,7 +132,7 @@
INVOKE_DIRECT,
INVOKE_STATIC,
INVOKE_INTERFACE,
- RETURN_VOID_BARRIER,
+ RETURN_VOID_NO_BARRIER,
INVOKE_VIRTUAL_RANGE,
INVOKE_SUPER_RANGE,
INVOKE_DIRECT_RANGE,
@@ -277,4 +277,4 @@
public static boolean isBetween(Opcode opcode, Opcode opcode1, Opcode opcode2) {
return (opcode.ordinal() >= opcode1.ordinal() && opcode.ordinal() <= opcode2.ordinal());
}
-}
\ No newline at end of file
+}
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index b87b2ff..2040b57 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -63,18 +63,19 @@
"org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parseLjava_lang_StringLjava_text_ParsePosition"]
},
{
- description: "Failing due to missing localhost on volantis.",
- result: EXEC_FAILED,
- modes: [device],
- names: ["org.apache.harmony.luni.tests.internal.net.www.protocol.http.HttpURLConnectionTest",
- "org.apache.harmony.luni.tests.internal.net.www.protocol.https.HttpsURLConnectionTest",
- "org.apache.harmony.luni.tests.java.net.URLConnectionTest"]
-},
-{
description: "Failing due to missing localhost on hammerhead and volantis.",
result: EXEC_FAILED,
modes: [device],
- names: ["libcore.javax.crypto.CipherTest#testCipherInitWithCertificate"]
+ names: ["libcore.javax.crypto.CipherTest#testCipherInitWithCertificate",
+ "libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithFtpURLConnection",
+ "libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithJarFtpURLConnection",
+ "libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithLoggingSocketHandler",
+ "libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithHttpURLConnection",
+ "libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithJarHttpURLConnection",
+ "org.apache.harmony.luni.tests.internal.net.www.protocol.http.HttpURLConnectionTest",
+ "org.apache.harmony.luni.tests.internal.net.www.protocol.https.HttpsURLConnectionTest",
+ "org.apache.harmony.luni.tests.java.net.URLConnectionTest"
+ ]
},
{
description: "Test timeouts",
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
new file mode 100755
index 0000000..90c01f5
--- /dev/null
+++ b/tools/run-jdwp-tests.sh
@@ -0,0 +1,89 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ ! -d libcore ]; then
+ echo "Script needs to be run at the root of the android tree"
+ exit 1
+fi
+
+if [[ $ANDROID_SERIAL == 03a79ae90ae5889b ]] || [[ $ANDROID_SERIAL == HT4CTJT03670 ]] || [[ $ANDROID_SERIAL == HT49CJT00070 ]]; then
+ echo "Not run because of localhost failures. Investigating."
+ exit 0
+fi
+
+# Jar containing all the tests.
+test_jar=out/host/linux-x86/framework/apache-harmony-jdwp-tests-hostdex.jar
+junit_jar=out/host/linux-x86/framework/junit.jar
+
+if [ ! -f $test_jar -o ! -f $junit_jar ]; then
+ echo "Before running, you must build jdwp tests and vogar:" \
+ "make junit apache-harmony-jdwp-tests-hostdex vogar vogar.jar"
+ exit 1
+fi
+
+art="/data/local/tmp/system/bin/art"
+# We use Quick's image on target because optimizing's image is not compiled debuggable.
+image="-Ximage:/data/art-test/core.art"
+args=$@
+debuggee_args="-Xcompiler-option --compiler-backend=Optimizing -Xcompiler-option --debuggable"
+device_dir="--device-dir=/data/local/tmp"
+# We use the art script on target to ensure the runner and the debuggee share the same
+# image.
+vm_command="--vm-command=$art"
+image_compiler_option=""
+
+while true; do
+ if [[ "$1" == "--mode=host" ]]; then
+    # Specify bash explicitly, since the art script cannot hard-code a shell: it
+    # also has to run on the device under mksh.
+ art="bash out/host/linux-x86/bin/art"
+ # We force generation of a new image to avoid build-time and run-time classpath differences.
+ image="-Ximage:/system/non/existent"
+ # We do not need a device directory on host.
+ device_dir=""
+ # Vogar knows which VM to use on host.
+ vm_command=""
+    # We only compile the image on the host. Omitting this option on device
+    # keeps us below the adb command limit for vogar.
+ image_compiler_option="--vm-arg -Ximage-compiler-option --vm-arg --debuggable"
+ shift
+ elif [[ $1 == -Ximage:* ]]; then
+ image="$1"
+ shift
+ elif [[ "$1" == "" ]]; then
+ break
+ else
+ shift
+ fi
+done
+
+# Run the tests using vogar.
+vogar $vm_command \
+ --vm-arg $image \
+ --verbose \
+ $args \
+ $device_dir \
+ $image_compiler_option \
+ --timeout 600 \
+ --vm-arg -Djpda.settings.verbose=true \
+ --vm-arg -Djpda.settings.syncPort=34016 \
+ --vm-arg -Djpda.settings.transportAddress=127.0.0.1:55107 \
+ --vm-arg -Djpda.settings.debuggeeJavaPath="$art $image $debuggee_args" \
+ --classpath $test_jar \
+ --classpath $junit_jar \
+ --vm-arg -Xcompiler-option --vm-arg --compiler-backend=Optimizing \
+ --vm-arg -Xcompiler-option --vm-arg --debuggable \
+ org.apache.harmony.jpda.tests.share.AllTests
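Given the option parsing above, typical invocations look like this (run from the root of the android tree; the image path in the last line is the script's own default, shown here only as an example of overriding it):

    # Against a connected device, with the default core.art image:
    art/tools/run-jdwp-tests.sh
    # On the host:
    art/tools/run-jdwp-tests.sh --mode=host
    # With an explicit boot image:
    art/tools/run-jdwp-tests.sh -Ximage:/data/art-test/core.art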
diff --git a/tools/setup-buildbot-device.sh b/tools/setup-buildbot-device.sh
index fc396b6..7faf86e 100755
--- a/tools/setup-buildbot-device.sh
+++ b/tools/setup-buildbot-device.sh
@@ -24,5 +24,9 @@
adb shell setenforce 0
adb shell getenforce
+echo -e "${green}Setting local loopback${nc}"
+adb shell ifconfig lo up
+adb shell ifconfig
+
echo -e "${green}List properties${nc}"
adb shell getprop