Use LIRSlowPath for intrinsics, improve String.indexOf().
Rewrite intrinsic launchpads to use LIRSlowPath.
Improve String.indexOf() for constant chars by avoiding
the check for code points over 0xFFFF.
Change-Id: I7fd5583214c5b4ab9c38ee36c5d6f003dd6345a8
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 34d3834..d095444 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -997,7 +997,6 @@
fill_array_data_(arena, 4, kGrowableArrayFillArrayData),
throw_launchpads_(arena, 2048, kGrowableArrayThrowLaunchPads),
suspend_launchpads_(arena, 4, kGrowableArraySuspendLaunchPads),
- intrinsic_launchpads_(arena, 2048, kGrowableArrayMisc),
tempreg_info_(arena, 20, kGrowableArrayMisc),
reginfo_map_(arena, 64, kGrowableArrayMisc),
pointer_storage_(arena, 128, kGrowableArrayMisc),
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 00c51d4..1c5f6a0 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -581,24 +581,6 @@
}
}
-void Mir2Lir::HandleIntrinsicLaunchPads() {
- int num_elems = intrinsic_launchpads_.Size();
- for (int i = 0; i < num_elems; i++) {
- ResetRegPool();
- ResetDefTracking();
- LIR* lab = intrinsic_launchpads_.Get(i);
- CallInfo* info = reinterpret_cast<CallInfo*>(UnwrapPointer(lab->operands[0]));
- current_dalvik_offset_ = info->offset;
- AppendLIR(lab);
- // NOTE: GenInvoke handles MarkSafepointPC
- GenInvoke(info);
- LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[2]));
- if (resume_lab != NULL) {
- OpUnconditionalBranch(resume_lab);
- }
- }
-}
-
void Mir2Lir::HandleThrowLaunchPads() {
int num_elems = throw_launchpads_.Size();
for (int i = 0; i < num_elems; i++) {
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 1907012..419d0a9 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -34,6 +34,32 @@
* and "op" calls may be used here.
*/
+void Mir2Lir::AddIntrinsicLaunchpad(CallInfo* info, LIR* branch, LIR* resume) {
+ class IntrinsicLaunchpadPath : public Mir2Lir::LIRSlowPath {
+ public:
+ IntrinsicLaunchpadPath(Mir2Lir* m2l, CallInfo* info, LIR* branch, LIR* resume = nullptr)
+ : LIRSlowPath(m2l, info->offset, branch, resume), info_(info) {
+ }
+
+ void Compile() {
+ m2l_->ResetRegPool();
+ m2l_->ResetDefTracking();
+ LIR* label = GenerateTargetLabel();
+ label->opcode = kPseudoIntrinsicRetry;
+ // NOTE: GenInvokeNoInline() handles MarkSafepointPC.
+ m2l_->GenInvokeNoInline(info_);
+ if (cont_ != nullptr) {
+ m2l_->OpUnconditionalBranch(cont_);
+ }
+ }
+
+ private:
+ CallInfo* const info_;
+ };
+
+ AddSlowPath(new (arena_) IntrinsicLaunchpadPath(this, info, branch, resume));
+}
+
/*
* To save scheduling time, helper calls are broken into two parts: generation of
* the helper target address, and the actuall call to the helper. Because x86
@@ -984,7 +1010,7 @@
int reg_max;
GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), info->opt_flags);
bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
- LIR* launch_pad = NULL;
+ LIR* range_check_branch = nullptr;
int reg_off = INVALID_REG;
int reg_ptr = INVALID_REG;
if (cu_->instruction_set != kX86) {
@@ -998,25 +1024,22 @@
LoadWordDisp(rl_obj.reg.GetReg(), value_offset, reg_ptr);
if (range_check) {
// Set up a launch pad to allow retry in case of bounds violation */
- launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
- intrinsic_launchpads_.Insert(launch_pad);
OpRegReg(kOpCmp, rl_idx.reg.GetReg(), reg_max);
FreeTemp(reg_max);
- OpCondBranch(kCondUge, launch_pad);
+ range_check_branch = OpCondBranch(kCondUge, nullptr);
}
OpRegImm(kOpAdd, reg_ptr, data_offset);
} else {
if (range_check) {
// On x86, we can compare to memory directly
// Set up a launch pad to allow retry in case of bounds violation */
- launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
- intrinsic_launchpads_.Insert(launch_pad);
if (rl_idx.is_const) {
- OpCmpMemImmBranch(kCondUlt, INVALID_REG, rl_obj.reg.GetReg(), count_offset,
- mir_graph_->ConstantValue(rl_idx.orig_sreg), launch_pad);
+ range_check_branch = OpCmpMemImmBranch(
+ kCondUlt, INVALID_REG, rl_obj.reg.GetReg(), count_offset,
+ mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr);
} else {
OpRegMem(kOpCmp, rl_idx.reg.GetReg(), rl_obj.reg.GetReg(), count_offset);
- OpCondBranch(kCondUge, launch_pad);
+ range_check_branch = OpCondBranch(kCondUge, nullptr);
}
}
reg_off = AllocTemp();
@@ -1045,10 +1068,10 @@
FreeTemp(reg_ptr);
StoreValue(rl_dest, rl_result);
if (range_check) {
- launch_pad->operands[2] = 0; // no resumption
+ DCHECK(range_check_branch != nullptr);
+ info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've already null checked.
+ AddIntrinsicLaunchpad(info, range_check_branch);
}
- // Record that we've already inlined & null checked
- info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
return true;
}
@@ -1232,7 +1255,7 @@
}
/*
- * Fast string.index_of(I) & (II). Tests for simple case of char <= 0xffff,
+ * Fast String.indexOf(I) & (II). Tests for simple case of char <= 0xFFFF,
* otherwise bails to standard library code.
*/
bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
@@ -1240,14 +1263,19 @@
// TODO - add Mips implementation
return false;
}
+ RegLocation rl_obj = info->args[0];
+ RegLocation rl_char = info->args[1];
+ if (rl_char.is_const && (mir_graph_->ConstantValue(rl_char) & ~0xFFFF) != 0) {
+ // Code point beyond 0xFFFF. Punt to the real String.indexOf().
+ return false;
+ }
+
ClobberCallerSave();
LockCallTemps(); // Using fixed registers
int reg_ptr = TargetReg(kArg0);
int reg_char = TargetReg(kArg1);
int reg_start = TargetReg(kArg2);
- RegLocation rl_obj = info->args[0];
- RegLocation rl_char = info->args[1];
LoadValueDirectFixed(rl_obj, reg_ptr);
LoadValueDirectFixed(rl_char, reg_char);
if (zero_based) {
@@ -1258,15 +1286,20 @@
}
int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf));
GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
- LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
- intrinsic_launchpads_.Insert(launch_pad);
- OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, launch_pad);
+ LIR* high_code_point_branch =
+ rl_char.is_const ? nullptr : OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, nullptr);
// NOTE: not a safepoint
OpReg(kOpBlx, r_tgt);
- LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
- launch_pad->operands[2] = WrapPointer(resume_tgt);
- // Record that we've already inlined & null checked
- info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
+ if (!rl_char.is_const) {
+ // Add the slow path for code points beyond 0xFFFF.
+ DCHECK(high_code_point_branch != nullptr);
+ LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
+ info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked.
+ AddIntrinsicLaunchpad(info, high_code_point_branch, resume_tgt);
+ } else {
+ DCHECK_EQ(mir_graph_->ConstantValue(rl_char) & ~0xFFFF, 0);
+ DCHECK(high_code_point_branch == nullptr);
+ }
RegLocation rl_return = GetReturn(false);
RegLocation rl_dest = InlineTarget(info);
StoreValue(rl_dest, rl_return);
@@ -1291,19 +1324,16 @@
int r_tgt = (cu_->instruction_set != kX86) ?
LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
+ info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked.
// TUNING: check if rl_cmp.s_reg_low is already null checked
- LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
- intrinsic_launchpads_.Insert(launch_pad);
- OpCmpImmBranch(kCondEq, reg_cmp, 0, launch_pad);
+ LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
+ AddIntrinsicLaunchpad(info, cmp_null_check_branch);
// NOTE: not a safepoint
if (cu_->instruction_set != kX86) {
OpReg(kOpBlx, r_tgt);
} else {
OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pStringCompareTo));
}
- launch_pad->operands[2] = 0; // No return possible
- // Record that we've already inlined & null checked
- info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
RegLocation rl_return = GetReturn(false);
RegLocation rl_dest = InlineTarget(info);
StoreValue(rl_dest, rl_return);
@@ -1390,13 +1420,15 @@
}
void Mir2Lir::GenInvoke(CallInfo* info) {
- if (!(info->opt_flags & MIR_INLINED)) {
- DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
- if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
- ->GenIntrinsic(this, info)) {
- return;
- }
+ DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
+ if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
+ ->GenIntrinsic(this, info)) {
+ return;
}
+ GenInvokeNoInline(info);
+}
+
+void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
int call_state = 0;
LIR* null_ck;
LIR** p_null_ck = NULL;
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 31f5c28..f93a5e3 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -1088,8 +1088,6 @@
HandleSuspendLaunchPads();
HandleThrowLaunchPads();
-
- HandleIntrinsicLaunchPads();
}
//
@@ -1097,10 +1095,10 @@
//
LIR* Mir2Lir::LIRSlowPath::GenerateTargetLabel() {
- LIR* target = m2l_->RawLIR(current_dex_pc_, kPseudoTargetLabel);
- m2l_->AppendLIR(target);
- fromfast_->target = target;
m2l_->SetCurrentDexPc(current_dex_pc_);
+ LIR* target = m2l_->NewLIR0(kPseudoTargetLabel);
+ fromfast_->target = target;
return target;
}
+
} // namespace art
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 6955577..856318f 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -535,11 +535,11 @@
RegisterInfo* GetRegInfo(int reg);
// Shared by all targets - implemented in gen_common.cc.
+ void AddIntrinsicLaunchpad(CallInfo* info, LIR* branch, LIR* resume = nullptr);
bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit);
bool HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit);
void HandleSuspendLaunchPads();
- void HandleIntrinsicLaunchPads();
void HandleThrowLaunchPads();
void HandleSlowPaths();
void GenBarrier();
@@ -637,6 +637,7 @@
RegLocation arg2,
bool safepoint_pc);
void GenInvoke(CallInfo* info);
+ void GenInvokeNoInline(CallInfo* info);
void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
NextCallInsn next_call_insn,
@@ -1196,7 +1197,6 @@
GrowableArray<FillArrayData*> fill_array_data_;
GrowableArray<LIR*> throw_launchpads_;
GrowableArray<LIR*> suspend_launchpads_;
- GrowableArray<LIR*> intrinsic_launchpads_;
GrowableArray<RegisterInfo*> tempreg_info_;
GrowableArray<RegisterInfo*> reginfo_map_;
GrowableArray<void*> pointer_storage_;
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 3596fff..5b6e119 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -965,21 +965,17 @@
// Is the string non-NULL?
LoadValueDirectFixed(rl_obj, rDX);
GenNullCheck(rl_obj.s_reg_low, rDX, info->opt_flags);
-
- // Record that we have inlined & null checked the object.
- info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
+ info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked.
// Does the character fit in 16 bits?
- LIR* launch_pad = nullptr;
+ LIR* launchpad_branch = nullptr;
if (rl_char.is_const) {
// We need the value in EAX.
LoadConstantNoClobber(rAX, char_value);
} else {
// Character is not a constant; compare at runtime.
LoadValueDirectFixed(rl_char, rAX);
- launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
- intrinsic_launchpads_.Insert(launch_pad);
- OpCmpImmBranch(kCondGt, rAX, 0xFFFF, launch_pad);
+ launchpad_branch = OpCmpImmBranch(kCondGt, rAX, 0xFFFF, nullptr);
}
// From here down, we know that we are looking for a char that fits in 16 bits.
@@ -1096,9 +1092,9 @@
NewLIR1(kX86Pop32R, rDI);
// Out of line code returns here.
- if (launch_pad != nullptr) {
+ if (launchpad_branch != nullptr) {
LIR *return_point = NewLIR0(kPseudoTargetLabel);
- launch_pad->operands[2] = WrapPointer(return_point);
+ AddIntrinsicLaunchpad(info, launchpad_branch, return_point);
}
StoreValue(rl_dest, rl_return);