ART: Quick compiler: More size checks, add TargetReg variants
Add TargetReg variants for requesting specific register usage,
e.g., wide and ref. Add more register size checks.
With code adapted from https://android-review.googlesource.com/#/c/98605/.
Change-Id: I852d3be509d4dcd242c7283da702a2a76357278d
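
For reference, the TargetReg variants exercised by the call sites below
behave roughly as sketched here. This is a minimal sketch inferred from
this diff's usage (TargetReg(reg, bool is_wide), TargetReg(reg,
RegLocation loc), TargetRefReg(reg)); the exact declarations live in
compiler/dex/quick/mir_to_lir.h and may differ in detail. Backends whose
argument registers have 64-bit views (e.g. arm64, x86_64) are expected to
override these so wide and reference values land in correctly sized
registers.

    // Pre-existing lookup of a symbolic target register (backend-specific).
    virtual RegStorage TargetReg(SpecialTargetRegister reg) = 0;

    // Wide-aware variant: a 64-bit backend may return the 64-bit view of
    // the register when is_wide is set; the default ignores the hint.
    virtual RegStorage TargetReg(SpecialTargetRegister reg, bool is_wide) {
      return TargetReg(reg);
    }

    // Reference-aware variant: backends using 64-bit references may return
    // a wide register for reference values.
    virtual RegStorage TargetRefReg(SpecialTargetRegister reg) {
      return TargetReg(reg);
    }

    // RegLocation-driven variant: derives the wide/ref request from loc.
    virtual RegStorage TargetReg(SpecialTargetRegister reg, RegLocation loc) {
      return loc.ref ? TargetRefReg(reg) : TargetReg(reg, loc.wide);
    }
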
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 569c97f..aa574dc 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -223,7 +223,7 @@
void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset<pointer_size> helper_offset, int arg0,
RegStorage arg1, bool safepoint_pc) {
RegStorage r_tgt = CallHelperSetup(helper_offset);
- OpRegCopy(TargetReg(kArg1), arg1);
+ OpRegCopy(TargetReg(kArg1, arg1.Is64Bit()), arg1);
LoadConstant(TargetReg(kArg0), arg0);
ClobberCallerSave();
CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
@@ -276,7 +276,7 @@
OpRegCopy(TargetReg(kArg0), arg0);
}
LoadCurrMethodDirect(TargetReg(kArg1));
- LoadValueDirectFixed(arg2, TargetReg(kArg2));
+ LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
ClobberCallerSave();
CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
}
@@ -288,80 +288,103 @@
RegLocation arg0, RegLocation arg1,
bool safepoint_pc) {
RegStorage r_tgt = CallHelperSetup(helper_offset);
- if (arg0.wide == 0) {
- LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
- if (arg1.wide == 0) {
- if (cu_->instruction_set == kMips) {
- LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1));
- } else if (cu_->instruction_set == kArm64) {
- LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
- } else if (cu_->instruction_set == kX86_64) {
- if (arg0.fp) {
- LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg1) : TargetReg(kArg0));
- } else {
- LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg0) : TargetReg(kArg1));
- }
- } else {
- LoadValueDirectFixed(arg1, TargetReg(kArg1));
- }
+ if (cu_->instruction_set == kArm64) {
+ RegStorage arg0_reg = TargetReg((arg0.fp) ? kFArg0 : kArg0, arg0);
+
+ RegStorage arg1_reg;
+ if (arg1.fp == arg0.fp) {
+ arg1_reg = TargetReg((arg1.fp) ? kFArg1 : kArg1, arg1);
} else {
- if (cu_->instruction_set == kMips) {
- RegStorage r_tmp;
- if (arg1.fp) {
- r_tmp = RegStorage::MakeRegPair(TargetReg(kFArg2), TargetReg(kFArg3));
- } else {
- // skip kArg1 for stack alignment.
- r_tmp = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
- }
- LoadValueDirectWideFixed(arg1, r_tmp);
- } else {
- RegStorage r_tmp;
- if (cu_->target64) {
- r_tmp = RegStorage::Solo64(TargetReg(kArg1).GetReg());
- } else {
- r_tmp = RegStorage::MakeRegPair(TargetReg(kArg1), TargetReg(kArg2));
- }
- LoadValueDirectWideFixed(arg1, r_tmp);
- }
+ arg1_reg = TargetReg((arg1.fp) ? kFArg0 : kArg0, arg1);
+ }
+
+ if (arg0.wide == 0) {
+ LoadValueDirectFixed(arg0, arg0_reg);
+ } else {
+ LoadValueDirectWideFixed(arg0, arg0_reg);
+ }
+
+ if (arg1.wide == 0) {
+ LoadValueDirectFixed(arg1, arg1_reg);
+ } else {
+ LoadValueDirectWideFixed(arg1, arg1_reg);
}
} else {
- RegStorage r_tmp;
- if (arg0.fp) {
- if (cu_->target64) {
- r_tmp = RegStorage::FloatSolo64(TargetReg(kFArg0).GetReg());
+ if (arg0.wide == 0) {
+ LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
+ if (arg1.wide == 0) {
+ if (cu_->instruction_set == kMips) {
+ LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1));
+ } else if (cu_->instruction_set == kArm64) {
+ LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
+ } else if (cu_->instruction_set == kX86_64) {
+ if (arg0.fp) {
+ LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg1) : TargetReg(kArg0));
+ } else {
+ LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg0) : TargetReg(kArg1));
+ }
+ } else {
+ LoadValueDirectFixed(arg1, TargetReg(kArg1));
+ }
} else {
- r_tmp = RegStorage::MakeRegPair(TargetReg(kFArg0), TargetReg(kFArg1));
- }
- } else {
- if (cu_->target64) {
- r_tmp = RegStorage::Solo64(TargetReg(kArg0).GetReg());
- } else {
- r_tmp = RegStorage::MakeRegPair(TargetReg(kArg0), TargetReg(kArg1));
- }
- }
- LoadValueDirectWideFixed(arg0, r_tmp);
- if (arg1.wide == 0) {
- if (cu_->target64) {
- LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
- } else {
- LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
+ if (cu_->instruction_set == kMips) {
+ RegStorage r_tmp;
+ if (arg1.fp) {
+ r_tmp = RegStorage::MakeRegPair(TargetReg(kFArg2), TargetReg(kFArg3));
+ } else {
+ // skip kArg1 for stack alignment.
+ r_tmp = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
+ }
+ LoadValueDirectWideFixed(arg1, r_tmp);
+ } else {
+ RegStorage r_tmp;
+ if (cu_->target64) {
+ r_tmp = RegStorage::Solo64(TargetReg(kArg1).GetReg());
+ } else {
+ r_tmp = RegStorage::MakeRegPair(TargetReg(kArg1), TargetReg(kArg2));
+ }
+ LoadValueDirectWideFixed(arg1, r_tmp);
+ }
}
} else {
RegStorage r_tmp;
- if (arg1.fp) {
+ if (arg0.fp) {
if (cu_->target64) {
- r_tmp = RegStorage::FloatSolo64(TargetReg(kFArg1).GetReg());
+ r_tmp = RegStorage::FloatSolo64(TargetReg(kFArg0).GetReg());
} else {
- r_tmp = RegStorage::MakeRegPair(TargetReg(kFArg2), TargetReg(kFArg3));
+ r_tmp = RegStorage::MakeRegPair(TargetReg(kFArg0), TargetReg(kFArg1));
}
} else {
if (cu_->target64) {
- r_tmp = RegStorage::Solo64(TargetReg(kArg1).GetReg());
+ r_tmp = RegStorage::Solo64(TargetReg(kArg0).GetReg());
} else {
- r_tmp = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
+ r_tmp = RegStorage::MakeRegPair(TargetReg(kArg0), TargetReg(kArg1));
}
}
- LoadValueDirectWideFixed(arg1, r_tmp);
+ LoadValueDirectWideFixed(arg0, r_tmp);
+ if (arg1.wide == 0) {
+ if (cu_->target64) {
+ LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
+ } else {
+ LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
+ }
+ } else {
+ RegStorage r_tmp;
+ if (arg1.fp) {
+ if (cu_->target64) {
+ r_tmp = RegStorage::FloatSolo64(TargetReg(kFArg1).GetReg());
+ } else {
+ r_tmp = RegStorage::MakeRegPair(TargetReg(kFArg2), TargetReg(kFArg3));
+ }
+ } else {
+ if (cu_->target64) {
+ r_tmp = RegStorage::Solo64(TargetReg(kArg1).GetReg());
+ } else {
+ r_tmp = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
+ }
+ }
+ LoadValueDirectWideFixed(arg1, r_tmp);
+ }
}
}
ClobberCallerSave();
@@ -381,16 +404,16 @@
if (IsSameReg(arg1, TargetReg(kArg0))) {
if (IsSameReg(arg0, TargetReg(kArg1))) {
// Swap kArg0 and kArg1 with kArg2 as temp.
- OpRegCopy(TargetArgReg(kArg2, arg1.Is64Bit()), arg1);
- OpRegCopy(TargetArgReg(kArg0, arg0.Is64Bit()), arg0);
- OpRegCopy(TargetArgReg(kArg1, arg1.Is64Bit()), TargetReg(kArg2));
+ OpRegCopy(TargetReg(kArg2, arg1.Is64Bit()), arg1);
+ OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0);
+ OpRegCopy(TargetReg(kArg1, arg1.Is64Bit()), TargetReg(kArg2, arg1.Is64Bit()));
} else {
- OpRegCopy(TargetArgReg(kArg1, arg1.Is64Bit()), arg1);
- OpRegCopy(TargetArgReg(kArg0, arg0.Is64Bit()), arg0);
+ OpRegCopy(TargetReg(kArg1, arg1.Is64Bit()), arg1);
+ OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0);
}
} else {
- OpRegCopy(TargetArgReg(kArg0, arg0.Is64Bit()), arg0);
- OpRegCopy(TargetArgReg(kArg1, arg1.Is64Bit()), arg1);
+ OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0);
+ OpRegCopy(TargetReg(kArg1, arg1.Is64Bit()), arg1);
}
}

@@ -421,9 +444,9 @@
void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset<pointer_size> helper_offset,
int arg0, RegLocation arg2, bool safepoint_pc) {
RegStorage r_tgt = CallHelperSetup(helper_offset);
- LoadValueDirectFixed(arg2, TargetReg(kArg2));
+ LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
LoadCurrMethodDirect(TargetReg(kArg1));
LoadConstant(TargetReg(kArg0), arg0);
ClobberCallerSave();
CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
}
@@ -449,13 +472,13 @@
RegStorage r_tgt = CallHelperSetup(helper_offset);
DCHECK_EQ(static_cast<unsigned int>(arg1.wide), 0U); // The static_cast works around an
// instantiation bug in GCC.
- LoadValueDirectFixed(arg1, TargetReg(kArg1));
+ LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
if (arg2.wide == 0) {
- LoadValueDirectFixed(arg2, TargetReg(kArg2));
+ LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
} else {
RegStorage r_tmp;
if (cu_->target64) {
- r_tmp = RegStorage::Solo64(TargetReg(kArg2).GetReg());
+ r_tmp = TargetReg(kArg2, true);
} else {
r_tmp = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
}
@@ -474,12 +497,9 @@
RegLocation arg2,
bool safepoint_pc) {
RegStorage r_tgt = CallHelperSetup(helper_offset);
- DCHECK_EQ(static_cast<unsigned int>(arg0.wide), 0U);
- LoadValueDirectFixed(arg0, TargetReg(kArg0));
- DCHECK_EQ(static_cast<unsigned int>(arg1.wide), 0U);
- LoadValueDirectFixed(arg1, TargetReg(kArg1));
- DCHECK_EQ(static_cast<unsigned int>(arg1.wide), 0U);
- LoadValueDirectFixed(arg2, TargetReg(kArg2));
+ LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
+ LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
+ LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
ClobberCallerSave();
CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
}
@@ -502,13 +522,13 @@
*/
RegLocation rl_src = rl_method;
rl_src.location = kLocPhysReg;
- rl_src.reg = TargetReg(kArg0);
+ rl_src.reg = TargetRefReg(kArg0);
rl_src.home = false;
MarkLive(rl_src);
StoreValue(rl_method, rl_src);
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
- StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0), kNotVolatile);
+ StoreRefDisp(TargetReg(kSp), 0, rl_src.reg, kNotVolatile);
}

if (cu_->num_ins == 0) {
@@ -615,15 +635,16 @@
return -1;
}
} else {
+ RegStorage arg0_ref = cg->TargetRefReg(kArg0);
switch (state) {
case 0: // Get the current Method* [sets kArg0]
// TUNING: we can save a reg copy if Method* has been promoted.
- cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
+ cg->LoadCurrMethodDirect(arg0_ref);
break;
case 1: // Get method->dex_cache_resolved_methods_
- cg->LoadRefDisp(cg->TargetReg(kArg0),
+ cg->LoadRefDisp(arg0_ref,
mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
- cg->TargetReg(kArg0),
+ arg0_ref,
kNotVolatile);
// Set up direct code if known.
if (direct_code != 0) {
@@ -637,15 +658,15 @@
break;
case 2: // Grab target method*
CHECK_EQ(cu->dex_file, target_method.dex_file);
- cg->LoadRefDisp(cg->TargetReg(kArg0),
+ cg->LoadRefDisp(arg0_ref,
ObjArray::OffsetOfElement(target_method.dex_method_index).Int32Value(),
- cg->TargetReg(kArg0),
+ arg0_ref,
kNotVolatile);
break;
case 3: // Grab the code from the method*
if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
if (direct_code == 0) {
- cg->LoadWordDisp(cg->TargetReg(kArg0),
+ cg->LoadWordDisp(arg0_ref,
mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
cg->TargetReg(kInvokeTgt));
}
@@ -678,13 +699,13 @@
switch (state) {
case 0: { // Get "this" [set kArg1]
RegLocation rl_arg = info->args[0];
- cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
+ cg->LoadValueDirectFixed(rl_arg, cg->TargetRefReg(kArg1));
break;
}
case 1: // Is "this" null? [use kArg1]
- cg->GenNullCheck(cg->TargetReg(kArg1), info->opt_flags);
+ cg->GenNullCheck(cg->TargetRefReg(kArg1), info->opt_flags);
// get this->klass_ [use kArg1, set kInvokeTgt]
- cg->LoadRefDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
+ cg->LoadRefDisp(cg->TargetRefReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
cg->TargetReg(kInvokeTgt),
kNotVolatile);
cg->MarkPossibleNullPointerException(info->opt_flags);
@@ -697,12 +718,12 @@
case 3: // Get target method [use kInvokeTgt, set kArg0]
cg->LoadRefDisp(cg->TargetReg(kInvokeTgt),
ObjArray::OffsetOfElement(method_idx).Int32Value(),
- cg->TargetReg(kArg0),
+ cg->TargetRefReg(kArg0),
kNotVolatile);
break;
case 4: // Get the compiled code address [uses kArg0, sets kInvokeTgt]
if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
- cg->LoadWordDisp(cg->TargetReg(kArg0),
+ cg->LoadWordDisp(cg->TargetRefReg(kArg0),
mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
cg->TargetReg(kInvokeTgt));
break;
@@ -736,13 +757,13 @@
break;
case 1: { // Get "this" [set kArg1]
RegLocation rl_arg = info->args[0];
- cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
+ cg->LoadValueDirectFixed(rl_arg, cg->TargetRefReg(kArg1));
break;
}
case 2: // Is "this" null? [use kArg1]
- cg->GenNullCheck(cg->TargetReg(kArg1), info->opt_flags);
+ cg->GenNullCheck(cg->TargetRefReg(kArg1), info->opt_flags);
// Get this->klass_ [use kArg1, set kInvokeTgt]
- cg->LoadRefDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
+ cg->LoadRefDisp(cg->TargetRefReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
cg->TargetReg(kInvokeTgt),
kNotVolatile);
cg->MarkPossibleNullPointerException(info->opt_flags);
@@ -757,12 +778,12 @@
// NOTE: native pointer.
cg->LoadRefDisp(cg->TargetReg(kInvokeTgt),
ObjArray::OffsetOfElement(method_idx % ClassLinker::kImtSize).Int32Value(),
- cg->TargetReg(kArg0),
+ cg->TargetRefReg(kArg0),
kNotVolatile);
break;
case 5: // Get the compiled code address [use kArg0, set kInvokeTgt]
if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
- cg->LoadWordDisp(cg->TargetReg(kArg0),
+ cg->LoadWordDisp(cg->TargetRefReg(kArg0),
mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
cg->TargetReg(kInvokeTgt));
break;