Clean up call_x86.cc
Also adds some DCHECKs and fixes the bugs found by them.
Change-Id: I455bbfe2c6018590cf491880cd9273edbe39c4c7
Signed-off-by: Serguei Katkov <serguei.i.katkov@intel.com>
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 6ca220c..9000514 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -94,13 +94,10 @@
start_of_method_reg = rl_method.reg;
store_method_addr_used_ = true;
} else {
- if (cu_->target64) {
- start_of_method_reg = AllocTempWide();
- } else {
- start_of_method_reg = AllocTemp();
- }
+ start_of_method_reg = AllocTempRef();
NewLIR1(kX86StartOfMethod, start_of_method_reg.GetReg());
}
+ DCHECK_EQ(start_of_method_reg.Is64Bit(), cu_->target64);
int low_key = s4FromSwitchData(&table[2]);
RegStorage keyReg;
// Remove the bias, if necessary
@@ -111,7 +108,7 @@
OpRegRegImm(kOpSub, keyReg, rl_src.reg, low_key);
}
// Bounds check - if < 0 or >= size continue following switch
- OpRegImm(kOpCmp, keyReg, size-1);
+ OpRegImm(kOpCmp, keyReg, size - 1);
LIR* branch_over = OpCondBranch(kCondHi, NULL);
// Load the displacement from the switch table
@@ -119,11 +116,7 @@
NewLIR5(kX86PcRelLoadRA, disp_reg.GetReg(), start_of_method_reg.GetReg(), keyReg.GetReg(),
2, WrapPointer(tab_rec));
// Add displacement to start of method
- if (cu_->target64) {
- NewLIR2(kX86Add64RR, start_of_method_reg.GetReg(), disp_reg.GetReg());
- } else {
- OpRegReg(kOpAdd, start_of_method_reg, disp_reg);
- }
+ OpRegReg(kOpAdd, start_of_method_reg, cu_->target64 ? As64BitReg(disp_reg) : disp_reg);
// ..and go!
LIR* switch_branch = NewLIR1(kX86JmpR, start_of_method_reg.GetReg());
tab_rec->anchor = switch_branch;
@@ -174,7 +167,6 @@
}
store_method_addr_used_ = true;
} else {
- // TODO(64) force to be 64-bit
NewLIR1(kX86StartOfMethod, method_start.GetReg());
}
NewLIR2(kX86PcRelAdr, payload.GetReg(), WrapPointer(tab_rec));
@@ -193,8 +185,8 @@
Thread::ExceptionOffset<8>().Int32Value() :
Thread::ExceptionOffset<4>().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
- NewLIR2(kX86Mov32RT, rl_result.reg.GetReg(), ex_offset);
- NewLIR2(kX86Mov32TI, ex_offset, 0);
+ NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, rl_result.reg.GetReg(), ex_offset);
+ NewLIR2(cu_->target64 ? kX86Mov64TI : kX86Mov32TI, ex_offset, 0);
StoreValue(rl_dest, rl_result);
}
@@ -202,17 +194,15 @@
* Mark garbage collection card. Skip if the value we're storing is null.
*/
void X86Mir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
- RegStorage reg_card_base = AllocTemp();
- RegStorage reg_card_no = AllocTemp();
+ DCHECK_EQ(tgt_addr_reg.Is64Bit(), cu_->target64);
+ DCHECK_EQ(val_reg.Is64Bit(), cu_->target64);
+ RegStorage reg_card_base = AllocTempRef();
+ RegStorage reg_card_no = AllocTempRef();
LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
int ct_offset = cu_->target64 ?
Thread::CardTableOffset<8>().Int32Value() :
Thread::CardTableOffset<4>().Int32Value();
- if (cu_->target64) {
- NewLIR2(kX86Mov64RT, reg_card_base.GetReg(), ct_offset);
- } else {
- NewLIR2(kX86Mov32RT, reg_card_base.GetReg(), ct_offset);
- }
+ NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, reg_card_base.GetReg(), ct_offset);
OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
LIR* target = NewLIR0(kPseudoTargetLabel);
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 55e5993..5f4f23a 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -28,7 +28,7 @@
protected:
class InToRegStorageMapper {
public:
- virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide) = 0;
+ virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide, bool is_ref) = 0;
virtual ~InToRegStorageMapper() {}
};
@@ -36,7 +36,7 @@
public:
explicit InToRegStorageX86_64Mapper(Mir2Lir* ml) : ml_(ml), cur_core_reg_(0), cur_fp_reg_(0) {}
virtual ~InToRegStorageX86_64Mapper() {}
- virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide);
+ virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide, bool is_ref);
protected:
Mir2Lir* ml_;
private:
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 43882c2..2731343 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1867,7 +1867,7 @@
}
// ------------ ABI support: mapping of args to physical registers -------------
-RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide) {
+RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide, bool is_ref) {
const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3, kArg4, kArg5};
const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister);
const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3,
@@ -1880,7 +1880,8 @@
}
} else {
if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
- return ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], is_wide);
+ return is_ref ? ml_->TargetRefReg(coreArgMappingToPhysicalReg[cur_core_reg_++]) :
+ ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], is_wide);
}
}
return RegStorage::InvalidReg();
@@ -1897,11 +1898,12 @@
max_mapped_in_ = -1;
is_there_stack_mapped_ = false;
for (int in_position = 0; in_position < count; in_position++) {
- RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp, arg_locs[in_position].wide);
+ RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp,
+ arg_locs[in_position].wide, arg_locs[in_position].ref);
if (reg.Valid()) {
mapping_[in_position] = reg;
max_mapped_in_ = std::max(max_mapped_in_, in_position);
- if (reg.Is64BitSolo()) {
+ if (arg_locs[in_position].wide) {
// We covered 2 args, so skip the next one
in_position++;
}