Remove LoadBaseDispWide and StoreBaseDispWide.
Instead, pass k64 or kDouble to the non-wide versions.
Change-Id: I000619c3b78d3a71db42edc747c8a0ba1ee229be
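
For reference, a minimal sketch of the call-site migration this implies (names are taken from the patch itself; r_base, disp, and the RegLocations are placeholders, not code from any one file):

  // Before: wide accesses had dedicated entry points.
  LoadBaseDispWide(r_base, disp, rl_result.reg, INVALID_SREG);
  StoreBaseDispWide(r_base, disp, rl_src.reg);

  // After: the regular entry points take an explicit OpSize, so wide
  // accesses simply pass k64 (or kDouble for floating-point values).
  LoadBaseDisp(r_base, disp, rl_result.reg, k64, INVALID_SREG);
  StoreBaseDisp(r_base, disp, rl_src.reg, k64);

  // Call sites that used to branch on wide/ref/narrow can pick the
  // OpSize with the new constexpr helper added in mir_to_lir.h
  // (wide ? k64 : ref ? kReference : k32):
  OpSize size = LoadStoreOpSize(rl_src.wide, rl_src.ref);
  StoreBaseDisp(r_base, disp, rl_src.reg, size);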
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 9d1723a..8b4576c 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -34,7 +34,6 @@
RegStorage LoadHelper(ThreadOffset<4> offset);
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
int s_reg);
- LIR* LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest, int s_reg);
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
OpSize size);
LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
@@ -42,7 +41,6 @@
LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
- LIR* StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src);
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size);
LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 8391c03..8dd31d1 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -1170,19 +1170,14 @@
}
FreeTemp(reg_len);
}
+ LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, INVALID_SREG);
+ MarkPossibleNullPointerException(opt_flags);
+ if (!constant_index) {
+ FreeTemp(reg_ptr);
+ }
if (rl_dest.wide) {
- LoadBaseDispWide(reg_ptr, data_offset, rl_result.reg, INVALID_SREG);
- MarkPossibleNullPointerException(opt_flags);
- if (!constant_index) {
- FreeTemp(reg_ptr);
- }
StoreValueWide(rl_dest, rl_result);
} else {
- LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, INVALID_SREG);
- MarkPossibleNullPointerException(opt_flags);
- if (!constant_index) {
- FreeTemp(reg_ptr);
- }
StoreValue(rl_dest, rl_result);
}
} else {
@@ -1275,11 +1270,7 @@
FreeTemp(reg_len);
}
- if (rl_src.wide) {
- StoreBaseDispWide(reg_ptr, data_offset, rl_src.reg);
- } else {
- StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size);
- }
+ StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size);
MarkPossibleNullPointerException(opt_flags);
} else {
/* reg_ptr -> array data */
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 08acef7..b7b9093 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -957,7 +957,6 @@
LIR* ArmMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
int s_reg) {
- DCHECK(!((size == k64) || (size == kDouble)));
// TODO: base this on target.
if (size == kWord) {
size = k32;
@@ -965,11 +964,6 @@
return LoadBaseDispBody(r_base, displacement, r_dest, size, s_reg);
}
-LIR* ArmMir2Lir::LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
- int s_reg) {
- return LoadBaseDispBody(r_base, displacement, r_dest, k64, s_reg);
-}
-
LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) {
@@ -1091,14 +1085,9 @@
if (size == kWord) {
size = k32;
}
- DCHECK(!((size == k64) || (size == kDouble)));
return StoreBaseDispBody(r_base, displacement, r_src, size);
}
-LIR* ArmMir2Lir::StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) {
- return StoreBaseDispBody(r_base, displacement, r_src, k64);
-}
-
LIR* ArmMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
int opcode;
DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 94c2563..4e784c6 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -34,7 +34,6 @@
RegStorage LoadHelper(ThreadOffset<4> offset);
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
int s_reg);
- LIR* LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest, int s_reg);
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
OpSize size);
LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
@@ -42,7 +41,6 @@
LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
- LIR* StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src);
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size);
LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 11fb765..c5a3ab6 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -1170,19 +1170,14 @@
}
FreeTemp(reg_len);
}
+ LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, INVALID_SREG);
+ MarkPossibleNullPointerException(opt_flags);
+ if (!constant_index) {
+ FreeTemp(reg_ptr);
+ }
if (rl_dest.wide) {
- LoadBaseDispWide(reg_ptr, data_offset, rl_result.reg, INVALID_SREG);
- MarkPossibleNullPointerException(opt_flags);
- if (!constant_index) {
- FreeTemp(reg_ptr);
- }
StoreValueWide(rl_dest, rl_result);
} else {
- LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, INVALID_SREG);
- MarkPossibleNullPointerException(opt_flags);
- if (!constant_index) {
- FreeTemp(reg_ptr);
- }
StoreValue(rl_dest, rl_result);
}
} else {
@@ -1275,11 +1270,7 @@
FreeTemp(reg_len);
}
- if (rl_src.wide) {
- StoreBaseDispWide(reg_ptr, data_offset, rl_src.reg);
- } else {
- StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size);
- }
+ StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size);
MarkPossibleNullPointerException(opt_flags);
} else {
/* reg_ptr -> array data */
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index d66b834..8ff1830 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -957,7 +957,6 @@
LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
int s_reg) {
- DCHECK(!((size == k64) || (size == kDouble)));
// TODO: base this on target.
if (size == kWord) {
size = k32;
@@ -965,11 +964,6 @@
return LoadBaseDispBody(r_base, displacement, r_dest, size, s_reg);
}
-LIR* Arm64Mir2Lir::LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
- int s_reg) {
- return LoadBaseDispBody(r_base, displacement, r_dest, k64, s_reg);
-}
-
LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) {
@@ -1091,14 +1085,9 @@
if (size == kWord) {
size = k32;
}
- DCHECK(!((size == k64) || (size == kDouble)));
return StoreBaseDispBody(r_base, displacement, r_src, size);
}
-LIR* Arm64Mir2Lir::StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) {
- return StoreBaseDispBody(r_base, displacement, r_src, k64);
-}
-
LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
int opcode;
DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 2cd17cc..395cff7 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -564,13 +564,8 @@
// There might have been a store before this volatile one so insert StoreStore barrier.
GenMemBarrier(kStoreStore);
}
- if (is_long_or_double) {
- StoreBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
- } else if (rl_src.ref) {
- StoreRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
- } else {
- Store32Disp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
- }
+ OpSize size = LoadStoreOpSize(is_long_or_double, rl_src.ref);
+ StoreBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, size);
if (field_info.IsVolatile()) {
// A load might follow the volatile store so insert a StoreLoad barrier.
GenMemBarrier(kStoreLoad);
@@ -646,13 +641,8 @@
}
RegLocation rl_result = EvalLoc(rl_dest, result_reg_kind, true);
- if (is_long_or_double) {
- LoadBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg, INVALID_SREG);
- } else if (rl_result.ref) {
- LoadRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg);
- } else {
- Load32Disp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg);
- }
+ OpSize size = LoadStoreOpSize(is_long_or_double, rl_result.ref);
+ LoadBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg, size, INVALID_SREG);
FreeTemp(r_base);
if (field_info.IsVolatile()) {
@@ -714,8 +704,8 @@
result_reg_kind = kFPReg;
}
rl_result = EvalLoc(rl_dest, result_reg_kind, true);
- LoadBaseDispWide(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg,
- rl_obj.s_reg_low);
+ LoadBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg,
+ size, rl_obj.s_reg_low);
MarkPossibleNullPointerException(opt_flags);
if (field_info.IsVolatile()) {
// Without context sensitive analysis, we must issue the most conservative barriers.
@@ -727,7 +717,7 @@
RegStorage reg_ptr = AllocTemp();
OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
rl_result = EvalLoc(rl_dest, reg_class, true);
- LoadBaseDispWide(reg_ptr, 0, rl_result.reg, INVALID_SREG);
+ LoadBaseDisp(reg_ptr, 0, rl_result.reg, size, INVALID_SREG);
MarkPossibleNullPointerException(opt_flags);
if (field_info.IsVolatile()) {
// Without context sensitive analysis, we must issue the most conservative barriers.
@@ -791,7 +781,7 @@
// There might have been a store before this volatile one so insert StoreStore barrier.
GenMemBarrier(kStoreStore);
}
- StoreBaseDispWide(reg_ptr, 0, rl_src.reg);
+ StoreBaseDisp(reg_ptr, 0, rl_src.reg, size);
MarkPossibleNullPointerException(opt_flags);
if (field_info.IsVolatile()) {
// A load might follow the volatile store so insert a StoreLoad barrier.
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 9c1fbe4..960ac10 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -791,7 +791,7 @@
}
int outs_offset = (next_use + 1) * 4;
if (rl_arg.wide) {
- StoreBaseDispWide(TargetReg(kSp), outs_offset, arg_reg);
+ StoreBaseDisp(TargetReg(kSp), outs_offset, arg_reg, k64);
next_use += 2;
} else {
Store32Disp(TargetReg(kSp), outs_offset, arg_reg);
@@ -859,7 +859,7 @@
if (loc.wide) {
loc = UpdateLocWide(loc);
if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
- StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64);
}
next_arg += 2;
} else {
@@ -1433,7 +1433,7 @@
} else {
RegStorage rl_temp_offset = AllocTemp();
OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
- LoadBaseDispWide(rl_temp_offset, 0, rl_result.reg, INVALID_SREG);
+ LoadBaseDisp(rl_temp_offset, 0, rl_result.reg, k64, INVALID_SREG);
FreeTemp(rl_temp_offset);
}
} else {
@@ -1480,7 +1480,7 @@
} else {
RegStorage rl_temp_offset = AllocTemp();
OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
- StoreBaseDispWide(rl_temp_offset, 0, rl_value.reg);
+ StoreBaseDisp(rl_temp_offset, 0, rl_value.reg, k64);
FreeTemp(rl_temp_offset);
}
} else {
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index e6911cd..6fe1e31 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -123,7 +123,7 @@
} else {
DCHECK((rl_src.location == kLocDalvikFrame) ||
(rl_src.location == kLocCompilerTemp));
- LoadBaseDispWide(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, INVALID_SREG);
+ LoadBaseDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, k64, INVALID_SREG);
}
}
@@ -258,7 +258,7 @@
def_start = last_lir_insn_;
DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
- StoreBaseDispWide(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, k64);
MarkClean(rl_dest);
def_end = last_lir_insn_;
MarkDefWide(rl_dest, def_start, def_end);
@@ -320,7 +320,7 @@
LIR *def_start = last_lir_insn_;
DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
- StoreBaseDispWide(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, k64);
MarkClean(rl_dest);
LIR *def_end = last_lir_insn_;
MarkDefWide(rl_dest, def_start, def_end);
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 7a8376e..cdabf8e 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -35,7 +35,6 @@
LIR* LoadBaseDisp(int r_base, int displacement, int r_dest, OpSize size, int s_reg);
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
int s_reg);
- LIR* LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest, int s_reg);
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
OpSize size);
LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
@@ -43,7 +42,6 @@
LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
- LIR* StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src);
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size);
LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 1410e14..fe2e495 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -511,7 +511,7 @@
GenArrayBoundsCheck(rl_index.reg, reg_len);
FreeTemp(reg_len);
}
- LoadBaseDispWide(reg_ptr, 0, rl_result.reg, INVALID_SREG);
+ LoadBaseDisp(reg_ptr, 0, rl_result.reg, size, INVALID_SREG);
FreeTemp(reg_ptr);
StoreValueWide(rl_dest, rl_result);
@@ -589,7 +589,7 @@
FreeTemp(reg_len);
}
- StoreBaseDispWide(reg_ptr, 0, rl_src.reg);
+ StoreBaseDisp(reg_ptr, 0, rl_src.reg, size);
} else {
rl_src = LoadValue(rl_src, reg_class);
if (needs_range_check) {
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 50b945a..9aa929c 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -551,15 +551,15 @@
if (size == kWord) {
size = k32;
}
- return LoadBaseDispBody(r_base, displacement, r_dest, RegStorage::InvalidReg(), size,
- s_reg);
+ if (size == k64 || size == kDouble) {
+ return LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), r_dest.GetHigh(), k64, s_reg);
+ } else {
+ return LoadBaseDispBody(r_base, displacement, r_dest, RegStorage::InvalidReg(), size,
+ s_reg);
+ }
}
-LIR* MipsMir2Lir::LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
- int s_reg) {
- return LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), r_dest.GetHigh(), k64, s_reg);
-}
-
+// FIXME: don't split r_src into 2 containers.
LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
RegStorage r_src, RegStorage r_src_hi, OpSize size) {
LIR *res;
@@ -647,11 +647,11 @@
if (size == kWord) {
size = k32;
}
- return StoreBaseDispBody(r_base, displacement, r_src, RegStorage::InvalidReg(), size);
-}
-
-LIR* MipsMir2Lir::StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) {
- return StoreBaseDispBody(r_base, displacement, r_src.GetLow(), r_src.GetHigh(), k64);
+ if (size == k64 || size == kDouble) {
+ return StoreBaseDispBody(r_base, displacement, r_src.GetLow(), r_src.GetHigh(), size);
+ } else {
+ return StoreBaseDispBody(r_base, displacement, r_src, RegStorage::InvalidReg(), size);
+ }
}
LIR* MipsMir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index c9e1950..9915ff6 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -59,7 +59,7 @@
RegStorage new_regs = AllocTypedTempWide(false, kAnyReg);
reg_arg_low = new_regs.GetLow();
reg_arg_high = new_regs.GetHigh();
- LoadBaseDispWide(TargetReg(kSp), offset, new_regs, INVALID_SREG);
+ LoadBaseDisp(TargetReg(kSp), offset, new_regs, k64, INVALID_SREG);
} else {
reg_arg_high = AllocTemp();
int offset_high = offset + sizeof(uint32_t);
@@ -112,7 +112,7 @@
OpRegCopy(rl_dest.reg.GetHigh(), reg_arg_high);
Load32Disp(TargetReg(kSp), offset, rl_dest.reg.GetLow());
} else {
- LoadBaseDispWide(TargetReg(kSp), offset, rl_dest.reg, INVALID_SREG);
+ LoadBaseDisp(TargetReg(kSp), offset, rl_dest.reg, k64, INVALID_SREG);
}
}
}
@@ -126,6 +126,9 @@
}
bool wide = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE));
+ bool ref = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT));
+ OpSize size = LoadStoreOpSize(wide, ref);
+
// The inliner doesn't distinguish kDouble or kFloat, use shorty.
bool double_or_float = cu_->shorty[0] == 'F' || cu_->shorty[0] == 'D';
@@ -134,11 +137,7 @@
LockArg(data.object_arg);
RegLocation rl_dest = wide ? GetReturnWide(double_or_float) : GetReturn(double_or_float);
RegStorage reg_obj = LoadArg(data.object_arg);
- if (wide) {
- LoadBaseDispWide(reg_obj, data.field_offset, rl_dest.reg, INVALID_SREG);
- } else {
- Load32Disp(reg_obj, data.field_offset, rl_dest.reg);
- }
+ LoadBaseDisp(reg_obj, data.field_offset, rl_dest.reg, size, INVALID_SREG);
if (data.is_volatile) {
// Without context sensitive analysis, we must issue the most conservative barriers.
// In this case, either a load or store may follow so we issue both barriers.
@@ -161,6 +160,8 @@
}
bool wide = (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE));
+ bool ref = (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT));
+ OpSize size = LoadStoreOpSize(wide, ref);
// Point of no return - no aborts after this
GenPrintLabel(mir);
@@ -172,16 +173,12 @@
// There might have been a store before this volatile one so insert StoreStore barrier.
GenMemBarrier(kStoreStore);
}
- if (wide) {
- StoreBaseDispWide(reg_obj, data.field_offset, reg_src);
- } else {
- Store32Disp(reg_obj, data.field_offset, reg_src);
- }
+ StoreBaseDisp(reg_obj, data.field_offset, reg_src, size);
if (data.is_volatile) {
// A load might follow the volatile store so insert a StoreLoad barrier.
GenMemBarrier(kStoreLoad);
}
- if (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT)) {
+ if (ref) {
MarkGCCard(reg_src, reg_obj);
}
return true;
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index cb4396f..cc6532c 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -977,8 +977,6 @@
virtual RegStorage LoadHelper(ThreadOffset<4> offset) = 0;
virtual LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
int s_reg) = 0;
- virtual LIR* LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
- int s_reg) = 0;
virtual LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
int scale, OpSize size) = 0;
virtual LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
@@ -988,7 +986,6 @@
virtual LIR* LoadConstantWide(RegStorage r_dest, int64_t value) = 0;
virtual LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) = 0;
- virtual LIR* StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) = 0;
virtual LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) = 0;
virtual LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
@@ -1263,6 +1260,10 @@
*/
RegLocation ForceTempWide(RegLocation loc);
+ static constexpr OpSize LoadStoreOpSize(bool wide, bool ref) {
+ return wide ? k64 : ref ? kReference : k32;
+ }
+
virtual void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
RegLocation rl_dest, RegLocation rl_src);
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index a39611e..76553af 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -634,14 +634,14 @@
info1 = info2;
}
int v_reg = mir_graph_->SRegToVReg(info1->SReg());
- StoreBaseDispWide(TargetReg(kSp), VRegOffset(v_reg), reg);
+ StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, k64);
}
} else {
RegisterInfo* info = GetRegInfo(reg);
if (info->IsLive() && info->IsDirty()) {
info->SetIsDirty(false);
int v_reg = mir_graph_->SRegToVReg(info->SReg());
- StoreBaseDispWide(TargetReg(kSp), VRegOffset(v_reg), reg);
+ StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, k64);
}
}
}
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 8f0490c..1898738 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -34,7 +34,6 @@
RegStorage LoadHelper(ThreadOffset<4> offset);
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
int s_reg);
- LIR* LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest, int s_reg);
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
OpSize size);
LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
@@ -42,7 +41,6 @@
LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
- LIR* StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src);
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size);
LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 1ed0b63..74828c7 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -149,7 +149,7 @@
} else {
// It must have been register promoted if it is not a temp but is still in physical
// register. Since we need it to be in memory to convert, we place it there now.
- StoreBaseDispWide(TargetReg(kSp), src_v_reg_offset, rl_src.reg);
+ StoreBaseDisp(TargetReg(kSp), src_v_reg_offset, rl_src.reg, k64);
}
}
@@ -183,7 +183,7 @@
if (is_double) {
rl_result = EvalLocWide(rl_dest, kFPReg, true);
- LoadBaseDispWide(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, INVALID_SREG);
+ LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64, INVALID_SREG);
StoreFinalValueWide(rl_dest, rl_result);
} else {
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index b747102..b71a2ce 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -688,14 +688,12 @@
RegLocation rl_dest = size == k64 ? InlineTargetWide(info) : InlineTarget(info);
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ // Unaligned access is allowed on x86.
+ LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, INVALID_SREG);
if (size == k64) {
- // Unaligned access is allowed on x86.
- LoadBaseDispWide(rl_address.reg, 0, rl_result.reg, INVALID_SREG);
StoreValueWide(rl_dest, rl_result);
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
- // Unaligned access is allowed on x86.
- LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, INVALID_SREG);
StoreValue(rl_dest, rl_result);
}
return true;
@@ -709,7 +707,7 @@
if (size == k64) {
// Unaligned access is allowed on x86.
RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
- StoreBaseDispWide(rl_address.reg, 0, rl_value.reg);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
// Unaligned access is allowed on x86.
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index da6ded5..7fe0d1f 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -676,11 +676,6 @@
size, s_reg);
}
-LIR* X86Mir2Lir::LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
- int s_reg) {
- return LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_dest, k64, s_reg);
-}
-
LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
int displacement, RegStorage r_src, OpSize size, int s_reg) {
LIR *store = NULL;
@@ -770,11 +765,6 @@
INVALID_SREG);
}
-LIR* X86Mir2Lir::StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) {
- return StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement,
- r_src, k64, INVALID_SREG);
-}
-
LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
int offset, int check_value, LIR* target) {
NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(), offset,