Merge "Support inlining with breakpoint"
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 0f1e171..6df341b 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -22,13 +22,14 @@
namespace art {
-class ArmMir2Lir : public Mir2Lir {
+class ArmMir2Lir FINAL : public Mir2Lir {
public:
ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
// Required for target - codegen helpers.
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
RegLocation rl_dest, int lit);
+ bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(ThreadOffset offset);
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
@@ -181,8 +182,9 @@
LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
int s_reg);
LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
- LIR* OpRegRegRegShift(OpKind op, int r_dest, int r_src1, int r_src2, int shift);
- LIR* OpRegRegShift(OpKind op, int r_dest_src1, int r_src2, int shift);
+ LIR* OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
+ int shift);
+ LIR* OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift);
static const ArmEncodingMap EncodingMap[kArmLast];
int EncodeShift(int code, int amount);
int ModifiedImmediate(uint32_t value);
@@ -202,6 +204,13 @@
RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
bool is_div, bool check_zero);
RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div);
+  struct EasyMultiplyOp {
+    OpKind op;
+    uint32_t shift;
+  };
+ bool GetEasyMultiplyOp(int lit, EasyMultiplyOp* op);
+ bool GetEasyMultiplyTwoOps(int lit, EasyMultiplyOp* ops);
+ void GenEasyMultiplyTwoOps(RegStorage r_dest, RegStorage r_src, EasyMultiplyOp* ops);
};
} // namespace art
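
The EasyMultiplyOp pair declared above encodes a multiply-by-constant as at most two shift-based steps. A minimal host-side sketch of the per-step arithmetic, assuming the OpKind values named in this header (illustrative only, not part of the patch):

    #include <cstdint>

    enum OpKind { kOpInvalid, kOpLsl, kOpAdd, kOpRsub };  // subset, for the sketch
    struct EasyMultiplyOp { OpKind op; uint32_t shift; };

    // One step of GenEasyMultiplyTwoOps: x is the running value (src on the
    // first step, the previous result on the second); src is the original operand.
    int32_t ApplyEasyOp(int32_t x, int32_t src, EasyMultiplyOp op) {
      switch (op.op) {
        case kOpLsl:  return x << op.shift;          // x * (1 << shift)
        case kOpAdd:  return src + (x << op.shift);  // x * (1 << shift) + src
        case kOpRsub: return (x << op.shift) - src;  // x * (1 << shift) - src
        default:      return x;                      // kOpInvalid: second step unused
      }
    }

For example, lit == 7 needs only ops[0] = {kOpRsub, 3}: (x << 3) - x.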
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 46db466..964c2fb 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -425,10 +425,6 @@
if (pattern == DivideNone) {
return false;
}
- // Tuning: add rem patterns
- if (!is_div) {
- return false;
- }
RegStorage r_magic = AllocTemp();
LoadConstant(r_magic, magic_table[lit].magic);
@@ -439,23 +435,136 @@
NewLIR4(kThumb2Smull, r_lo.GetReg(), r_hi.GetReg(), r_magic.GetReg(), rl_src.reg.GetReg());
switch (pattern) {
case Divide3:
- OpRegRegRegShift(kOpSub, rl_result.reg.GetReg(), r_hi.GetReg(),
- rl_src.reg.GetReg(), EncodeShift(kArmAsr, 31));
+ OpRegRegRegShift(kOpSub, rl_result.reg, r_hi, rl_src.reg, EncodeShift(kArmAsr, 31));
break;
case Divide5:
OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
- OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo.GetReg(), r_hi.GetReg(),
- EncodeShift(kArmAsr, magic_table[lit].shift));
+ OpRegRegRegShift(kOpRsub, rl_result.reg, r_lo, r_hi,
+ EncodeShift(kArmAsr, magic_table[lit].shift));
break;
case Divide7:
OpRegReg(kOpAdd, r_hi, rl_src.reg);
OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
- OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo.GetReg(), r_hi.GetReg(),
- EncodeShift(kArmAsr, magic_table[lit].shift));
+ OpRegRegRegShift(kOpRsub, rl_result.reg, r_lo, r_hi,
+ EncodeShift(kArmAsr, magic_table[lit].shift));
break;
default:
LOG(FATAL) << "Unexpected pattern: " << pattern;
}
+
+ if (!is_div) {
+ RegStorage tmp1 = r_lo;
+ EasyMultiplyOp ops[2];
+
+    bool can_easy_multiply = GetEasyMultiplyTwoOps(lit, ops);
+    DCHECK(can_easy_multiply);
+
+ GenEasyMultiplyTwoOps(tmp1, rl_result.reg, ops);
+ OpRegRegReg(kOpSub, rl_result.reg, rl_src.reg, tmp1);
+ }
+
+ StoreValue(rl_dest, rl_result);
+ return true;
+}
+
+// Try to convert lit into a single RegRegRegShift/RegRegShift operation.
+bool ArmMir2Lir::GetEasyMultiplyOp(int lit, ArmMir2Lir::EasyMultiplyOp* op) {
+ if (IsPowerOfTwo(lit)) {
+ op->op = kOpLsl;
+ op->shift = LowestSetBit(lit);
+ return true;
+ }
+
+ if (IsPowerOfTwo(lit - 1)) {
+ op->op = kOpAdd;
+ op->shift = LowestSetBit(lit - 1);
+ return true;
+ }
+
+ if (IsPowerOfTwo(lit + 1)) {
+ op->op = kOpRsub;
+ op->shift = LowestSetBit(lit + 1);
+ return true;
+ }
+
+ op->op = kOpInvalid;
+ return false;
+}
+
+// Try to convert lit into one or two RegRegRegShift/RegRegShift operations.
+bool ArmMir2Lir::GetEasyMultiplyTwoOps(int lit, EasyMultiplyOp* ops) {
+  if (GetEasyMultiplyOp(lit, &ops[0])) {
+ ops[1].op = kOpInvalid;
+ return true;
+ }
+
+ int lit1 = lit;
+ uint32_t shift = LowestSetBit(lit1);
+ if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
+ ops[1].op = kOpLsl;
+ ops[1].shift = shift;
+ return true;
+ }
+
+ lit1 = lit - 1;
+ shift = LowestSetBit(lit1);
+ if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
+ ops[1].op = kOpAdd;
+ ops[1].shift = shift;
+ return true;
+ }
+
+ lit1 = lit + 1;
+ shift = LowestSetBit(lit1);
+ if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
+ ops[1].op = kOpRsub;
+ ops[1].shift = shift;
+ return true;
+ }
+
+ return false;
+}
+
+void ArmMir2Lir::GenEasyMultiplyTwoOps(RegStorage r_dest, RegStorage r_src, EasyMultiplyOp* ops) {
+ // dest = ( src << shift1) + [ src | -src | 0 ]
+ // dest = (dest << shift2) + [ src | -src | 0 ]
+ for (int i = 0; i < 2; i++) {
+ RegStorage r_src2;
+ if (i == 0) {
+ r_src2 = r_src;
+ } else {
+ r_src2 = r_dest;
+ }
+ switch (ops[i].op) {
+ case kOpLsl:
+ OpRegRegImm(kOpLsl, r_dest, r_src2, ops[i].shift);
+ break;
+ case kOpAdd:
+ OpRegRegRegShift(kOpAdd, r_dest, r_src, r_src2, EncodeShift(kArmLsl, ops[i].shift));
+ break;
+ case kOpRsub:
+ OpRegRegRegShift(kOpRsub, r_dest, r_src, r_src2, EncodeShift(kArmLsl, ops[i].shift));
+ break;
+ default:
+ DCHECK_NE(i, 0);
+ DCHECK_EQ(ops[i].op, kOpInvalid);
+ break;
+ }
+ }
+}
+
+bool ArmMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+ EasyMultiplyOp ops[2];
+
+ if (!GetEasyMultiplyTwoOps(lit, ops)) {
+ return false;
+ }
+
+ rl_src = LoadValue(rl_src, kCoreReg);
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+
+ GenEasyMultiplyTwoOps(rl_result.reg, rl_src.reg, ops);
StoreValue(rl_dest, rl_result);
return true;
}
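
The non-division path above computes rem = src - (src / lit) * lit by reusing the two-op multiply on the quotient. Tracing GetEasyMultiplyTwoOps and GenEasyMultiplyTwoOps for a literal that needs both steps, say lit == 10:

    // 10 is not 2^n, 2^n + 1, or 2^n - 1, so the trailing zero bit is stripped:
    //   shift = LowestSetBit(10) = 1, and 10 >> 1 = 5 = (1 << 2) + 1
    //   ops[0] = {kOpAdd, 2}  ->  t = src + (src << 2)   // t = 5 * src
    //   ops[1] = {kOpLsl, 1}  ->  t = t << 1             // t = 10 * src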
@@ -752,7 +861,7 @@
void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
- OpRegRegRegShift(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg(),
+ OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
EncodeShift(kArmLsl, second_bit - first_bit));
if (first_bit != 0) {
OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
@@ -898,8 +1007,7 @@
NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg());
NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src1.reg.GetLowReg(),
rl_src1.reg.GetLowReg());
- OpRegRegRegShift(kOpAdd, res_hi.GetReg(), res_hi.GetReg(), tmp1.GetReg(),
- EncodeShift(kArmLsl, 1));
+ OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
} else {
NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetHighReg());
if (reg_status == 2) {
@@ -1009,8 +1117,7 @@
} else {
// No special indexed operation, lea + load w/ displacement
reg_ptr = AllocTemp();
- OpRegRegRegShift(kOpAdd, reg_ptr.GetReg(), rl_array.reg.GetReg(), rl_index.reg.GetReg(),
- EncodeShift(kArmLsl, scale));
+ OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, rl_index.reg, EncodeShift(kArmLsl, scale));
FreeTemp(rl_index.reg.GetReg());
}
rl_result = EvalLoc(rl_dest, reg_class, true);
@@ -1117,8 +1224,7 @@
rl_src = LoadValue(rl_src, reg_class);
}
if (!constant_index) {
- OpRegRegRegShift(kOpAdd, reg_ptr.GetReg(), rl_array.reg.GetReg(), rl_index.reg.GetReg(),
- EncodeShift(kArmLsl, scale));
+ OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, rl_index.reg, EncodeShift(kArmLsl, scale));
}
if (needs_range_check) {
if (constant_index) {
@@ -1183,7 +1289,7 @@
LoadConstant(rl_result.reg.GetLow(), 0);
} else {
OpRegRegImm(kOpLsl, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
- OpRegRegRegShift(kOpOr, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), rl_src.reg.GetLowReg(),
+ OpRegRegRegShift(kOpOr, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), rl_src.reg.GetLow(),
EncodeShift(kArmLsr, 32 - shift_amount));
OpRegRegImm(kOpLsl, rl_result.reg.GetLow(), rl_src.reg.GetLow(), shift_amount);
}
@@ -1199,7 +1305,7 @@
} else {
RegStorage t_reg = AllocTemp();
OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetLow(), shift_amount);
- OpRegRegRegShift(kOpOr, rl_result.reg.GetLowReg(), t_reg.GetReg(), rl_src.reg.GetHighReg(),
+ OpRegRegRegShift(kOpOr, rl_result.reg.GetLow(), t_reg, rl_src.reg.GetHigh(),
EncodeShift(kArmLsl, 32 - shift_amount));
FreeTemp(t_reg);
OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
@@ -1216,7 +1322,7 @@
} else {
RegStorage t_reg = AllocTemp();
OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetLow(), shift_amount);
- OpRegRegRegShift(kOpOr, rl_result.reg.GetLowReg(), t_reg.GetReg(), rl_src.reg.GetHighReg(),
+ OpRegRegRegShift(kOpOr, rl_result.reg.GetLow(), t_reg, rl_src.reg.GetHigh(),
EncodeShift(kArmLsl, 32 - shift_amount));
FreeTemp(t_reg);
OpRegRegImm(kOpLsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
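
The rewritten long-shift sequences compose a 64-bit shift from two 32-bit register halves. Their semantics in plain C, for 0 < n < 32 (a sketch of what the emitted code computes, not patch code):

    // Shift left: the high word picks up the bits that cross the word boundary.
    uint32_t hi = (src_hi << n) | (src_lo >> (32 - n));
    uint32_t lo = src_lo << n;

    // Arithmetic shift right: the low word picks up bits from the high word.
    uint32_t lo2 = (src_lo >> n) | (src_hi << (32 - n));
    int32_t  hi2 = static_cast<int32_t>(src_hi) >> n;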
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 1ec0a2c..cf90fb1 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -234,9 +234,10 @@
return NewLIR1(opcode, r_dest_src.GetReg());
}
-LIR* ArmMir2Lir::OpRegRegShift(OpKind op, int r_dest_src1, int r_src2,
+LIR* ArmMir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
int shift) {
- bool thumb_form = ((shift == 0) && ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2));
+ bool thumb_form =
+ ((shift == 0) && ARM_LOWREG(r_dest_src1.GetReg()) && ARM_LOWREG(r_src2.GetReg()));
ArmOpcode opcode = kThumbBkpt;
switch (op) {
case kOpAdc:
@@ -255,9 +256,9 @@
case kOpCmp:
if (thumb_form)
opcode = kThumbCmpRR;
- else if ((shift == 0) && !ARM_LOWREG(r_dest_src1) && !ARM_LOWREG(r_src2))
+ else if ((shift == 0) && !ARM_LOWREG(r_dest_src1.GetReg()) && !ARM_LOWREG(r_src2.GetReg()))
opcode = kThumbCmpHH;
- else if ((shift == 0) && ARM_LOWREG(r_dest_src1))
+ else if ((shift == 0) && ARM_LOWREG(r_dest_src1.GetReg()))
opcode = kThumbCmpLH;
else if (shift == 0)
opcode = kThumbCmpHL;
@@ -269,11 +270,11 @@
break;
case kOpMov:
DCHECK_EQ(shift, 0);
- if (ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2))
+ if (ARM_LOWREG(r_dest_src1.GetReg()) && ARM_LOWREG(r_src2.GetReg()))
opcode = kThumbMovRR;
- else if (!ARM_LOWREG(r_dest_src1) && !ARM_LOWREG(r_src2))
+ else if (!ARM_LOWREG(r_dest_src1.GetReg()) && !ARM_LOWREG(r_src2.GetReg()))
opcode = kThumbMovRR_H2H;
- else if (ARM_LOWREG(r_dest_src1))
+ else if (ARM_LOWREG(r_dest_src1.GetReg()))
opcode = kThumbMovRR_H2L;
else
opcode = kThumbMovRR_L2H;
@@ -324,7 +325,7 @@
DCHECK_EQ(shift, 0);
if (!thumb_form) {
// Binary, but rm is encoded twice.
- return NewLIR3(kThumb2RevRR, r_dest_src1, r_src2, r_src2);
+ return NewLIR3(kThumb2RevRR, r_dest_src1.GetReg(), r_src2.GetReg(), r_src2.GetReg());
}
opcode = kThumbRev;
break;
@@ -332,34 +333,34 @@
DCHECK_EQ(shift, 0);
if (!thumb_form) {
// Binary, but rm is encoded twice.
- return NewLIR3(kThumb2RevshRR, r_dest_src1, r_src2, r_src2);
+ return NewLIR3(kThumb2RevshRR, r_dest_src1.GetReg(), r_src2.GetReg(), r_src2.GetReg());
}
opcode = kThumbRevsh;
break;
case kOp2Byte:
DCHECK_EQ(shift, 0);
- return NewLIR4(kThumb2Sbfx, r_dest_src1, r_src2, 0, 8);
+ return NewLIR4(kThumb2Sbfx, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 8);
case kOp2Short:
DCHECK_EQ(shift, 0);
- return NewLIR4(kThumb2Sbfx, r_dest_src1, r_src2, 0, 16);
+ return NewLIR4(kThumb2Sbfx, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 16);
case kOp2Char:
DCHECK_EQ(shift, 0);
- return NewLIR4(kThumb2Ubfx, r_dest_src1, r_src2, 0, 16);
+ return NewLIR4(kThumb2Ubfx, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 16);
default:
LOG(FATAL) << "Bad opcode: " << op;
break;
}
DCHECK(!IsPseudoLirOp(opcode));
if (EncodingMap[opcode].flags & IS_BINARY_OP) {
- return NewLIR2(opcode, r_dest_src1, r_src2);
+ return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
} else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
if (EncodingMap[opcode].field_loc[2].kind == kFmtShift) {
- return NewLIR3(opcode, r_dest_src1, r_src2, shift);
+ return NewLIR3(opcode, r_dest_src1.GetReg(), r_src2.GetReg(), shift);
} else {
- return NewLIR3(opcode, r_dest_src1, r_dest_src1, r_src2);
+ return NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg());
}
} else if (EncodingMap[opcode].flags & IS_QUAD_OP) {
- return NewLIR4(opcode, r_dest_src1, r_dest_src1, r_src2, shift);
+ return NewLIR4(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg(), shift);
} else {
LOG(FATAL) << "Unexpected encoding operand count";
return NULL;
@@ -367,7 +368,7 @@
}
LIR* ArmMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
- return OpRegRegShift(op, r_dest_src1.GetReg(), r_src2.GetReg(), 0);
+ return OpRegRegShift(op, r_dest_src1, r_src2, 0);
}
LIR* ArmMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
@@ -385,11 +386,11 @@
return NULL;
}
-LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, int r_dest, int r_src1,
- int r_src2, int shift) {
+LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
+ RegStorage r_src2, int shift) {
ArmOpcode opcode = kThumbBkpt;
- bool thumb_form = (shift == 0) && ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1) &&
- ARM_LOWREG(r_src2);
+ bool thumb_form = (shift == 0) && ARM_LOWREG(r_dest.GetReg()) && ARM_LOWREG(r_src1.GetReg()) &&
+ ARM_LOWREG(r_src2.GetReg());
switch (op) {
case kOpAdd:
opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
@@ -448,15 +449,15 @@
}
DCHECK(!IsPseudoLirOp(opcode));
if (EncodingMap[opcode].flags & IS_QUAD_OP) {
- return NewLIR4(opcode, r_dest, r_src1, r_src2, shift);
+ return NewLIR4(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), shift);
} else {
DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
- return NewLIR3(opcode, r_dest, r_src1, r_src2);
+ return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
}
}
LIR* ArmMir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
- return OpRegRegRegShift(op, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), 0);
+ return OpRegRegRegShift(op, r_dest, r_src1, r_src2, 0);
}
LIR* ArmMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
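
Throughout these helpers, a single int produced by EncodeShift() carries both the shift kind and the shift amount, which the encoder later consumes through the kFmtShift field kind. A hypothetical packing, shown only to illustrate the idea (the authoritative layout lives in the ARM backend, not in this diff):

    // Illustrative only: 2 bits of shift kind, 5 bits of shift amount.
    int EncodeShiftSketch(int code, int amount) {
      return ((amount & 0x1f) << 2) | code;
    }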
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 2afa5ca..b23e10f 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -1626,14 +1626,31 @@
// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+ if (lit < 0) {
+ return false;
+ }
+ if (lit == 0) {
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ LoadConstant(rl_result.reg, 0);
+ StoreValue(rl_dest, rl_result);
+ return true;
+ }
+ if (lit == 1) {
+ rl_src = LoadValue(rl_src, kCoreReg);
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ OpRegCopy(rl_result.reg, rl_src.reg);
+ StoreValue(rl_dest, rl_result);
+ return true;
+ }
+  // There is RegRegRegShift on ARM, so check for more special cases.
+ if (cu_->instruction_set == kThumb2) {
+ return EasyMultiply(rl_src, rl_dest, lit);
+ }
// Can we simplify this multiplication?
bool power_of_two = false;
bool pop_count_le2 = false;
bool power_of_two_minus_one = false;
- if (lit < 2) {
- // Avoid special cases.
- return false;
- } else if (IsPowerOfTwo(lit)) {
+ if (IsPowerOfTwo(lit)) {
power_of_two = true;
} else if (IsPopCountLE2(lit)) {
pop_count_le2 = true;
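
HandleEasyMultiply now peels off the trivial literals before any pattern matching, and on Thumb2 defers everything else to the new EasyMultiply hook. A scalar view of the added fast paths (illustrative, not ART code):

    // Mirrors the early-out structure added above.
    bool TryEasyMultiplyScalar(int32_t src, int lit, int32_t* out) {
      if (lit < 0) {
        return false;   // negative literals take the generic multiply path
      }
      if (lit == 0) {
        *out = 0;       // multiply by zero: materialize a constant
        return true;
      }
      if (lit == 1) {
        *out = src;     // multiply by one: a plain register copy
        return true;
      }
      return false;     // remaining patterns are back-end specific
    }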
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index bc1ad02..0ef43b3 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -22,13 +22,14 @@
namespace art {
-class MipsMir2Lir : public Mir2Lir {
+class MipsMir2Lir FINAL : public Mir2Lir {
public:
MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
// Required for target - codegen utilities.
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
RegLocation rl_dest, int lit);
+ bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(ThreadOffset offset);
LIR* LoadBaseDisp(int r_base, int displacement, int r_dest, OpSize size, int s_reg);
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index dfe8b35..270d895 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -368,6 +368,11 @@
return false;
}
+bool MipsMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+  LOG(FATAL) << "Unexpected use of EasyMultiply in Mips";
+ return false;
+}
+
LIR* MipsMir2Lir::OpIT(ConditionCode cond, const char* guide) {
LOG(FATAL) << "Unexpected use of OpIT in Mips";
return NULL;
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 1c8f6dc..68c3d0f 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -811,6 +811,7 @@
// Required for target - codegen helpers.
virtual bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
+ virtual bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
virtual LIR* CheckSuspendUsingLoad() = 0;
virtual RegStorage LoadHelper(ThreadOffset offset) = 0;
virtual LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 6d427e7..4c495a1 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -22,13 +22,14 @@
namespace art {
-class X86Mir2Lir : public Mir2Lir {
+class X86Mir2Lir FINAL : public Mir2Lir {
public:
X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
// Required for target - codegen helpers.
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
RegLocation rl_dest, int lit);
+ bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(ThreadOffset offset);
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 37b2b37..5ef7060 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -909,6 +909,11 @@
return false;
}
+bool X86Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+  LOG(FATAL) << "Unexpected use of EasyMultiply in x86";
+ return false;
+}
+
LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide) {
LOG(FATAL) << "Unexpected use of OpIT in x86";
return NULL;
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 447854f..3082273 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -197,6 +197,33 @@
.cfi_adjust_cfa_offset -304
.endm
+.macro RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME_NO_D0
+
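+    // d0 is deliberately not reloaded, so the floating-point return value survives.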
+ ldr d1, [sp, #24]
+ ldp d2, d3, [sp, #32]
+ ldp d4, d5, [sp, #48]
+ ldp d6, d7, [sp, #64]
+ ldp d8, d9, [sp, #80]
+ ldp d10, d11, [sp, #96]
+ ldp d12, d13, [sp, #112]
+ ldp d14, d15, [sp, #128]
+
+ // args.
+ ldp x1, x2, [sp, #144]
+ ldp x3, x4, [sp, #160]
+ ldp x5, x6, [sp, #176]
+ ldp x7, xSELF, [sp, #192]
+ ldp x19, x20, [sp, #208]
+ ldp x21, x22, [sp, #224]
+ ldp x23, x24, [sp, #240]
+ ldp x25, x26, [sp, #256]
+ ldp x27, x28, [sp, #272]
+ ldp xFP, xLR, [sp, #288]
+
+ add sp, sp, #304
+ .cfi_adjust_cfa_offset -304
+.endm
+
.macro RETURN_IF_RESULT_IS_ZERO
brk 0
.endm
@@ -876,11 +903,27 @@
UNIMPLEMENTED art_quick_test_suspend
-/**
- * Returned by ClassLinker::GetOatCodeFor
- *
- */
-UNIMPLEMENTED art_quick_proxy_invoke_handler
+ /*
+ * Called by managed code that is attempting to call a method on a proxy class. On entry
+ * x0 holds the proxy method and x1 holds the receiver. The frame size of the invoked proxy
+ * method agrees with a ref and args callee save frame.
+ */
+ .extern artQuickProxyInvokeHandler
+ENTRY art_quick_proxy_invoke_handler
+ SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ str x0, [sp, #0] // place proxy method at bottom of frame
+ mov x2, xSELF // pass Thread::Current
+ mov x3, sp // pass SP
+ bl artQuickProxyInvokeHandler // (Method* proxy method, receiver, Thread*, SP)
+ ldr xSELF, [sp, #200] // Restore self pointer.
+ ldr x2, [xSELF, THREAD_EXCEPTION_OFFSET]
+  cbnz x2, .Lexception_in_proxy // branch to the exception path if one is pending
+ RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME_NO_D0 // keep d0
+ ret // return on success
+.Lexception_in_proxy:
+ RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ DELIVER_PENDING_EXCEPTION
+END art_quick_proxy_invoke_handler
UNIMPLEMENTED art_quick_imt_conflict_trampoline
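
Together with str x0, [sp, #0] and ldr xSELF, [sp, #200] in the handler, the offsets in the restore macro above imply the following layout for the 304-byte ref-and-args callee-save frame (reconstructed from the loads; slots this diff never touches are omitted):

    // [sp, #0]          Method*   (the proxy method is spilled here)
    // [sp, #16]         d0        (inferred from d1 at #24; skipped so the FP result survives)
    // [sp, #24..#136]   d1 .. d15
    // [sp, #144..#192]  x1 .. x7
    // [sp, #200]        xSELF     (matches the explicit reload in the handler)
    // [sp, #208..#280]  x19 .. x28
    // [sp, #288]        xFP,  [sp, #296]  xLR
    // sp + 304          caller's frame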
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 829ec4a..9e5f54c 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -200,7 +200,7 @@
}
ThrowLocation throw_location(rcvr, proxy_method, -1);
JValue result_unboxed;
- if (!UnboxPrimitiveForResult(throw_location, result_ref, result_type, result_unboxed)) {
+ if (!UnboxPrimitiveForResult(throw_location, result_ref, result_type, &result_unboxed)) {
DCHECK(soa.Self()->IsExceptionPending());
return zero;
}
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index dac287f..3d2fd7b 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -155,8 +155,16 @@
}
}
-// Similar to memmove except elements are of aligned appropriately for T, count is in T sized units
-// copies are guaranteed not to tear when T is less-than 64bit.
+template<typename T>
+inline PrimitiveArray<T>* PrimitiveArray<T>::Alloc(Thread* self, size_t length) {
+ DCHECK(array_class_ != NULL);
+ Array* raw_array = Array::Alloc<true>(self, array_class_, length, sizeof(T),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ return down_cast<PrimitiveArray<T>*>(raw_array);
+}
+
+// Backward copy where elements are aligned appropriately for T. Count is in T-sized units.
+// Copies are guaranteed not to tear when sizeof(T) is less than 64 bits.
template<typename T>
static inline void ArrayBackwardCopy(T* d, const T* s, int32_t count) {
d += count;
@@ -168,12 +176,15 @@
}
}
+// Forward copy where elements are aligned appropriately for T. Count is in T-sized units.
+// Copies are guaranteed not to tear when sizeof(T) is less than 64 bits.
template<typename T>
-inline PrimitiveArray<T>* PrimitiveArray<T>::Alloc(Thread* self, size_t length) {
- DCHECK(array_class_ != NULL);
- Array* raw_array = Array::Alloc<true>(self, array_class_, length, sizeof(T),
- Runtime::Current()->GetHeap()->GetCurrentAllocator());
- return down_cast<PrimitiveArray<T>*>(raw_array);
+static inline void ArrayForwardCopy(T* d, const T* s, int32_t count) {
+ for (int32_t i = 0; i < count; ++i) {
+ *d = *s;
+ d++;
+ s++;
+ }
}
template<class T>
@@ -193,47 +204,49 @@
// Note for non-byte copies we can't rely on standard libc functions like memcpy(3) and memmove(3)
// in our implementation, because they may copy byte-by-byte.
- if (LIKELY(src != this) || (dst_pos < src_pos) || (dst_pos - src_pos >= count)) {
- // Forward copy ok.
+ if (LIKELY(src != this)) {
+ // Memcpy ok for guaranteed non-overlapping distinct arrays.
Memcpy(dst_pos, src, src_pos, count);
} else {
- // Backward copy necessary.
+    // For copies within the same array, pick the copy direction that is safe for the overlap.
void* dst_raw = GetRawData(sizeof(T), dst_pos);
const void* src_raw = src->GetRawData(sizeof(T), src_pos);
if (sizeof(T) == sizeof(uint8_t)) {
- // TUNING: use memmove here?
uint8_t* d = reinterpret_cast<uint8_t*>(dst_raw);
const uint8_t* s = reinterpret_cast<const uint8_t*>(src_raw);
- ArrayBackwardCopy<uint8_t>(d, s, count);
- } else if (sizeof(T) == sizeof(uint16_t)) {
- uint16_t* d = reinterpret_cast<uint16_t*>(dst_raw);
- const uint16_t* s = reinterpret_cast<const uint16_t*>(src_raw);
- ArrayBackwardCopy<uint16_t>(d, s, count);
- } else if (sizeof(T) == sizeof(uint32_t)) {
- uint32_t* d = reinterpret_cast<uint32_t*>(dst_raw);
- const uint32_t* s = reinterpret_cast<const uint32_t*>(src_raw);
- ArrayBackwardCopy<uint32_t>(d, s, count);
+ memmove(d, s, count);
} else {
- DCHECK_EQ(sizeof(T), sizeof(uint64_t));
- uint64_t* d = reinterpret_cast<uint64_t*>(dst_raw);
- const uint64_t* s = reinterpret_cast<const uint64_t*>(src_raw);
- ArrayBackwardCopy<uint64_t>(d, s, count);
+ const bool copy_forward = (dst_pos < src_pos) || (dst_pos - src_pos >= count);
+ if (sizeof(T) == sizeof(uint16_t)) {
+ uint16_t* d = reinterpret_cast<uint16_t*>(dst_raw);
+ const uint16_t* s = reinterpret_cast<const uint16_t*>(src_raw);
+ if (copy_forward) {
+ ArrayForwardCopy<uint16_t>(d, s, count);
+ } else {
+ ArrayBackwardCopy<uint16_t>(d, s, count);
+ }
+ } else if (sizeof(T) == sizeof(uint32_t)) {
+ uint32_t* d = reinterpret_cast<uint32_t*>(dst_raw);
+ const uint32_t* s = reinterpret_cast<const uint32_t*>(src_raw);
+ if (copy_forward) {
+ ArrayForwardCopy<uint32_t>(d, s, count);
+ } else {
+ ArrayBackwardCopy<uint32_t>(d, s, count);
+ }
+ } else {
+ DCHECK_EQ(sizeof(T), sizeof(uint64_t));
+ uint64_t* d = reinterpret_cast<uint64_t*>(dst_raw);
+ const uint64_t* s = reinterpret_cast<const uint64_t*>(src_raw);
+ if (copy_forward) {
+ ArrayForwardCopy<uint64_t>(d, s, count);
+ } else {
+ ArrayBackwardCopy<uint64_t>(d, s, count);
+ }
+ }
}
}
}
-// Similar to memcpy except elements are of aligned appropriately for T, count is in T sized units
-// copies are guaranteed not to tear when T is less-than 64bit.
-template<typename T>
-static inline void ArrayForwardCopy(T* d, const T* s, int32_t count) {
- for (int32_t i = 0; i < count; ++i) {
- *d = *s;
- d++;
- s++;
- }
-}
-
-
template<class T>
inline void PrimitiveArray<T>::Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos,
int32_t count) {
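
The Memmove rewrite above picks a copy direction with the standard overlap rule. A self-contained restatement of the predicate (the same expression as in the patch):

    #include <cstdint>

    // Forward copy is safe when the destination starts before the source
    // (it never overwrites input it has yet to read), or when the regions
    // are fully disjoint; otherwise the copy must run backward.
    static bool CopyForward(int32_t dst_pos, int32_t src_pos, int32_t count) {
      return (dst_pos < src_pos) || (dst_pos - src_pos >= count);
    }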
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index 6667d51..a69bd05 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -141,7 +141,7 @@
// Widen it if necessary (and possible).
JValue wide_value;
if (!ConvertPrimitiveValue(NULL, false, field_type, Primitive::GetType(dst_descriptor),
- field_value, wide_value)) {
+ field_value, &wide_value)) {
DCHECK(soa.Self()->IsExceptionPending());
return JValue();
}
@@ -257,7 +257,7 @@
// Unbox the value, if necessary.
mirror::Object* boxed_value = soa.Decode<mirror::Object*>(javaValue);
JValue unboxed_value;
- if (!UnboxPrimitiveForField(boxed_value, field_type, unboxed_value, f)) {
+ if (!UnboxPrimitiveForField(boxed_value, field_type, f, &unboxed_value)) {
DCHECK(soa.Self()->IsExceptionPending());
return;
}
@@ -282,7 +282,7 @@
// Widen the value if necessary (and possible).
JValue wide_value;
if (!ConvertPrimitiveValue(nullptr, false, Primitive::GetType(src_descriptor),
- field_type, new_value, wide_value)) {
+ field_type, new_value, &wide_value)) {
DCHECK(soa.Self()->IsExceptionPending());
return;
}
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index f567055..7f39e70 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -543,76 +543,58 @@
bool ConvertPrimitiveValue(const ThrowLocation* throw_location, bool unbox_for_result,
Primitive::Type srcType, Primitive::Type dstType,
- const JValue& src, JValue& dst) {
- CHECK(srcType != Primitive::kPrimNot && dstType != Primitive::kPrimNot);
+ const JValue& src, JValue* dst) {
+ DCHECK(srcType != Primitive::kPrimNot && dstType != Primitive::kPrimNot);
+ if (LIKELY(srcType == dstType)) {
+ dst->SetJ(src.GetJ());
+ return true;
+ }
switch (dstType) {
- case Primitive::kPrimBoolean:
- if (srcType == Primitive::kPrimBoolean) {
- dst.SetZ(src.GetZ());
- return true;
- }
- break;
- case Primitive::kPrimChar:
- if (srcType == Primitive::kPrimChar) {
- dst.SetC(src.GetC());
- return true;
- }
- break;
+ case Primitive::kPrimBoolean: // Fall-through.
+ case Primitive::kPrimChar: // Fall-through.
case Primitive::kPrimByte:
- if (srcType == Primitive::kPrimByte) {
- dst.SetB(src.GetB());
- return true;
- }
+ // Only expect assignment with source and destination of identical type.
break;
case Primitive::kPrimShort:
- if (srcType == Primitive::kPrimByte || srcType == Primitive::kPrimShort) {
- dst.SetS(src.GetI());
+ if (srcType == Primitive::kPrimByte) {
+ dst->SetS(src.GetI());
return true;
}
break;
case Primitive::kPrimInt:
if (srcType == Primitive::kPrimByte || srcType == Primitive::kPrimChar ||
- srcType == Primitive::kPrimShort || srcType == Primitive::kPrimInt) {
- dst.SetI(src.GetI());
+ srcType == Primitive::kPrimShort) {
+ dst->SetI(src.GetI());
return true;
}
break;
case Primitive::kPrimLong:
if (srcType == Primitive::kPrimByte || srcType == Primitive::kPrimChar ||
srcType == Primitive::kPrimShort || srcType == Primitive::kPrimInt) {
- dst.SetJ(src.GetI());
- return true;
- } else if (srcType == Primitive::kPrimLong) {
- dst.SetJ(src.GetJ());
+ dst->SetJ(src.GetI());
return true;
}
break;
case Primitive::kPrimFloat:
if (srcType == Primitive::kPrimByte || srcType == Primitive::kPrimChar ||
srcType == Primitive::kPrimShort || srcType == Primitive::kPrimInt) {
- dst.SetF(src.GetI());
+ dst->SetF(src.GetI());
return true;
} else if (srcType == Primitive::kPrimLong) {
- dst.SetF(src.GetJ());
- return true;
- } else if (srcType == Primitive::kPrimFloat) {
- dst.SetF(src.GetF());
+ dst->SetF(src.GetJ());
return true;
}
break;
case Primitive::kPrimDouble:
if (srcType == Primitive::kPrimByte || srcType == Primitive::kPrimChar ||
srcType == Primitive::kPrimShort || srcType == Primitive::kPrimInt) {
- dst.SetD(src.GetI());
+ dst->SetD(src.GetI());
return true;
} else if (srcType == Primitive::kPrimLong) {
- dst.SetD(src.GetJ());
+ dst->SetD(src.GetJ());
return true;
} else if (srcType == Primitive::kPrimFloat) {
- dst.SetD(src.GetF());
- return true;
- } else if (srcType == Primitive::kPrimDouble) {
- dst.SetJ(src.GetJ());
+ dst->SetD(src.GetF());
return true;
}
break;
@@ -642,7 +624,7 @@
return nullptr;
}
- jmethodID m = NULL;
+ jmethodID m = nullptr;
const char* shorty;
switch (src_class) {
case Primitive::kPrimBoolean:
@@ -698,29 +680,25 @@
return result.GetL();
}
-static std::string UnboxingFailureKind(mirror::ArtMethod* m, int index, mirror::ArtField* f)
+static std::string UnboxingFailureKind(mirror::ArtField* f)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (m != NULL && index != -1) {
- ++index; // Humans count from 1.
- return StringPrintf("method %s argument %d", PrettyMethod(m, false).c_str(), index);
- }
- if (f != NULL) {
+ if (f != nullptr) {
return "field " + PrettyField(f, false);
}
return "result";
}
static bool UnboxPrimitive(const ThrowLocation* throw_location, mirror::Object* o,
- mirror::Class* dst_class, JValue& unboxed_value,
- mirror::ArtMethod* m, int index, mirror::ArtField* f)
+ mirror::Class* dst_class, mirror::ArtField* f,
+ JValue* unboxed_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- bool unbox_for_result = (f == NULL) && (index == -1);
+ bool unbox_for_result = (f == nullptr);
if (!dst_class->IsPrimitive()) {
- if (UNLIKELY(o != NULL && !o->InstanceOf(dst_class))) {
+ if (UNLIKELY(o != nullptr && !o->InstanceOf(dst_class))) {
if (!unbox_for_result) {
ThrowIllegalArgumentException(throw_location,
StringPrintf("%s has type %s, got %s",
- UnboxingFailureKind(m, index, f).c_str(),
+ UnboxingFailureKind(f).c_str(),
PrettyDescriptor(dst_class).c_str(),
PrettyTypeOf(o).c_str()).c_str());
} else {
@@ -731,20 +709,20 @@
}
return false;
}
- unboxed_value.SetL(o);
+ unboxed_value->SetL(o);
return true;
}
if (UNLIKELY(dst_class->GetPrimitiveType() == Primitive::kPrimVoid)) {
ThrowIllegalArgumentException(throw_location,
StringPrintf("Can't unbox %s to void",
- UnboxingFailureKind(m, index, f).c_str()).c_str());
+ UnboxingFailureKind(f).c_str()).c_str());
return false;
}
- if (UNLIKELY(o == NULL)) {
+ if (UNLIKELY(o == nullptr)) {
if (!unbox_for_result) {
ThrowIllegalArgumentException(throw_location,
StringPrintf("%s has type %s, got null",
- UnboxingFailureKind(m, index, f).c_str(),
+ UnboxingFailureKind(f).c_str(),
PrettyDescriptor(dst_class).c_str()).c_str());
} else {
ThrowNullPointerException(throw_location,
@@ -756,7 +734,7 @@
JValue boxed_value;
const StringPiece src_descriptor(ClassHelper(o->GetClass()).GetDescriptor());
- mirror::Class* src_class = NULL;
+ mirror::Class* src_class = nullptr;
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
mirror::ArtField* primitive_field = o->GetClass()->GetIFields()->Get(0);
if (src_descriptor == "Ljava/lang/Boolean;") {
@@ -786,7 +764,7 @@
} else {
ThrowIllegalArgumentException(throw_location,
StringPrintf("%s has type %s, got %s",
- UnboxingFailureKind(m, index, f).c_str(),
+ UnboxingFailureKind(f).c_str(),
PrettyDescriptor(dst_class).c_str(),
PrettyDescriptor(src_descriptor.data()).c_str()).c_str());
return false;
@@ -797,21 +775,15 @@
boxed_value, unboxed_value);
}
-bool UnboxPrimitiveForArgument(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value,
- mirror::ArtMethod* m, size_t index) {
- CHECK(m != NULL);
- return UnboxPrimitive(NULL, o, dst_class, unboxed_value, m, index, NULL);
-}
-
-bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value,
- mirror::ArtField* f) {
- CHECK(f != NULL);
- return UnboxPrimitive(NULL, o, dst_class, unboxed_value, NULL, -1, f);
+bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, mirror::ArtField* f,
+ JValue* unboxed_value) {
+ DCHECK(f != nullptr);
+ return UnboxPrimitive(nullptr, o, dst_class, f, unboxed_value);
}
bool UnboxPrimitiveForResult(const ThrowLocation& throw_location, mirror::Object* o,
- mirror::Class* dst_class, JValue& unboxed_value) {
- return UnboxPrimitive(&throw_location, o, dst_class, unboxed_value, NULL, -1, NULL);
+ mirror::Class* dst_class, JValue* unboxed_value) {
+ return UnboxPrimitive(&throw_location, o, dst_class, nullptr, unboxed_value);
}
} // namespace art
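
With the identical-type fast path hoisted to the top, the switch above is left with exactly the widening conversions. A host-side sketch of the resulting acceptance matrix (abbreviated type names; illustrative, not the runtime's code):

    enum Type { kBool, kByte, kChar, kShort, kInt, kLong, kFloat, kDouble };

    static bool Accepts(Type src, Type dst) {
      if (src == dst) return true;  // the SetJ fast path above
      switch (dst) {
        case kShort:  return src == kByte;
        case kInt:    return src == kByte || src == kChar || src == kShort;
        case kLong:   return src == kByte || src == kChar || src == kShort ||
                             src == kInt;
        case kFloat:  return src == kByte || src == kChar || src == kShort ||
                             src == kInt  || src == kLong;
        case kDouble: return src != kBool;  // every other primitive widens to double
        default:      return false;         // bool, byte, char: identical types only
      }
    }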
diff --git a/runtime/reflection.h b/runtime/reflection.h
index d2f9f25..325998f 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -36,19 +36,16 @@
mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-bool UnboxPrimitiveForArgument(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value,
- mirror::ArtMethod* m, size_t index)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value,
- mirror::ArtField* f)
+bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, mirror::ArtField* f,
+ JValue* unboxed_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool UnboxPrimitiveForResult(const ThrowLocation& throw_location, mirror::Object* o,
- mirror::Class* dst_class, JValue& unboxed_value)
+ mirror::Class* dst_class, JValue* unboxed_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool ConvertPrimitiveValue(const ThrowLocation* throw_location, bool unbox_for_result,
Primitive::Type src_class, Primitive::Type dst_class,
- const JValue& src, JValue& dst)
+ const JValue& src, JValue* dst)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
JValue InvokeWithVarArgs(const ScopedObjectAccess& soa, jobject obj, jmethodID mid, va_list args)
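
The pointer out-parameters make mutation explicit at each call site. The typical shape after this change, taken from the updated callers earlier in this patch:

    JValue unboxed_value;
    if (!UnboxPrimitiveForField(boxed_value, field_type, f, &unboxed_value)) {
      DCHECK(soa.Self()->IsExceptionPending());
      return;
    }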
diff --git a/runtime/thread.h b/runtime/thread.h
index b063b1e..32875e6 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -95,9 +95,13 @@
class PACKED(4) Thread {
public:
// Space to throw a StackOverflowError in.
-#if __LP64__
// TODO: shrink reserved space, in particular for 64bit.
+#if defined(__x86_64__)
static constexpr size_t kStackOverflowReservedBytes = 24 * KB;
+#elif defined(__aarch64__)
+  // Worst case, we would need about 2.6x the x86_64 reservation to cover the much larger
+  // register set; in practice, 32KB has proven sufficient.
+ static constexpr size_t kStackOverflowReservedBytes = 32 * KB;
#else
static constexpr size_t kStackOverflowReservedBytes = 16 * KB;
#endif