Merge "Support for --gdb for target run-test." into dalvik-dev
diff --git a/src/compiler/codegen/arm/codegen_arm.h b/src/compiler/codegen/arm/codegen_arm.h
index f085a19..ca39e5a 100644
--- a/src/compiler/codegen/arm/codegen_arm.h
+++ b/src/compiler/codegen/arm/codegen_arm.h
@@ -39,7 +39,6 @@
virtual LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value);
virtual LIR* LoadConstantValueWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi,
int val_lo, int val_hi);
- virtual void LoadPair(CompilationUnit* cu, int base, int low_reg, int high_reg);
virtual LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src,
OpSize size);
virtual LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_src_lo,
@@ -90,6 +89,12 @@
virtual bool IsUnconditionalBranch(LIR* lir);
// Required for target - Dalvik-level generators.
+ virtual void GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale);
+ virtual void GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_dest, int scale);
+ virtual void GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale);
virtual bool GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
virtual bool GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
@@ -191,6 +196,7 @@
static int EncodeShift(int code, int amount);
static int ModifiedImmediate(uint32_t value);
static ArmConditionCode ArmConditionEncoding(ConditionCode code);
+   virtual bool InexpensiveConstant(int reg, int value);
};
} // namespace art
diff --git a/src/compiler/codegen/arm/int_arm.cc b/src/compiler/codegen/arm/int_arm.cc
index 0a6abd2..e86f379 100644
--- a/src/compiler/codegen/arm/int_arm.cc
+++ b/src/compiler/codegen/arm/int_arm.cc
@@ -558,4 +558,204 @@
return false;
}
+/*
+ * Generate array load
+ */
+void ArmCodegen::GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_dest, int scale)
+{
+ RegisterClass reg_class = oat_reg_class_by_size(size);
+ int len_offset = Array::LengthOffset().Int32Value();
+ int data_offset;
+ RegLocation rl_result;
+ rl_array = LoadValue(cu, rl_array, kCoreReg);
+ rl_index = LoadValue(cu, rl_index, kCoreReg);
+
+ if (rl_dest.wide) {
+ data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value();
+ } else {
+ data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value();
+ }
+
+ /* null object? */
+ GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+
+ bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+ int reg_len = INVALID_REG;
+ if (needs_range_check) {
+ reg_len = AllocTemp(cu);
+ /* Get len */
+ LoadWordDisp(cu, rl_array.low_reg, len_offset, reg_len);
+ }
+ if (rl_dest.wide || rl_dest.fp) {
+ // No special indexed operation, lea + load w/ displacement
+ int reg_ptr = AllocTemp(cu);
+ OpRegRegRegShift(cu, kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
+ EncodeShift(kArmLsl, scale));
+ FreeTemp(cu, rl_index.low_reg);
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+
+ if (needs_range_check) {
+ // TODO: change kCondCS to a more meaningful name, is the sense of
+ // carry-set/clear flipped?
+ GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ FreeTemp(cu, reg_len);
+ }
+ if (rl_dest.wide) {
+ LoadBaseDispWide(cu, reg_ptr, data_offset, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+ FreeTemp(cu, reg_ptr);
+ StoreValueWide(cu, rl_dest, rl_result);
+ } else {
+ LoadBaseDisp(cu, reg_ptr, data_offset, rl_result.low_reg, size, INVALID_SREG);
+ FreeTemp(cu, reg_ptr);
+ StoreValue(cu, rl_dest, rl_result);
+ }
+ } else {
+ // Offset base, then use indexed load
+ int reg_ptr = AllocTemp(cu);
+ OpRegRegImm(cu, kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
+ FreeTemp(cu, rl_array.low_reg);
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+
+ if (needs_range_check) {
+ // TODO: change kCondCS to a more meaningful name, is the sense of
+ // carry-set/clear flipped?
+ GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ FreeTemp(cu, reg_len);
+ }
+ LoadBaseIndexed(cu, reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
+ FreeTemp(cu, reg_ptr);
+ StoreValue(cu, rl_dest, rl_result);
+ }
+}
+
+/*
+ * Generate array store
+ *
+ */
+void ArmCodegen::GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale)
+{
+ RegisterClass reg_class = oat_reg_class_by_size(size);
+ int len_offset = Array::LengthOffset().Int32Value();
+ int data_offset;
+
+ if (size == kLong || size == kDouble) {
+ data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value();
+ } else {
+ data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value();
+ }
+
+ rl_array = LoadValue(cu, rl_array, kCoreReg);
+ rl_index = LoadValue(cu, rl_index, kCoreReg);
+ int reg_ptr = INVALID_REG;
+ if (IsTemp(cu, rl_array.low_reg)) {
+ Clobber(cu, rl_array.low_reg);
+ reg_ptr = rl_array.low_reg;
+ } else {
+ reg_ptr = AllocTemp(cu);
+ }
+
+ /* null object? */
+ GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+
+ bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+ int reg_len = INVALID_REG;
+ if (needs_range_check) {
+ reg_len = AllocTemp(cu);
+ //NOTE: max live temps(4) here.
+ /* Get len */
+ LoadWordDisp(cu, rl_array.low_reg, len_offset, reg_len);
+ }
+ /* at this point, reg_ptr points to array, 2 live temps */
+ if (rl_src.wide || rl_src.fp) {
+ if (rl_src.wide) {
+ rl_src = LoadValueWide(cu, rl_src, reg_class);
+ } else {
+ rl_src = LoadValue(cu, rl_src, reg_class);
+ }
+ OpRegRegRegShift(cu, kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
+ EncodeShift(kArmLsl, scale));
+ if (needs_range_check) {
+ GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ FreeTemp(cu, reg_len);
+ }
+ if (rl_src.wide) {
+ StoreBaseDispWide(cu, reg_ptr, data_offset, rl_src.low_reg, rl_src.high_reg);
+ } else {
+ StoreBaseDisp(cu, reg_ptr, data_offset, rl_src.low_reg, size);
+ }
+ } else {
+ /* reg_ptr -> array data */
+ OpRegRegImm(cu, kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
+ rl_src = LoadValue(cu, rl_src, reg_class);
+ if (needs_range_check) {
+ GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ FreeTemp(cu, reg_len);
+ }
+ StoreBaseIndexed(cu, reg_ptr, rl_index.low_reg, rl_src.low_reg,
+ scale, size);
+ }
+ FreeTemp(cu, reg_ptr);
+}
+
+/*
+ * Generate array store
+ *
+ */
+void ArmCodegen::GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale)
+{
+ int len_offset = Array::LengthOffset().Int32Value();
+ int data_offset = Array::DataOffset(sizeof(Object*)).Int32Value();
+
+ FlushAllRegs(cu); // Use explicit registers
+ LockCallTemps(cu);
+
+ int r_value = TargetReg(kArg0); // Register holding value
+ int r_array_class = TargetReg(kArg1); // Register holding array's Class
+ int r_array = TargetReg(kArg2); // Register holding array
+ int r_index = TargetReg(kArg3); // Register holding index into array
+
+ LoadValueDirectFixed(cu, rl_array, r_array); // Grab array
+ LoadValueDirectFixed(cu, rl_src, r_value); // Grab value
+ LoadValueDirectFixed(cu, rl_index, r_index); // Grab index
+
+ GenNullCheck(cu, rl_array.s_reg_low, r_array, opt_flags); // NPE?
+
+ // Store of null?
+ LIR* null_value_check = OpCmpImmBranch(cu, kCondEq, r_value, 0, NULL);
+
+ // Get the array's class.
+ LoadWordDisp(cu, r_array, Object::ClassOffset().Int32Value(), r_array_class);
+ CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+ r_array_class, true);
+ // Redo LoadValues in case they didn't survive the call.
+ LoadValueDirectFixed(cu, rl_array, r_array); // Reload array
+ LoadValueDirectFixed(cu, rl_index, r_index); // Reload index
+ LoadValueDirectFixed(cu, rl_src, r_value); // Reload value
+ r_array_class = INVALID_REG;
+
+ // Branch here if value to be stored == null
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ null_value_check->target = target;
+
+ bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+ int reg_len = INVALID_REG;
+ if (needs_range_check) {
+ reg_len = TargetReg(kArg1);
+ LoadWordDisp(cu, r_array, len_offset, reg_len); // Get len
+ }
+ /* r_ptr -> array data */
+ int r_ptr = AllocTemp(cu);
+ OpRegRegImm(cu, kOpAdd, r_ptr, r_array, data_offset);
+ if (needs_range_check) {
+ GenRegRegCheck(cu, kCondCs, r_index, reg_len, kThrowArrayBounds);
+ }
+ StoreBaseIndexed(cu, r_ptr, r_index, r_value, scale, kWord);
+ FreeTemp(cu, r_ptr);
+ FreeTemp(cu, r_index);
+ MarkGCCard(cu, r_value, r_array);
+}
+
} // namespace art
diff --git a/src/compiler/codegen/arm/utility_arm.cc b/src/compiler/codegen/arm/utility_arm.cc
index 7f37bea..5c25eee 100644
--- a/src/compiler/codegen/arm/utility_arm.cc
+++ b/src/compiler/codegen/arm/utility_arm.cc
@@ -126,6 +126,21 @@
return value | ((0x8 + z_leading) << 7); /* [01000..11111]:bcdefgh */
}
+bool ArmCodegen::InexpensiveConstant(int reg, int value)
+{
+ bool res = false;
+ if (ARM_FPREG(reg)) {
+ res = (EncodeImmSingle(value) >= 0);
+ } else {
+ if (ARM_LOWREG(reg) && (value >= 0) && (IsUint(8, value))) {
+ res = true;
+ } else {
+ res = (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
+ }
+ }
+ return res;
+}
+
/*
* Load a immediate using a shortcut if possible; otherwise
* grab from the per-translation literal pool.
@@ -1011,11 +1026,6 @@
return StoreBaseDispBody(cu, rBase, displacement, r_src_lo, r_src_hi, kLong);
}
-void ArmCodegen::LoadPair(CompilationUnit* cu, int base, int low_reg, int high_reg)
-{
- LoadBaseDispWide(cu, base, 0, low_reg, high_reg, INVALID_SREG);
-}
-
LIR* ArmCodegen::OpFpRegCopy(CompilationUnit* cu, int r_dest, int r_src)
{
int opcode;
diff --git a/src/compiler/codegen/codegen.h b/src/compiler/codegen/codegen.h
index e512803..7a85ce8 100644
--- a/src/compiler/codegen/codegen.h
+++ b/src/compiler/codegen/codegen.h
@@ -135,12 +135,6 @@
void GenInstanceof(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest,
RegLocation rl_src);
void GenCheckCast(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_src);
- void GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
- RegLocation rl_index, RegLocation rl_src, int scale);
- void GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
- RegLocation rl_index, RegLocation rl_dest, int scale);
- void GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
- RegLocation rl_index, RegLocation rl_src, int scale);
void GenLong3Addr(CompilationUnit* cu, OpKind first_op, OpKind second_op, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2);
bool GenShiftOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
@@ -245,7 +239,6 @@
virtual LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value) = 0;
virtual LIR* LoadConstantValueWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi,
int val_lo, int val_hi) = 0;
- virtual void LoadPair(CompilationUnit* cu, int base, int low_reg, int high_reg) = 0;
virtual LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src,
OpSize size) = 0;
virtual LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_src_lo,
@@ -350,6 +343,12 @@
RegLocation rl_src) = 0;
virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
SpecialCaseHandler special_case) = 0;
+ virtual void GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale) = 0;
+ virtual void GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_dest, int scale) = 0;
+ virtual void GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale) = 0;
// Required for target - single operation generators.
virtual LIR* OpUnconditionalBranch(CompilationUnit* cu, LIR* target) = 0;
@@ -382,6 +381,7 @@
virtual void OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi, int src_lo,
int src_hi) = 0;
virtual void OpTlsCmp(CompilationUnit* cu, int offset, int val) = 0;
+ virtual bool InexpensiveConstant(int reg, int value) = 0;
// Temp workaround
void Workaround7250540(CompilationUnit* cu, RegLocation rl_dest, int value);
diff --git a/src/compiler/codegen/codegen_util.cc b/src/compiler/codegen/codegen_util.cc
index bab5cd9..77a2269 100644
--- a/src/compiler/codegen/codegen_util.cc
+++ b/src/compiler/codegen/codegen_util.cc
@@ -1074,4 +1074,27 @@
return res;
}
+bool EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2)
+{
+ bool is_taken;
+ switch (opcode) {
+ case Instruction::IF_EQ: is_taken = (src1 == src2); break;
+ case Instruction::IF_NE: is_taken = (src1 != src2); break;
+ case Instruction::IF_LT: is_taken = (src1 < src2); break;
+ case Instruction::IF_GE: is_taken = (src1 >= src2); break;
+ case Instruction::IF_GT: is_taken = (src1 > src2); break;
+ case Instruction::IF_LE: is_taken = (src1 <= src2); break;
+ case Instruction::IF_EQZ: is_taken = (src1 == 0); break;
+ case Instruction::IF_NEZ: is_taken = (src1 != 0); break;
+ case Instruction::IF_LTZ: is_taken = (src1 < 0); break;
+ case Instruction::IF_GEZ: is_taken = (src1 >= 0); break;
+ case Instruction::IF_GTZ: is_taken = (src1 > 0); break;
+ case Instruction::IF_LEZ: is_taken = (src1 <= 0); break;
+ default:
+ LOG(FATAL) << "Unexpected opcode " << opcode;
+ is_taken = false;
+ }
+ return is_taken;
+}
+
} // namespace art
diff --git a/src/compiler/codegen/codegen_util.h b/src/compiler/codegen/codegen_util.h
index 6a9b6cd..3bb4291 100644
--- a/src/compiler/codegen/codegen_util.h
+++ b/src/compiler/codegen/codegen_util.h
@@ -51,6 +51,7 @@
void DumpPackedSwitchTable(const uint16_t* table);
LIR* MarkBoundary(CompilationUnit* cu, int offset, const char* inst_str);
void NopLIR(LIR* lir);
+bool EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2);
} // namespace art
diff --git a/src/compiler/codegen/gen_common.cc b/src/compiler/codegen/gen_common.cc
index db99a30..275aee5 100644
--- a/src/compiler/codegen/gen_common.cc
+++ b/src/compiler/codegen/gen_common.cc
@@ -89,13 +89,28 @@
return branch;
}
+// Convert relation of src1/src2 to src2/src1
+ConditionCode FlipComparisonOrder(ConditionCode before) {
+ ConditionCode res;
+ switch (before) {
+ case kCondEq: res = kCondEq; break;
+ case kCondNe: res = kCondNe; break;
+ case kCondLt: res = kCondGt; break;
+ case kCondGt: res = kCondLt; break;
+ case kCondLe: res = kCondGe; break;
+ case kCondGe: res = kCondLe; break;
+ default:
+ res = static_cast<ConditionCode>(0);
+ LOG(FATAL) << "Unexpected ccode " << before;
+ }
+ return res;
+}
+
void Codegen::GenCompareAndBranch(CompilationUnit* cu, Instruction::Code opcode,
RegLocation rl_src1, RegLocation rl_src2, LIR* taken,
LIR* fall_through)
{
ConditionCode cond;
- rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
- rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
switch (opcode) {
case Instruction::IF_EQ:
cond = kCondEq;
@@ -119,6 +134,29 @@
cond = static_cast<ConditionCode>(0);
LOG(FATAL) << "Unexpected opcode " << opcode;
}
+
+ // Normalize such that if either operand is constant, src2 will be constant
+ if (rl_src1.is_const) {
+ RegLocation rl_temp = rl_src1;
+ rl_src1 = rl_src2;
+ rl_src2 = rl_temp;
+ cond = FlipComparisonOrder(cond);
+ }
+
+ rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
+ // Is this really an immediate comparison?
+ if (rl_src2.is_const) {
+ int immval = cu->constant_values[rl_src2.orig_sreg];
+ // If it's already live in a register or not easily materialized, just keep going
+ RegLocation rl_temp = UpdateLoc(cu, rl_src2);
+ if ((rl_temp.location == kLocDalvikFrame) && InexpensiveConstant(rl_src1.low_reg, immval)) {
+ // OK - convert this to a compare immediate and branch
+ OpCmpImmBranch(cu, cond, rl_src1.low_reg, immval, taken);
+ OpUnconditionalBranch(cu, fall_through);
+ return;
+ }
+ }
+ rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
OpCmpBranch(cu, cond, rl_src1.low_reg, rl_src2.low_reg, taken);
OpUnconditionalBranch(cu, fall_through);
}
@@ -151,12 +189,7 @@
cond = static_cast<ConditionCode>(0);
LOG(FATAL) << "Unexpected opcode " << opcode;
}
- if (cu->instruction_set == kThumb2) {
- OpRegImm(cu, kOpCmp, rl_src.low_reg, 0);
- OpCondBranch(cu, cond, taken);
- } else {
- OpCmpImmBranch(cu, cond, rl_src.low_reg, 0, taken);
- }
+ OpCmpImmBranch(cu, cond, rl_src.low_reg, 0, taken);
OpUnconditionalBranch(cu, fall_through);
}
@@ -668,7 +701,7 @@
int reg_ptr = AllocTemp(cu);
OpRegRegImm(cu, kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
rl_result = EvalLoc(cu, rl_dest, reg_class, true);
- LoadPair(cu, reg_ptr, rl_result.low_reg, rl_result.high_reg);
+ LoadBaseDispWide(cu, reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
if (is_volatile) {
GenMemBarrier(cu, kLoadLoad);
}
@@ -1056,270 +1089,6 @@
branch2->target = target;
}
-/*
- * Generate array store
- *
- */
-void Codegen::GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
- RegLocation rl_index, RegLocation rl_src, int scale)
-{
- int len_offset = Array::LengthOffset().Int32Value();
- int data_offset = Array::DataOffset(sizeof(Object*)).Int32Value();
-
- FlushAllRegs(cu); // Use explicit registers
- LockCallTemps(cu);
-
- int r_value = TargetReg(kArg0); // Register holding value
- int r_array_class = TargetReg(kArg1); // Register holding array's Class
- int r_array = TargetReg(kArg2); // Register holding array
- int r_index = TargetReg(kArg3); // Register holding index into array
-
- LoadValueDirectFixed(cu, rl_array, r_array); // Grab array
- LoadValueDirectFixed(cu, rl_src, r_value); // Grab value
- LoadValueDirectFixed(cu, rl_index, r_index); // Grab index
-
- GenNullCheck(cu, rl_array.s_reg_low, r_array, opt_flags); // NPE?
-
- // Store of null?
- LIR* null_value_check = OpCmpImmBranch(cu, kCondEq, r_value, 0, NULL);
-
- // Get the array's class.
- LoadWordDisp(cu, r_array, Object::ClassOffset().Int32Value(), r_array_class);
- CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
- r_array_class, true);
- // Redo LoadValues in case they didn't survive the call.
- LoadValueDirectFixed(cu, rl_array, r_array); // Reload array
- LoadValueDirectFixed(cu, rl_index, r_index); // Reload index
- LoadValueDirectFixed(cu, rl_src, r_value); // Reload value
- r_array_class = INVALID_REG;
-
- // Branch here if value to be stored == null
- LIR* target = NewLIR0(cu, kPseudoTargetLabel);
- null_value_check->target = target;
-
- if (cu->instruction_set == kX86) {
- // make an extra temp available for card mark below
- FreeTemp(cu, TargetReg(kArg1));
- if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
- /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
- GenRegMemCheck(cu, kCondUge, r_index, r_array, len_offset, kThrowArrayBounds);
- }
- StoreBaseIndexedDisp(cu, r_array, r_index, scale,
- data_offset, r_value, INVALID_REG, kWord, INVALID_SREG);
- } else {
- bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
- int reg_len = INVALID_REG;
- if (needs_range_check) {
- reg_len = TargetReg(kArg1);
- LoadWordDisp(cu, r_array, len_offset, reg_len); // Get len
- }
- /* r_ptr -> array data */
- int r_ptr = AllocTemp(cu);
- OpRegRegImm(cu, kOpAdd, r_ptr, r_array, data_offset);
- if (needs_range_check) {
- GenRegRegCheck(cu, kCondCs, r_index, reg_len, kThrowArrayBounds);
- }
- StoreBaseIndexed(cu, r_ptr, r_index, r_value, scale, kWord);
- FreeTemp(cu, r_ptr);
- }
- FreeTemp(cu, r_index);
- MarkGCCard(cu, r_value, r_array);
-}
-
-/*
- * Generate array load
- */
-void Codegen::GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
- RegLocation rl_index, RegLocation rl_dest, int scale)
-{
- RegisterClass reg_class = oat_reg_class_by_size(size);
- int len_offset = Array::LengthOffset().Int32Value();
- int data_offset;
- RegLocation rl_result;
- rl_array = LoadValue(cu, rl_array, kCoreReg);
- rl_index = LoadValue(cu, rl_index, kCoreReg);
-
- if (size == kLong || size == kDouble) {
- data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value();
- } else {
- data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value();
- }
-
- /* null object? */
- GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
-
- if (cu->instruction_set == kX86) {
- if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
- /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
- GenRegMemCheck(cu, kCondUge, rl_index.low_reg, rl_array.low_reg,
- len_offset, kThrowArrayBounds);
- }
- if ((size == kLong) || (size == kDouble)) {
- int reg_addr = AllocTemp(cu);
- OpLea(cu, reg_addr, rl_array.low_reg, rl_index.low_reg, scale, data_offset);
- FreeTemp(cu, rl_array.low_reg);
- FreeTemp(cu, rl_index.low_reg);
- rl_result = EvalLoc(cu, rl_dest, reg_class, true);
- LoadBaseIndexedDisp(cu, reg_addr, INVALID_REG, 0, 0, rl_result.low_reg,
- rl_result.high_reg, size, INVALID_SREG);
- StoreValueWide(cu, rl_dest, rl_result);
- } else {
- rl_result = EvalLoc(cu, rl_dest, reg_class, true);
-
- LoadBaseIndexedDisp(cu, rl_array.low_reg, rl_index.low_reg, scale,
- data_offset, rl_result.low_reg, INVALID_REG, size,
- INVALID_SREG);
-
- StoreValue(cu, rl_dest, rl_result);
- }
- } else {
- int reg_ptr = AllocTemp(cu);
- bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
- int reg_len = INVALID_REG;
- if (needs_range_check) {
- reg_len = AllocTemp(cu);
- /* Get len */
- LoadWordDisp(cu, rl_array.low_reg, len_offset, reg_len);
- }
- /* reg_ptr -> array data */
- OpRegRegImm(cu, kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
- FreeTemp(cu, rl_array.low_reg);
- if ((size == kLong) || (size == kDouble)) {
- if (scale) {
- int r_new_index = AllocTemp(cu);
- OpRegRegImm(cu, kOpLsl, r_new_index, rl_index.low_reg, scale);
- OpRegReg(cu, kOpAdd, reg_ptr, r_new_index);
- FreeTemp(cu, r_new_index);
- } else {
- OpRegReg(cu, kOpAdd, reg_ptr, rl_index.low_reg);
- }
- FreeTemp(cu, rl_index.low_reg);
- rl_result = EvalLoc(cu, rl_dest, reg_class, true);
-
- if (needs_range_check) {
- // TODO: change kCondCS to a more meaningful name, is the sense of
- // carry-set/clear flipped?
- GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
- FreeTemp(cu, reg_len);
- }
- LoadPair(cu, reg_ptr, rl_result.low_reg, rl_result.high_reg);
-
- FreeTemp(cu, reg_ptr);
- StoreValueWide(cu, rl_dest, rl_result);
- } else {
- rl_result = EvalLoc(cu, rl_dest, reg_class, true);
-
- if (needs_range_check) {
- // TODO: change kCondCS to a more meaningful name, is the sense of
- // carry-set/clear flipped?
- GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
- FreeTemp(cu, reg_len);
- }
- LoadBaseIndexed(cu, reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
-
- FreeTemp(cu, reg_ptr);
- StoreValue(cu, rl_dest, rl_result);
- }
- }
-}
-
-/*
- * Generate array store
- *
- */
-void Codegen::GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
- RegLocation rl_index, RegLocation rl_src, int scale)
-{
- RegisterClass reg_class = oat_reg_class_by_size(size);
- int len_offset = Array::LengthOffset().Int32Value();
- int data_offset;
-
- if (size == kLong || size == kDouble) {
- data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value();
- } else {
- data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value();
- }
-
- rl_array = LoadValue(cu, rl_array, kCoreReg);
- rl_index = LoadValue(cu, rl_index, kCoreReg);
- int reg_ptr = INVALID_REG;
- if (cu->instruction_set != kX86) {
- if (IsTemp(cu, rl_array.low_reg)) {
- Clobber(cu, rl_array.low_reg);
- reg_ptr = rl_array.low_reg;
- } else {
- reg_ptr = AllocTemp(cu);
- OpRegCopy(cu, reg_ptr, rl_array.low_reg);
- }
- }
-
- /* null object? */
- GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
-
- if (cu->instruction_set == kX86) {
- if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
- /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
- GenRegMemCheck(cu, kCondUge, rl_index.low_reg, rl_array.low_reg, len_offset, kThrowArrayBounds);
- }
- if ((size == kLong) || (size == kDouble)) {
- rl_src = LoadValueWide(cu, rl_src, reg_class);
- } else {
- rl_src = LoadValue(cu, rl_src, reg_class);
- }
- // If the src reg can't be byte accessed, move it to a temp first.
- if ((size == kSignedByte || size == kUnsignedByte) && rl_src.low_reg >= 4) {
- int temp = AllocTemp(cu);
- OpRegCopy(cu, temp, rl_src.low_reg);
- StoreBaseIndexedDisp(cu, rl_array.low_reg, rl_index.low_reg, scale, data_offset, temp,
- INVALID_REG, size, INVALID_SREG);
- } else {
- StoreBaseIndexedDisp(cu, rl_array.low_reg, rl_index.low_reg, scale, data_offset, rl_src.low_reg,
- rl_src.high_reg, size, INVALID_SREG);
- }
- } else {
- bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
- int reg_len = INVALID_REG;
- if (needs_range_check) {
- reg_len = AllocTemp(cu);
- //NOTE: max live temps(4) here.
- /* Get len */
- LoadWordDisp(cu, rl_array.low_reg, len_offset, reg_len);
- }
- /* reg_ptr -> array data */
- OpRegImm(cu, kOpAdd, reg_ptr, data_offset);
- /* at this point, reg_ptr points to array, 2 live temps */
- if ((size == kLong) || (size == kDouble)) {
- //TUNING: specific wide routine that can handle fp regs
- if (scale) {
- int r_new_index = AllocTemp(cu);
- OpRegRegImm(cu, kOpLsl, r_new_index, rl_index.low_reg, scale);
- OpRegReg(cu, kOpAdd, reg_ptr, r_new_index);
- FreeTemp(cu, r_new_index);
- } else {
- OpRegReg(cu, kOpAdd, reg_ptr, rl_index.low_reg);
- }
- rl_src = LoadValueWide(cu, rl_src, reg_class);
-
- if (needs_range_check) {
- GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
- FreeTemp(cu, reg_len);
- }
-
- StoreBaseDispWide(cu, reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
-
- FreeTemp(cu, reg_ptr);
- } else {
- rl_src = LoadValue(cu, rl_src, reg_class);
- if (needs_range_check) {
- GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
- FreeTemp(cu, reg_len);
- }
- StoreBaseIndexed(cu, reg_ptr, rl_index.low_reg, rl_src.low_reg,
- scale, size);
- }
- }
-}
-
void Codegen::GenLong3Addr(CompilationUnit* cu, OpKind first_op, OpKind second_op,
RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
{
@@ -1683,10 +1452,18 @@
break;
}
+ case Instruction::SUB_INT:
+ case Instruction::SUB_INT_2ADDR:
+ lit = -lit;
+ // Intended fallthrough
+ case Instruction::ADD_INT:
+ case Instruction::ADD_INT_2ADDR:
case Instruction::ADD_INT_LIT8:
case Instruction::ADD_INT_LIT16:
op = kOpAdd;
break;
+ case Instruction::MUL_INT:
+ case Instruction::MUL_INT_2ADDR:
case Instruction::MUL_INT_LIT8:
case Instruction::MUL_INT_LIT16: {
if (HandleEasyMultiply(cu, rl_src, rl_dest, lit)) {
@@ -1695,39 +1472,52 @@
op = kOpMul;
break;
}
+ case Instruction::AND_INT:
+ case Instruction::AND_INT_2ADDR:
case Instruction::AND_INT_LIT8:
case Instruction::AND_INT_LIT16:
op = kOpAnd;
break;
+ case Instruction::OR_INT:
+ case Instruction::OR_INT_2ADDR:
case Instruction::OR_INT_LIT8:
case Instruction::OR_INT_LIT16:
op = kOpOr;
break;
+ case Instruction::XOR_INT:
+ case Instruction::XOR_INT_2ADDR:
case Instruction::XOR_INT_LIT8:
case Instruction::XOR_INT_LIT16:
op = kOpXor;
break;
case Instruction::SHL_INT_LIT8:
case Instruction::SHL_INT:
+ case Instruction::SHL_INT_2ADDR:
lit &= 31;
shift_op = true;
op = kOpLsl;
break;
case Instruction::SHR_INT_LIT8:
case Instruction::SHR_INT:
+ case Instruction::SHR_INT_2ADDR:
lit &= 31;
shift_op = true;
op = kOpAsr;
break;
case Instruction::USHR_INT_LIT8:
case Instruction::USHR_INT:
+ case Instruction::USHR_INT_2ADDR:
lit &= 31;
shift_op = true;
op = kOpLsr;
break;
+ case Instruction::DIV_INT:
+ case Instruction::DIV_INT_2ADDR:
case Instruction::DIV_INT_LIT8:
case Instruction::DIV_INT_LIT16:
+ case Instruction::REM_INT:
+ case Instruction::REM_INT_2ADDR:
case Instruction::REM_INT_LIT8:
case Instruction::REM_INT_LIT16: {
if (lit == 0) {
@@ -1738,6 +1528,8 @@
return false;
}
if ((opcode == Instruction::DIV_INT_LIT8) ||
+ (opcode == Instruction::DIV_INT) ||
+ (opcode == Instruction::DIV_INT_2ADDR) ||
(opcode == Instruction::DIV_INT_LIT16)) {
is_div = true;
} else {
@@ -1762,7 +1554,7 @@
break;
}
default:
- return true;
+ LOG(FATAL) << "Unexpected opcode " << opcode;
}
rl_src = LoadValue(cu, rl_src, kCoreReg);
rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
diff --git a/src/compiler/codegen/gen_invoke.cc b/src/compiler/codegen/gen_invoke.cc
index ebc1a98..f354152 100644
--- a/src/compiler/codegen/gen_invoke.cc
+++ b/src/compiler/codegen/gen_invoke.cc
@@ -602,7 +602,10 @@
next_reg++;
next_arg++;
} else {
- rl_arg.wide = false;
+ if (rl_arg.wide) {
+ rl_arg.wide = false;
+ rl_arg.is_const = false;
+ }
cg->LoadValueDirectFixed(cu, rl_arg, next_reg);
}
call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
diff --git a/src/compiler/codegen/gen_loadstore.cc b/src/compiler/codegen/gen_loadstore.cc
index fe08caa..eec74af 100644
--- a/src/compiler/codegen/gen_loadstore.cc
+++ b/src/compiler/codegen/gen_loadstore.cc
@@ -92,7 +92,11 @@
} else {
DCHECK((rl_src.location == kLocDalvikFrame) ||
(rl_src.location == kLocCompilerTemp));
- LoadWordDisp(cu, TargetReg(kSp), SRegOffset(cu, rl_src.s_reg_low), r_dest);
+ if (rl_src.is_const && InexpensiveConstant(r_dest, cu->constant_values[rl_src.orig_sreg])) {
+ LoadConstantNoClobber(cu, r_dest, cu->constant_values[rl_src.orig_sreg]);
+ } else {
+ LoadWordDisp(cu, TargetReg(kSp), SRegOffset(cu, rl_src.s_reg_low), r_dest);
+ }
}
}
diff --git a/src/compiler/codegen/mips/codegen_mips.h b/src/compiler/codegen/mips/codegen_mips.h
index aaa03c0..4178f2e 100644
--- a/src/compiler/codegen/mips/codegen_mips.h
+++ b/src/compiler/codegen/mips/codegen_mips.h
@@ -39,7 +39,6 @@
virtual LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value);
virtual LIR* LoadConstantValueWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi,
int val_lo, int val_hi);
- virtual void LoadPair(CompilationUnit* cu, int base, int low_reg, int high_reg);
virtual LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src,
OpSize size);
virtual LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_src_lo,
@@ -90,6 +89,12 @@
virtual bool IsUnconditionalBranch(LIR* lir);
// Required for target - Dalvik-level generators.
+ virtual void GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale);
+ virtual void GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_dest, int scale);
+ virtual void GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale);
virtual bool GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
virtual bool GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
@@ -184,6 +189,7 @@
void SpillCoreRegs(CompilationUnit* cu);
void UnSpillCoreRegs(CompilationUnit* cu);
static const MipsEncodingMap EncodingMap[kMipsLast];
+   virtual bool InexpensiveConstant(int reg, int value);
};
} // namespace art
diff --git a/src/compiler/codegen/mips/int_mips.cc b/src/compiler/codegen/mips/int_mips.cc
index bb36dc1..e2a5a02 100644
--- a/src/compiler/codegen/mips/int_mips.cc
+++ b/src/compiler/codegen/mips/int_mips.cc
@@ -432,4 +432,207 @@
return false;
}
+/*
+ * Generate array load
+ */
+void MipsCodegen::GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_dest, int scale)
+{
+ RegisterClass reg_class = oat_reg_class_by_size(size);
+ int len_offset = Array::LengthOffset().Int32Value();
+ int data_offset;
+ RegLocation rl_result;
+ rl_array = LoadValue(cu, rl_array, kCoreReg);
+ rl_index = LoadValue(cu, rl_index, kCoreReg);
+
+ if (size == kLong || size == kDouble) {
+ data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value();
+ } else {
+ data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value();
+ }
+
+ /* null object? */
+ GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+
+ int reg_ptr = AllocTemp(cu);
+ bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+ int reg_len = INVALID_REG;
+ if (needs_range_check) {
+ reg_len = AllocTemp(cu);
+ /* Get len */
+ LoadWordDisp(cu, rl_array.low_reg, len_offset, reg_len);
+ }
+ /* reg_ptr -> array data */
+ OpRegRegImm(cu, kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
+ FreeTemp(cu, rl_array.low_reg);
+ if ((size == kLong) || (size == kDouble)) {
+ if (scale) {
+ int r_new_index = AllocTemp(cu);
+ OpRegRegImm(cu, kOpLsl, r_new_index, rl_index.low_reg, scale);
+ OpRegReg(cu, kOpAdd, reg_ptr, r_new_index);
+ FreeTemp(cu, r_new_index);
+ } else {
+ OpRegReg(cu, kOpAdd, reg_ptr, rl_index.low_reg);
+ }
+ FreeTemp(cu, rl_index.low_reg);
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+
+ if (needs_range_check) {
+ // TODO: change kCondCS to a more meaningful name, is the sense of
+ // carry-set/clear flipped?
+ GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ FreeTemp(cu, reg_len);
+ }
+ LoadBaseDispWide(cu, reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+
+ FreeTemp(cu, reg_ptr);
+ StoreValueWide(cu, rl_dest, rl_result);
+ } else {
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+
+ if (needs_range_check) {
+ // TODO: change kCondCS to a more meaningful name, is the sense of
+ // carry-set/clear flipped?
+ GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ FreeTemp(cu, reg_len);
+ }
+ LoadBaseIndexed(cu, reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
+
+ FreeTemp(cu, reg_ptr);
+ StoreValue(cu, rl_dest, rl_result);
+ }
+}
+
+/*
+ * Generate array store
+ *
+ */
+void MipsCodegen::GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale)
+{
+ RegisterClass reg_class = oat_reg_class_by_size(size);
+ int len_offset = Array::LengthOffset().Int32Value();
+ int data_offset;
+
+ if (size == kLong || size == kDouble) {
+ data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value();
+ } else {
+ data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value();
+ }
+
+ rl_array = LoadValue(cu, rl_array, kCoreReg);
+ rl_index = LoadValue(cu, rl_index, kCoreReg);
+ int reg_ptr = INVALID_REG;
+ if (IsTemp(cu, rl_array.low_reg)) {
+ Clobber(cu, rl_array.low_reg);
+ reg_ptr = rl_array.low_reg;
+ } else {
+ reg_ptr = AllocTemp(cu);
+ OpRegCopy(cu, reg_ptr, rl_array.low_reg);
+ }
+
+ /* null object? */
+ GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+
+ bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+ int reg_len = INVALID_REG;
+ if (needs_range_check) {
+ reg_len = AllocTemp(cu);
+ //NOTE: max live temps(4) here.
+ /* Get len */
+ LoadWordDisp(cu, rl_array.low_reg, len_offset, reg_len);
+ }
+ /* reg_ptr -> array data */
+ OpRegImm(cu, kOpAdd, reg_ptr, data_offset);
+ /* at this point, reg_ptr points to array, 2 live temps */
+ if ((size == kLong) || (size == kDouble)) {
+ //TUNING: specific wide routine that can handle fp regs
+ if (scale) {
+ int r_new_index = AllocTemp(cu);
+ OpRegRegImm(cu, kOpLsl, r_new_index, rl_index.low_reg, scale);
+ OpRegReg(cu, kOpAdd, reg_ptr, r_new_index);
+ FreeTemp(cu, r_new_index);
+ } else {
+ OpRegReg(cu, kOpAdd, reg_ptr, rl_index.low_reg);
+ }
+ rl_src = LoadValueWide(cu, rl_src, reg_class);
+
+ if (needs_range_check) {
+ GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ FreeTemp(cu, reg_len);
+ }
+
+ StoreBaseDispWide(cu, reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
+
+ FreeTemp(cu, reg_ptr);
+ } else {
+ rl_src = LoadValue(cu, rl_src, reg_class);
+ if (needs_range_check) {
+ GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ FreeTemp(cu, reg_len);
+ }
+ StoreBaseIndexed(cu, reg_ptr, rl_index.low_reg, rl_src.low_reg,
+ scale, size);
+ }
+}
+
+/*
+ * Generate array store
+ *
+ */
+void MipsCodegen::GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale)
+{
+ int len_offset = Array::LengthOffset().Int32Value();
+ int data_offset = Array::DataOffset(sizeof(Object*)).Int32Value();
+
+ FlushAllRegs(cu); // Use explicit registers
+ LockCallTemps(cu);
+
+ int r_value = TargetReg(kArg0); // Register holding value
+ int r_array_class = TargetReg(kArg1); // Register holding array's Class
+ int r_array = TargetReg(kArg2); // Register holding array
+ int r_index = TargetReg(kArg3); // Register holding index into array
+
+ LoadValueDirectFixed(cu, rl_array, r_array); // Grab array
+ LoadValueDirectFixed(cu, rl_src, r_value); // Grab value
+ LoadValueDirectFixed(cu, rl_index, r_index); // Grab index
+
+ GenNullCheck(cu, rl_array.s_reg_low, r_array, opt_flags); // NPE?
+
+ // Store of null?
+ LIR* null_value_check = OpCmpImmBranch(cu, kCondEq, r_value, 0, NULL);
+
+ // Get the array's class.
+ LoadWordDisp(cu, r_array, Object::ClassOffset().Int32Value(), r_array_class);
+ CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+ r_array_class, true);
+ // Redo LoadValues in case they didn't survive the call.
+ LoadValueDirectFixed(cu, rl_array, r_array); // Reload array
+ LoadValueDirectFixed(cu, rl_index, r_index); // Reload index
+ LoadValueDirectFixed(cu, rl_src, r_value); // Reload value
+ r_array_class = INVALID_REG;
+
+ // Branch here if value to be stored == null
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ null_value_check->target = target;
+
+ bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+ int reg_len = INVALID_REG;
+ if (needs_range_check) {
+ reg_len = TargetReg(kArg1);
+ LoadWordDisp(cu, r_array, len_offset, reg_len); // Get len
+ }
+ /* r_ptr -> array data */
+ int r_ptr = AllocTemp(cu);
+ OpRegRegImm(cu, kOpAdd, r_ptr, r_array, data_offset);
+ if (needs_range_check) {
+ GenRegRegCheck(cu, kCondCs, r_index, reg_len, kThrowArrayBounds);
+ }
+ StoreBaseIndexed(cu, r_ptr, r_index, r_value, scale, kWord);
+ FreeTemp(cu, r_ptr);
+ FreeTemp(cu, r_index);
+ MarkGCCard(cu, r_value, r_array);
+}
+
} // namespace art
diff --git a/src/compiler/codegen/mips/utility_mips.cc b/src/compiler/codegen/mips/utility_mips.cc
index 44d75d1..4d4be76 100644
--- a/src/compiler/codegen/mips/utility_mips.cc
+++ b/src/compiler/codegen/mips/utility_mips.cc
@@ -52,6 +52,19 @@
return res;
}
+bool MipsCodegen::InexpensiveConstant(int reg, int value)
+{
+ bool res = false;
+ if (value == 0) {
+ res = true;
+ } else if (IsUint(16, value)) {
+ res = true;
+ } else if ((value < 0) && (value >= -32768)) {
+ res = true;
+ }
+ return res;
+}
+
/*
* Load a immediate using a shortcut if possible; otherwise
* grab from the per-translation literal pool. If target is
@@ -640,12 +653,6 @@
return StoreBaseDispBody(cu, rBase, displacement, r_src_lo, r_src_hi, kLong);
}
-void MipsCodegen::LoadPair(CompilationUnit *cu, int base, int low_reg, int high_reg)
-{
- LoadWordDisp(cu, base, LOWORD_OFFSET , low_reg);
- LoadWordDisp(cu, base, HIWORD_OFFSET , high_reg);
-}
-
LIR* MipsCodegen::OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset)
{
LOG(FATAL) << "Unexpected use of OpThreadMem for MIPS";
diff --git a/src/compiler/codegen/mir_to_lir.cc b/src/compiler/codegen/mir_to_lir.cc
index 6ec7edb..acdeafe 100644
--- a/src/compiler/codegen/mir_to_lir.cc
+++ b/src/compiler/codegen/mir_to_lir.cc
@@ -278,11 +278,22 @@
LIR* fall_through = &label_list[bb->fall_through->id];
bool backward_branch;
backward_branch = (bb->taken->start_offset <= mir->offset);
- if (backward_branch) {
- cg->GenSuspendTest(cu, opt_flags);
+ // Result known at compile time?
+ if (rl_src[0].is_const && rl_src[1].is_const) {
+ bool is_taken = EvaluateBranch(opcode, cu->constant_values[rl_src[0].orig_sreg],
+ cu->constant_values[rl_src[1].orig_sreg]);
+ if (is_taken && backward_branch) {
+ cg->GenSuspendTest(cu, opt_flags);
+ }
+ int id = is_taken ? bb->taken->id : bb->fall_through->id;
+ cg->OpUnconditionalBranch(cu, &label_list[id]);
+ } else {
+ if (backward_branch) {
+ cg->GenSuspendTest(cu, opt_flags);
+ }
+ cg->GenCompareAndBranch(cu, opcode, rl_src[0], rl_src[1], taken,
+ fall_through);
}
- cg->GenCompareAndBranch(cu, opcode, rl_src[0], rl_src[1], taken,
- fall_through);
break;
}
@@ -296,10 +307,20 @@
LIR* fall_through = &label_list[bb->fall_through->id];
bool backward_branch;
backward_branch = (bb->taken->start_offset <= mir->offset);
- if (backward_branch) {
- cg->GenSuspendTest(cu, opt_flags);
+ // Result known at compile time?
+ if (rl_src[0].is_const) {
+ bool is_taken = EvaluateBranch(opcode, cu->constant_values[rl_src[0].orig_sreg], 0);
+ if (is_taken && backward_branch) {
+ cg->GenSuspendTest(cu, opt_flags);
+ }
+ int id = is_taken ? bb->taken->id : bb->fall_through->id;
+ cg->OpUnconditionalBranch(cu, &label_list[id]);
+ } else {
+ if (backward_branch) {
+ cg->GenSuspendTest(cu, opt_flags);
+ }
+ cg->GenCompareZeroAndBranch(cu, opcode, rl_src[0], taken, fall_through);
}
- cg->GenCompareZeroAndBranch(cu, opcode, rl_src[0], taken, fall_through);
break;
}
@@ -504,29 +525,49 @@
cg->GenConversion(cu, opcode, rl_dest, rl_src[0]);
break;
+
case Instruction::ADD_INT:
- case Instruction::SUB_INT:
- case Instruction::MUL_INT:
- case Instruction::DIV_INT:
- case Instruction::REM_INT:
- case Instruction::AND_INT:
- case Instruction::OR_INT:
- case Instruction::XOR_INT:
- case Instruction::SHL_INT:
- case Instruction::SHR_INT:
- case Instruction::USHR_INT:
case Instruction::ADD_INT_2ADDR:
- case Instruction::SUB_INT_2ADDR:
+ case Instruction::MUL_INT:
case Instruction::MUL_INT_2ADDR:
- case Instruction::DIV_INT_2ADDR:
- case Instruction::REM_INT_2ADDR:
+ case Instruction::AND_INT:
case Instruction::AND_INT_2ADDR:
+ case Instruction::OR_INT:
case Instruction::OR_INT_2ADDR:
+ case Instruction::XOR_INT:
case Instruction::XOR_INT_2ADDR:
+ if (rl_src[0].is_const &&
+ cu->cg->InexpensiveConstant(0, cu->constant_values[rl_src[0].orig_sreg])) {
+ cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src[1],
+ cu->constant_values[rl_src[0].orig_sreg]);
+ } else if (rl_src[1].is_const &&
+ cu->cg->InexpensiveConstant(0, cu->constant_values[rl_src[1].orig_sreg])) {
+ cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src[0],
+ cu->constant_values[rl_src[1].orig_sreg]);
+ } else {
+ cg->GenArithOpInt(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+ }
+ break;
+
+ case Instruction::SUB_INT:
+ case Instruction::SUB_INT_2ADDR:
+ case Instruction::DIV_INT:
+ case Instruction::DIV_INT_2ADDR:
+ case Instruction::REM_INT:
+ case Instruction::REM_INT_2ADDR:
+ case Instruction::SHL_INT:
case Instruction::SHL_INT_2ADDR:
+ case Instruction::SHR_INT:
case Instruction::SHR_INT_2ADDR:
+ case Instruction::USHR_INT:
case Instruction::USHR_INT_2ADDR:
- cg->GenArithOpInt(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+ if (rl_src[1].is_const &&
+ cu->cg->InexpensiveConstant(0, cu->constant_values[rl_src[1].orig_sreg])) {
+ cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src[0],
+ cu->constant_values[rl_src[1].orig_sreg]);
+ } else {
+ cg->GenArithOpInt(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+ }
break;
case Instruction::ADD_LONG:
diff --git a/src/compiler/codegen/ralloc_util.cc b/src/compiler/codegen/ralloc_util.cc
index 1a3a413..accf676 100644
--- a/src/compiler/codegen/ralloc_util.cc
+++ b/src/compiler/codegen/ralloc_util.cc
@@ -1091,12 +1091,18 @@
RegLocation loc = cu->reg_location[i];
RefCounts* counts = loc.fp ? fp_counts : core_counts;
int p_map_idx = SRegToPMap(cu, loc.s_reg_low);
+ int sample_reg = loc.fp ? cu->reg_pool->FPRegs[0].reg : cu->reg_pool->core_regs[0].reg;
+ bool simple_immediate = loc.is_const &&
+ !cu->cg->InexpensiveConstant(sample_reg, cu->constant_values[loc.orig_sreg]);
if (loc.defined) {
- counts[p_map_idx].count += cu->use_counts.elem_list[i];
+ // Don't count easily regenerated immediates
+ if (!simple_immediate) {
+ counts[p_map_idx].count += cu->use_counts.elem_list[i];
+ }
}
if (loc.wide) {
if (loc.defined) {
- if (loc.fp) {
+ if (loc.fp && !simple_immediate) {
counts[p_map_idx].double_start = true;
counts[p_map_idx+1].count += cu->use_counts.elem_list[i+1];
}
diff --git a/src/compiler/codegen/x86/codegen_x86.h b/src/compiler/codegen/x86/codegen_x86.h
index 4ef186a..f467e83 100644
--- a/src/compiler/codegen/x86/codegen_x86.h
+++ b/src/compiler/codegen/x86/codegen_x86.h
@@ -40,7 +40,6 @@
virtual LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value);
virtual LIR* LoadConstantValueWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi,
int val_lo, int val_hi);
- virtual void LoadPair(CompilationUnit* cu, int base, int low_reg, int high_reg);
virtual LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src,
OpSize size);
virtual LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_src_lo,
@@ -91,6 +90,12 @@
virtual bool IsUnconditionalBranch(LIR* lir);
// Required for target - Dalvik-level generators.
+ virtual void GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale);
+ virtual void GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_dest, int scale);
+ virtual void GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale);
virtual bool GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
virtual bool GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
@@ -182,6 +187,7 @@
void SpillCoreRegs(CompilationUnit* cu);
void UnSpillCoreRegs(CompilationUnit* cu);
static const X86EncodingMap EncodingMap[kX86Last];
+ bool InexpensiveConstant(int reg, int value);
};
} // namespace art
diff --git a/src/compiler/codegen/x86/int_x86.cc b/src/compiler/codegen/x86/int_x86.cc
index bd3a7fa..0f1fc53 100644
--- a/src/compiler/codegen/x86/int_x86.cc
+++ b/src/compiler/codegen/x86/int_x86.cc
@@ -439,4 +439,148 @@
NewLIR2(cu, opcode, r_dest, thread_offset);
}
+/*
+ * Generate array load
+ */
+void X86Codegen::GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_dest, int scale)
+{
+ RegisterClass reg_class = oat_reg_class_by_size(size);
+ int len_offset = Array::LengthOffset().Int32Value();
+ int data_offset;
+ RegLocation rl_result;
+ rl_array = LoadValue(cu, rl_array, kCoreReg);
+ rl_index = LoadValue(cu, rl_index, kCoreReg);
+
+ if (size == kLong || size == kDouble) {
+ data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value();
+ } else {
+ data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value();
+ }
+
+ /* null object? */
+ GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+
+ if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
+ /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
+ GenRegMemCheck(cu, kCondUge, rl_index.low_reg, rl_array.low_reg,
+ len_offset, kThrowArrayBounds);
+ }
+ if ((size == kLong) || (size == kDouble)) {
+ int reg_addr = AllocTemp(cu);
+ OpLea(cu, reg_addr, rl_array.low_reg, rl_index.low_reg, scale, data_offset);
+ FreeTemp(cu, rl_array.low_reg);
+ FreeTemp(cu, rl_index.low_reg);
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+ LoadBaseIndexedDisp(cu, reg_addr, INVALID_REG, 0, 0, rl_result.low_reg,
+ rl_result.high_reg, size, INVALID_SREG);
+ StoreValueWide(cu, rl_dest, rl_result);
+ } else {
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+
+ LoadBaseIndexedDisp(cu, rl_array.low_reg, rl_index.low_reg, scale,
+ data_offset, rl_result.low_reg, INVALID_REG, size,
+ INVALID_SREG);
+
+ StoreValue(cu, rl_dest, rl_result);
+ }
+}
+
+/*
+ * Generate array store
+ *
+ */
+void X86Codegen::GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale)
+{
+ RegisterClass reg_class = oat_reg_class_by_size(size);
+ int len_offset = Array::LengthOffset().Int32Value();
+ int data_offset;
+
+ if (size == kLong || size == kDouble) {
+ data_offset = Array::DataOffset(sizeof(int64_t)).Int32Value();
+ } else {
+ data_offset = Array::DataOffset(sizeof(int32_t)).Int32Value();
+ }
+
+ rl_array = LoadValue(cu, rl_array, kCoreReg);
+ rl_index = LoadValue(cu, rl_index, kCoreReg);
+
+ /* null object? */
+ GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+
+ if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
+ /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
+ GenRegMemCheck(cu, kCondUge, rl_index.low_reg, rl_array.low_reg, len_offset, kThrowArrayBounds);
+ }
+ if ((size == kLong) || (size == kDouble)) {
+ rl_src = LoadValueWide(cu, rl_src, reg_class);
+ } else {
+ rl_src = LoadValue(cu, rl_src, reg_class);
+ }
+ // If the src reg can't be byte accessed, move it to a temp first.
+ if ((size == kSignedByte || size == kUnsignedByte) && rl_src.low_reg >= 4) {
+ int temp = AllocTemp(cu);
+ OpRegCopy(cu, temp, rl_src.low_reg);
+ StoreBaseIndexedDisp(cu, rl_array.low_reg, rl_index.low_reg, scale, data_offset, temp,
+ INVALID_REG, size, INVALID_SREG);
+ } else {
+ StoreBaseIndexedDisp(cu, rl_array.low_reg, rl_index.low_reg, scale, data_offset, rl_src.low_reg,
+ rl_src.high_reg, size, INVALID_SREG);
+ }
+}
+
+/*
+ * Generate array store
+ *
+ */
+void X86Codegen::GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale)
+{
+ int len_offset = Array::LengthOffset().Int32Value();
+ int data_offset = Array::DataOffset(sizeof(Object*)).Int32Value();
+
+ FlushAllRegs(cu); // Use explicit registers
+ LockCallTemps(cu);
+
+ int r_value = TargetReg(kArg0); // Register holding value
+ int r_array_class = TargetReg(kArg1); // Register holding array's Class
+ int r_array = TargetReg(kArg2); // Register holding array
+ int r_index = TargetReg(kArg3); // Register holding index into array
+
+ LoadValueDirectFixed(cu, rl_array, r_array); // Grab array
+ LoadValueDirectFixed(cu, rl_src, r_value); // Grab value
+ LoadValueDirectFixed(cu, rl_index, r_index); // Grab index
+
+ GenNullCheck(cu, rl_array.s_reg_low, r_array, opt_flags); // NPE?
+
+ // Store of null?
+ LIR* null_value_check = OpCmpImmBranch(cu, kCondEq, r_value, 0, NULL);
+
+ // Get the array's class.
+ LoadWordDisp(cu, r_array, Object::ClassOffset().Int32Value(), r_array_class);
+ CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+ r_array_class, true);
+ // Redo LoadValues in case they didn't survive the call.
+ LoadValueDirectFixed(cu, rl_array, r_array); // Reload array
+ LoadValueDirectFixed(cu, rl_index, r_index); // Reload index
+ LoadValueDirectFixed(cu, rl_src, r_value); // Reload value
+ r_array_class = INVALID_REG;
+
+ // Branch here if value to be stored == null
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ null_value_check->target = target;
+
+ // make an extra temp available for card mark below
+ FreeTemp(cu, TargetReg(kArg1));
+ if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
+ /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
+ GenRegMemCheck(cu, kCondUge, r_index, r_array, len_offset, kThrowArrayBounds);
+ }
+ StoreBaseIndexedDisp(cu, r_array, r_index, scale,
+ data_offset, r_value, INVALID_REG, kWord, INVALID_SREG);
+ FreeTemp(cu, r_index);
+ MarkGCCard(cu, r_value, r_array);
+}
+
} // namespace art
diff --git a/src/compiler/codegen/x86/utility_x86.cc b/src/compiler/codegen/x86/utility_x86.cc
index bdbc547..ce55b4b 100644
--- a/src/compiler/codegen/x86/utility_x86.cc
+++ b/src/compiler/codegen/x86/utility_x86.cc
@@ -50,6 +50,11 @@
return res;
}
+bool X86Codegen::InexpensiveConstant(int reg, int value)
+{
+ return true;
+}
+
/*
* Load a immediate using a shortcut if possible; otherwise
* grab from the per-translation literal pool. If target is
@@ -559,9 +564,4 @@
r_src_lo, r_src_hi, kLong, INVALID_SREG);
}
-void X86Codegen::LoadPair(CompilationUnit *cu, int base, int low_reg, int high_reg)
-{
- LoadBaseDispWide(cu, base, 0, low_reg, high_reg, INVALID_SREG);
-}
-
} // namespace art
diff --git a/src/constants_mips.h b/src/constants_mips.h
index 32fa158..08661e9 100644
--- a/src/constants_mips.h
+++ b/src/constants_mips.h
@@ -27,14 +27,14 @@
enum Register {
ZERO = 0,
- AT = 1,
- V0 = 2,
+ AT = 1, // Assembler temporary.
+ V0 = 2, // Values.
V1 = 3,
- A0 = 4,
+ A0 = 4, // Arguments.
A1 = 5,
A2 = 6,
A3 = 7,
- T0 = 8,
+ T0 = 8, // Temporaries.
T1 = 9,
T2 = 10,
T3 = 11,
@@ -42,7 +42,7 @@
T5 = 13,
T6 = 14,
T7 = 15,
- S0 = 16,
+ S0 = 16, // Saved values.
S1 = 17,
S2 = 18,
S3 = 19,
@@ -50,14 +50,14 @@
S5 = 21,
S6 = 22,
S7 = 23,
- T8 = 24,
+ T8 = 24, // More temporaries.
T9 = 25,
- K0 = 26,
+ K0 = 26, // Reserved for trap handler.
K1 = 27,
- GP = 28,
- SP = 29,
- FP = 30,
- RA = 31,
+ GP = 28, // Global pointer.
+ SP = 29, // Stack pointer.
+ FP = 30, // Saved value/frame pointer.
+ RA = 31, // Return address.
kNumberOfCoreRegisters = 32,
kNoRegister = -1 // Signals an illegal register.
};
diff --git a/src/debugger.cc b/src/debugger.cc
index 672b660..87e9c72 100644
--- a/src/debugger.cc
+++ b/src/debugger.cc
@@ -224,6 +224,7 @@
}
static Thread* DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId threadId)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Object* thread_peer = gRegistry->Get<Object*>(threadId);
@@ -1334,9 +1335,8 @@
}
bool Dbg::GetThreadName(JDWP::ObjectId threadId, std::string& name) {
- Thread* self = Thread::Current();
- MutexLock mu(self, *Locks::thread_list_lock_);
- ScopedObjectAccessUnchecked soa(self);
+ ScopedObjectAccessUnchecked soa(Thread::Current());
+ MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread = DecodeThread(soa, threadId);
if (thread == NULL) {
return false;
@@ -1581,6 +1581,7 @@
int Dbg::GetThreadFrameCount(JDWP::ObjectId threadId) {
ScopedObjectAccess soa(Thread::Current());
+ MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
return GetStackDepth(DecodeThread(soa, threadId));
}
@@ -1624,6 +1625,7 @@
};
ScopedObjectAccessUnchecked soa(Thread::Current());
+ MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread = DecodeThread(soa, thread_id); // Caller already checked thread is suspended.
GetFrameVisitor visitor(thread->GetManagedStack(), thread->GetInstrumentationStack(), start_frame, frame_count, buf);
visitor.WalkStack();
@@ -1669,8 +1671,11 @@
void Dbg::ResumeThread(JDWP::ObjectId threadId) {
ScopedObjectAccessUnchecked soa(Thread::Current());
Object* peer = gRegistry->Get<Object*>(threadId);
- MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
- Thread* thread = Thread::FromManagedThread(soa, peer);
+ Thread* thread;
+ {
+ MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
+ thread = Thread::FromManagedThread(soa, peer);
+ }
if (thread == NULL) {
LOG(WARNING) << "No such thread for resume: " << peer;
return;
@@ -1878,6 +1883,7 @@
};
ScopedObjectAccessUnchecked soa(Thread::Current());
+ MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread = DecodeThread(soa, threadId);
UniquePtr<Context> context(Context::Create());
GetLocalVisitor visitor(thread->GetManagedStack(), thread->GetInstrumentationStack(), context.get(),
@@ -1961,6 +1967,7 @@
};
ScopedObjectAccessUnchecked soa(Thread::Current());
+ MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread = DecodeThread(soa, threadId);
UniquePtr<Context> context(Context::Create());
SetLocalVisitor visitor(thread->GetManagedStack(), thread->GetInstrumentationStack(), context.get(),
@@ -2165,12 +2172,13 @@
JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId threadId, JDWP::JdwpStepSize step_size,
JDWP::JdwpStepDepth step_depth) {
ScopedObjectAccessUnchecked soa(Thread::Current());
+ MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread = DecodeThread(soa, threadId);
if (thread == NULL) {
return JDWP::ERR_INVALID_THREAD;
}
- MutexLock mu(soa.Self(), gBreakpointsLock);
+ MutexLock mu2(soa.Self(), gBreakpointsLock);
// TODO: there's no theoretical reason why we couldn't support single-stepping
// of multiple threads at once, but we never did so historically.
if (gSingleStepControl.thread != NULL && thread != gSingleStepControl.thread) {
diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc
index 5aa4c2d..636bb85 100644
--- a/src/interpreter/interpreter.cc
+++ b/src/interpreter/interpreter.cc
@@ -378,11 +378,6 @@
receiver = NULL;
} else {
receiver = shadow_frame.GetVRegReference(dec_insn.vC);
- if (UNLIKELY(receiver == NULL)) {
- ThrowNullPointerExceptionForMethodAccess(shadow_frame.GetMethod(), dec_insn.vB, type);
- result->SetJ(0);
- return;
- }
}
uint32_t method_idx = dec_insn.vB;
AbstractMethod* target_method = FindMethodFromCode(method_idx, receiver,
@@ -405,11 +400,6 @@
} else {
UnstartedRuntimeInvoke(self, target_method, receiver, arg_array.get(), result);
}
- // Check the return type if the result is non-null. We do the GetReturnType
- // after the null check to avoid resolution when there's an exception pending.
- if (result->GetL() != NULL && !mh.GetReturnType()->IsPrimitive()) {
- CHECK(mh.GetReturnType()->IsAssignableFrom(result->GetL()->GetClass()));
- }
mh.ChangeMethod(shadow_frame.GetMethod());
}
@@ -768,24 +758,48 @@
bool is_range = (dec_insn.opcode == Instruction::FILLED_NEW_ARRAY_RANGE);
int32_t length = dec_insn.vA;
CHECK(is_range || length <= 5);
+ if (UNLIKELY(length < 0)) {
+ self->ThrowNewExceptionF("Ljava/lang/NegativeArraySizeException;", "%d", length);
+ break;
+ }
Class* arrayClass = ResolveVerifyAndClinit(dec_insn.vB, shadow_frame.GetMethod(), self, false, true);
+ if (UNLIKELY(arrayClass == NULL)) {
+ CHECK(self->IsExceptionPending());
+ break;
+ }
CHECK(arrayClass->IsArrayClass());
- if (arrayClass->GetComponentType()->IsPrimitiveInt()) {
- IntArray* newArray = IntArray::Alloc(self, length);
- if (newArray != NULL) {
- for (int32_t i = 0; i < length; ++i) {
- if (is_range) {
- newArray->Set(i, shadow_frame.GetVReg(dec_insn.vC + i));
+ Class* componentClass = arrayClass->GetComponentType();
+ if (UNLIKELY(componentClass->IsPrimitive() && !componentClass->IsPrimitiveInt())) {
+ if (componentClass->IsPrimitiveLong() || componentClass->IsPrimitiveDouble()) {
+ self->ThrowNewExceptionF("Ljava/lang/RuntimeException;",
+ "Bad filled array request for type %s",
+ PrettyDescriptor(componentClass).c_str());
+ } else {
+ self->ThrowNewExceptionF("Ljava/lang/InternalError;",
+ "Found type %s; filled-new-array not implemented for anything but \'int\'",
+ PrettyDescriptor(componentClass).c_str());
+ }
+ break;
+ }
+ Object* newArray = Array::Alloc(self, arrayClass, length);
+ if (newArray != NULL) {
+ for (int32_t i = 0; i < length; ++i) {
+ if (is_range) {
+ if (componentClass->IsPrimitiveInt()) {
+ newArray->AsIntArray()->Set(i, shadow_frame.GetVReg(dec_insn.vC + i));
} else {
- newArray->Set(i, shadow_frame.GetVReg(dec_insn.arg[i]));
+ newArray->AsObjectArray<Object>()->Set(i, shadow_frame.GetVRegReference(dec_insn.vC + i));
+ }
+ } else {
+ if (componentClass->IsPrimitiveInt()) {
+ newArray->AsIntArray()->Set(i, shadow_frame.GetVReg(dec_insn.arg[i]));
+ } else {
+ newArray->AsObjectArray<Object>()->Set(i, shadow_frame.GetVRegReference(dec_insn.arg[i]));
}
}
}
- result_register.SetL(newArray);
- } else {
- UNIMPLEMENTED(FATAL) << inst->DumpString(&mh.GetDexFile())
- << " for array type: " << PrettyDescriptor(arrayClass);
}
+ result_register.SetL(newArray);
break;
}
case Instruction::CMPL_FLOAT: {
@@ -1287,13 +1301,13 @@
shadow_frame.SetVReg(dec_insn.vA, -shadow_frame.GetVReg(dec_insn.vB));
break;
case Instruction::NOT_INT:
- shadow_frame.SetVReg(dec_insn.vA, 0 ^ shadow_frame.GetVReg(dec_insn.vB));
+ shadow_frame.SetVReg(dec_insn.vA, ~shadow_frame.GetVReg(dec_insn.vB));
break;
case Instruction::NEG_LONG:
shadow_frame.SetVRegLong(dec_insn.vA, -shadow_frame.GetVRegLong(dec_insn.vB));
break;
case Instruction::NOT_LONG:
- shadow_frame.SetVRegLong(dec_insn.vA, 0 ^ shadow_frame.GetVRegLong(dec_insn.vB));
+ shadow_frame.SetVRegLong(dec_insn.vA, ~shadow_frame.GetVRegLong(dec_insn.vB));
break;
case Instruction::NEG_FLOAT:
shadow_frame.SetVRegFloat(dec_insn.vA, -shadow_frame.GetVRegFloat(dec_insn.vB));
@@ -1407,17 +1421,17 @@
shadow_frame.GetVReg(dec_insn.vC));
break;
case Instruction::SHL_INT:
- shadow_frame.SetVReg(dec_insn.vA,
- shadow_frame.GetVReg(dec_insn.vB) << shadow_frame.GetVReg(dec_insn.vC));
+ shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vB) <<
+ (shadow_frame.GetVReg(dec_insn.vC) & 0x1f));
break;
case Instruction::SHR_INT:
- shadow_frame.SetVReg(dec_insn.vA,
- shadow_frame.GetVReg(dec_insn.vB) >> shadow_frame.GetVReg(dec_insn.vC));
+ shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vB) >>
+ (shadow_frame.GetVReg(dec_insn.vC) & 0x1f));
break;
case Instruction::USHR_INT:
shadow_frame.SetVReg(dec_insn.vA,
static_cast<uint32_t>(shadow_frame.GetVReg(dec_insn.vB)) >>
- shadow_frame.GetVReg(dec_insn.vC));
+ (shadow_frame.GetVReg(dec_insn.vC) & 0x1f));
break;
case Instruction::AND_INT:
shadow_frame.SetVReg(dec_insn.vA,
@@ -1472,17 +1486,17 @@
case Instruction::SHL_LONG:
shadow_frame.SetVRegLong(dec_insn.vA,
shadow_frame.GetVRegLong(dec_insn.vB) <<
- shadow_frame.GetVReg(dec_insn.vC));
+ (shadow_frame.GetVReg(dec_insn.vC) & 0x3f));
break;
case Instruction::SHR_LONG:
shadow_frame.SetVRegLong(dec_insn.vA,
shadow_frame.GetVRegLong(dec_insn.vB) >>
- shadow_frame.GetVReg(dec_insn.vC));
+ (shadow_frame.GetVReg(dec_insn.vC) & 0x3f));
break;
case Instruction::USHR_LONG:
shadow_frame.SetVRegLong(dec_insn.vA,
static_cast<uint64_t>(shadow_frame.GetVRegLong(dec_insn.vB)) >>
- shadow_frame.GetVReg(dec_insn.vC));
+ (shadow_frame.GetVReg(dec_insn.vC) & 0x3f));
break;
case Instruction::ADD_FLOAT:
shadow_frame.SetVRegFloat(dec_insn.vA,
@@ -1551,17 +1565,17 @@
shadow_frame.GetVReg(dec_insn.vB));
break;
case Instruction::SHL_INT_2ADDR:
- shadow_frame.SetVReg(dec_insn.vA,
- shadow_frame.GetVReg(dec_insn.vA) << shadow_frame.GetVReg(dec_insn.vB));
+ shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vA) <<
+ (shadow_frame.GetVReg(dec_insn.vB) & 0x1f));
break;
case Instruction::SHR_INT_2ADDR:
- shadow_frame.SetVReg(dec_insn.vA,
- shadow_frame.GetVReg(dec_insn.vA) >> shadow_frame.GetVReg(dec_insn.vB));
+ shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vA) >>
+ (shadow_frame.GetVReg(dec_insn.vB) & 0x1f));
break;
case Instruction::USHR_INT_2ADDR:
shadow_frame.SetVReg(dec_insn.vA,
static_cast<uint32_t>(shadow_frame.GetVReg(dec_insn.vA)) >>
- shadow_frame.GetVReg(dec_insn.vB));
+ (shadow_frame.GetVReg(dec_insn.vB) & 0x1f));
break;
case Instruction::AND_INT_2ADDR:
shadow_frame.SetVReg(dec_insn.vA,
@@ -1620,17 +1634,17 @@
case Instruction::SHL_LONG_2ADDR:
shadow_frame.SetVRegLong(dec_insn.vA,
shadow_frame.GetVRegLong(dec_insn.vA) <<
- shadow_frame.GetVReg(dec_insn.vB));
+ (shadow_frame.GetVReg(dec_insn.vB) & 0x3f));
break;
case Instruction::SHR_LONG_2ADDR:
shadow_frame.SetVRegLong(dec_insn.vA,
shadow_frame.GetVRegLong(dec_insn.vA) >>
- shadow_frame.GetVReg(dec_insn.vB));
+ (shadow_frame.GetVReg(dec_insn.vB) & 0x3f));
break;
case Instruction::USHR_LONG_2ADDR:
shadow_frame.SetVRegLong(dec_insn.vA,
static_cast<uint64_t>(shadow_frame.GetVRegLong(dec_insn.vA)) >>
- shadow_frame.GetVReg(dec_insn.vB));
+ (shadow_frame.GetVReg(dec_insn.vB) & 0x3f));
break;
case Instruction::ADD_FLOAT_2ADDR:
shadow_frame.SetVRegFloat(dec_insn.vA,
@@ -1717,15 +1731,17 @@
shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vB) ^ dec_insn.vC);
break;
case Instruction::SHL_INT_LIT8:
- shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vB) << dec_insn.vC);
+ shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vB) <<
+ (dec_insn.vC & 0x1f));
break;
case Instruction::SHR_INT_LIT8:
- shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vB) >> dec_insn.vC);
+ shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vB) >>
+ (dec_insn.vC & 0x1f));
break;
case Instruction::USHR_INT_LIT8:
shadow_frame.SetVReg(dec_insn.vA,
static_cast<uint32_t>(shadow_frame.GetVReg(dec_insn.vB)) >>
- dec_insn.vC);
+ (dec_insn.vC & 0x1f));
break;
default:
LOG(FATAL) << "Unexpected instruction: " << inst->DumpString(&mh.GetDexFile());
diff --git a/src/jdwp/jdwp.h b/src/jdwp/jdwp.h
index 3186006..fbca7d1 100644
--- a/src/jdwp/jdwp.h
+++ b/src/jdwp/jdwp.h
@@ -286,7 +286,7 @@
explicit JdwpState(const JdwpOptions* options);
bool InvokeInProgress();
bool IsConnected();
- void SuspendByPolicy(JdwpSuspendPolicy suspend_policy, JDWP::ObjectId thread_self_id)
+ void SuspendByPolicy(JdwpSuspendPolicy suspend_policy, JDWP::ObjectId thread_self_id)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void SendRequestAndPossiblySuspend(ExpandBuf* pReq, JdwpSuspendPolicy suspend_policy,
ObjectId threadId)
diff --git a/src/jdwp/jdwp_handler.cc b/src/jdwp/jdwp_handler.cc
index 07e47b5..88677d5 100644
--- a/src/jdwp/jdwp_handler.cc
+++ b/src/jdwp/jdwp_handler.cc
@@ -277,7 +277,10 @@
*/
static JdwpError VM_Suspend(JdwpState*, const uint8_t*, int, ExpandBuf*)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self = Thread::Current();
+ self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
Dbg::SuspendVM();
+ self->TransitionFromSuspendedToRunnable();
return ERR_NONE;
}
diff --git a/src/jdwp/jdwp_main.cc b/src/jdwp/jdwp_main.cc
index 0691515..33aadee 100644
--- a/src/jdwp/jdwp_main.cc
+++ b/src/jdwp/jdwp_main.cc
@@ -102,7 +102,7 @@
serial_lock_("JDWP serial lock", kJdwpSerialLock),
request_serial_(0x10000000),
event_serial_(0x20000000),
- event_list_lock_("JDWP event list lock"),
+ event_list_lock_("JDWP event list lock", kJdwpEventListLock),
event_list_(NULL),
event_list_size_(0),
event_thread_lock_("JDWP event thread lock"),
diff --git a/src/locks.h b/src/locks.h
index c009f1d..9da7711 100644
--- a/src/locks.h
+++ b/src/locks.h
@@ -37,18 +37,19 @@
kThreadSuspendCountLock = 2,
kAbortLock = 3,
kDefaultMutexLevel = 4,
- kJdwpAttachLock = 5,
- kJdwpStartLock = 6,
- kJdwpSerialLock = 7,
- kAllocSpaceLock = 8,
- kLoadLibraryLock = 9,
- kClassLinkerClassesLock = 10,
- kThreadListLock = 11,
- kRuntimeShutdownLock = 12,
- kHeapBitmapLock = 13,
- kMonitorLock = 14,
- kMutatorLock = 15,
- kZygoteCreationLock = 16,
+ kAllocSpaceLock = 5,
+ kLoadLibraryLock = 6,
+ kClassLinkerClassesLock = 7,
+ kThreadListLock = 8,
+ kJdwpEventListLock = 9,
+ kJdwpAttachLock = 10,
+ kJdwpStartLock = 11,
+ kJdwpSerialLock = 12,
+ kRuntimeShutdownLock = 13,
+ kHeapBitmapLock = 14,
+ kMonitorLock = 15,
+ kMutatorLock = 16,
+ kZygoteCreationLock = 17,
kMaxMutexLevel = kZygoteCreationLock,
};
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
diff --git a/src/oat/runtime/arm/context_arm.cc b/src/oat/runtime/arm/context_arm.cc
index 057f41e..5bd4b3d 100644
--- a/src/oat/runtime/arm/context_arm.cc
+++ b/src/oat/runtime/arm/context_arm.cc
@@ -21,16 +21,20 @@
namespace art {
namespace arm {
-ArmContext::ArmContext() {
-#ifndef NDEBUG
- // Initialize registers with easy to spot debug values
- for (int i = 0; i < 16; i++) {
- gprs_[i] = kBadGprBase + i;
+static const uint32_t gZero = 0;
+
+void ArmContext::Reset() {
+ for (size_t i = 0; i < kNumberOfCoreRegisters; i++) {
+ gprs_[i] = NULL;
}
- for (int i = 0; i < 32; i++) {
- fprs_[i] = kBadFprBase + i;
+ for (size_t i = 0; i < kNumberOfSRegisters; i++) {
+ fprs_[i] = NULL;
}
-#endif
+ gprs_[SP] = &sp_;
+ gprs_[PC] = &pc_;
+ // Initialize registers with easy to spot debug values.
+ sp_ = ArmContext::kBadGprBase + SP;
+ pc_ = ArmContext::kBadGprBase + PC;
}
void ArmContext::FillCalleeSaves(const StackVisitor& fr) {
@@ -41,40 +45,55 @@
size_t fp_spill_count = __builtin_popcount(fp_core_spills);
size_t frame_size = method->GetFrameSizeInBytes();
if (spill_count > 0) {
- // Lowest number spill is furthest away, walk registers and fill into context
+ // Lowest number spill is farthest away, walk registers and fill into context
int j = 1;
- for (int i = 0; i < 16; i++) {
+ for (size_t i = 0; i < kNumberOfCoreRegisters; i++) {
if (((core_spills >> i) & 1) != 0) {
- gprs_[i] = fr.LoadCalleeSave(spill_count - j, frame_size);
+ gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size);
j++;
}
}
}
if (fp_spill_count > 0) {
- // Lowest number spill is furthest away, walk registers and fill into context
+ // Lowest number spill is farthest away, walk registers and fill into context
int j = 1;
- for (int i = 0; i < 32; i++) {
+ for (size_t i = 0; i < kNumberOfSRegisters; i++) {
if (((fp_core_spills >> i) & 1) != 0) {
- fprs_[i] = fr.LoadCalleeSave(spill_count + fp_spill_count - j, frame_size);
+ fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j, frame_size);
j++;
}
}
}
}
+void ArmContext::SetGPR(uint32_t reg, uintptr_t value) {
+ CHECK_LT(reg, kNumberOfCoreRegisters);
+ CHECK_NE(gprs_[reg], &gZero); // Can't overwrite this static value since it is never reset.
+ CHECK(gprs_[reg] != NULL);
+ *gprs_[reg] = value;
+}
+
void ArmContext::SmashCallerSaves() {
- gprs_[0] = 0; // This needs to be 0 because we want a null/zero return value.
- gprs_[1] = kBadGprBase + 1;
- gprs_[2] = kBadGprBase + 2;
- gprs_[3] = kBadGprBase + 3;
- gprs_[IP] = kBadGprBase + IP;
- gprs_[LR] = kBadGprBase + LR;
+ // This needs to be 0 because we want a null/zero return value.
+ gprs_[R0] = const_cast<uint32_t*>(&gZero);
+ gprs_[R1] = const_cast<uint32_t*>(&gZero);
+ gprs_[R2] = NULL;
+ gprs_[R3] = NULL;
}
extern "C" void art_do_long_jump(uint32_t*, uint32_t*);
void ArmContext::DoLongJump() {
- art_do_long_jump(&gprs_[0], &fprs_[S0]);
+ uintptr_t gprs[16];
+ uint32_t fprs[32];
+ for (size_t i = 0; i < kNumberOfCoreRegisters; ++i) {
+ gprs[i] = gprs_[i] != NULL ? *gprs_[i] : ArmContext::kBadGprBase + i;
+ }
+ for (size_t i = 0; i < kNumberOfSRegisters; ++i) {
+ fprs[i] = fprs_[i] != NULL ? *fprs_[i] : ArmContext::kBadGprBase + i;
+ }
+ DCHECK_EQ(reinterpret_cast<uintptr_t>(Thread::Current()), gprs[TR]);
+ art_do_long_jump(gprs, fprs);
}
} // namespace arm
diff --git a/src/oat/runtime/arm/context_arm.h b/src/oat/runtime/arm/context_arm.h
index 6f42cc3..a2b9ebe 100644
--- a/src/oat/runtime/arm/context_arm.h
+++ b/src/oat/runtime/arm/context_arm.h
@@ -17,6 +17,7 @@
#ifndef ART_SRC_OAT_RUNTIME_ARM_CONTEXT_ARM_H_
#define ART_SRC_OAT_RUNTIME_ARM_CONTEXT_ARM_H_
+#include "locks.h"
#include "constants_arm.h"
#include "oat/runtime/context.h"
@@ -25,31 +26,39 @@
class ArmContext : public Context {
public:
- ArmContext();
+ ArmContext() {
+ Reset();
+ }
+
virtual ~ArmContext() {}
+ virtual void Reset();
+
virtual void FillCalleeSaves(const StackVisitor& fr);
virtual void SetSP(uintptr_t new_sp) {
- gprs_[SP] = new_sp;
+ SetGPR(SP, new_sp);
}
virtual void SetPC(uintptr_t new_pc) {
- gprs_[PC] = new_pc;
+ SetGPR(PC, new_pc);
}
virtual uintptr_t GetGPR(uint32_t reg) {
- CHECK_GE(reg, 0u);
- CHECK_LT(reg, 16u);
- return gprs_[reg];
+ CHECK_LT(reg, kNumberOfCoreRegisters);
+ return *gprs_[reg];
}
+ virtual void SetGPR(uint32_t reg, uintptr_t value);
virtual void SmashCallerSaves();
virtual void DoLongJump();
private:
- uintptr_t gprs_[16];
- uint32_t fprs_[32];
+ // Pointers to register locations, initialized to NULL or the specific registers below.
+ uintptr_t* gprs_[kNumberOfCoreRegisters];
+ uint32_t* fprs_[kNumberOfSRegisters];
+ // Hold values for sp and pc if they are not located within a stack frame.
+ uintptr_t sp_, pc_;
};
} // namespace arm
diff --git a/src/oat/runtime/context.h b/src/oat/runtime/context.h
index 317030f..895abf9 100644
--- a/src/oat/runtime/context.h
+++ b/src/oat/runtime/context.h
@@ -24,7 +24,8 @@
class StackVisitor;
-// Representation of a thread's context on the executing machine
+// Representation of a thread's context on the executing machine, used to implement long jumps in
+// the quick stack frame layout.
class Context {
public:
// Creates a context for the running architecture
@@ -32,6 +33,9 @@
virtual ~Context() {}
+ // Re-initializes the registers for context re-use.
+ virtual void Reset() = 0;
+
// Read values from callee saves in the given frame. The frame also holds
// the method that holds the layout.
virtual void FillCalleeSaves(const StackVisitor& fr) = 0;
@@ -45,12 +49,16 @@
// Read the given GPR
virtual uintptr_t GetGPR(uint32_t reg) = 0;
+ // Set the given GPR.
+ virtual void SetGPR(uint32_t reg, uintptr_t value) = 0;
+
// Smash the caller save registers. If we're throwing, we don't want to return bogus values.
virtual void SmashCallerSaves() = 0;
// Switch execution of the executing context to this context
virtual void DoLongJump() = 0;
+ protected:
enum {
kBadGprBase = 0xebad6070,
kBadFprBase = 0xebad8070,
diff --git a/src/oat/runtime/mips/context_mips.cc b/src/oat/runtime/mips/context_mips.cc
index dc13c63..0c2f915 100644
--- a/src/oat/runtime/mips/context_mips.cc
+++ b/src/oat/runtime/mips/context_mips.cc
@@ -21,16 +21,20 @@
namespace art {
namespace mips {
-MipsContext::MipsContext() {
-#ifndef NDEBUG
+static const uint32_t gZero = 0;
+
+void MipsContext::Reset() {
+ for (size_t i = 0; i < kNumberOfCoreRegisters; i++) {
+ gprs_[i] = NULL;
+ }
+ for (size_t i = 0; i < kNumberOfFRegisters; i++) {
+ fprs_[i] = NULL;
+ }
+ gprs_[SP] = &sp_;
+ gprs_[RA] = &ra_;
// Initialize registers with easy to spot debug values.
- for (int i = 0; i < 32; i++) {
- gprs_[i] = kBadGprBase + i;
- }
- for (int i = 0; i < 32; i++) {
- fprs_[i] = kBadGprBase + i;
- }
-#endif
+ sp_ = MipsContext::kBadGprBase + SP;
+ ra_ = MipsContext::kBadGprBase + RA;
}
void MipsContext::FillCalleeSaves(const StackVisitor& fr) {
@@ -41,39 +45,55 @@
size_t fp_spill_count = __builtin_popcount(fp_core_spills);
size_t frame_size = method->GetFrameSizeInBytes();
if (spill_count > 0) {
- // Lowest number spill is furthest away, walk registers and fill into context.
+ // Lowest number spill is farthest away, walk registers and fill into context.
int j = 1;
- for (int i = 0; i < 32; i++) {
+ for (size_t i = 0; i < kNumberOfCoreRegisters; i++) {
if (((core_spills >> i) & 1) != 0) {
- gprs_[i] = fr.LoadCalleeSave(spill_count - j, frame_size);
+ gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size);
j++;
}
}
}
if (fp_spill_count > 0) {
- // Lowest number spill is furthest away, walk registers and fill into context.
+ // Lowest number spill is farthest away, walk registers and fill into context.
int j = 1;
- for (int i = 0; i < 32; i++) {
+ for (size_t i = 0; i < kNumberOfFRegisters; i++) {
if (((fp_core_spills >> i) & 1) != 0) {
- fprs_[i] = fr.LoadCalleeSave(spill_count + fp_spill_count - j, frame_size);
+ fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j, frame_size);
j++;
}
}
}
}
+void MipsContext::SetGPR(uint32_t reg, uintptr_t value) {
+ CHECK_LT(reg, kNumberOfCoreRegisters);
+ CHECK_NE(gprs_[reg], &gZero); // Can't overwrite this static value since it is never reset.
+ CHECK(gprs_[reg] != NULL);
+ *gprs_[reg] = value;
+}
+
void MipsContext::SmashCallerSaves() {
- gprs_[V0] = 0; // This needs to be 0 because we want a null/zero return value.
- gprs_[V1] = 0; // This needs to be 0 because we want a null/zero return value.
- gprs_[A1] = kBadGprBase + A1;
- gprs_[A2] = kBadGprBase + A2;
- gprs_[A3] = kBadGprBase + A3;
+ // This needs to be 0 because we want a null/zero return value.
+ gprs_[V0] = const_cast<uint32_t*>(&gZero);
+ gprs_[V1] = const_cast<uint32_t*>(&gZero);
+ gprs_[A1] = NULL;
+ gprs_[A2] = NULL;
+ gprs_[A3] = NULL;
}
extern "C" void art_do_long_jump(uint32_t*, uint32_t*);
void MipsContext::DoLongJump() {
- art_do_long_jump(&gprs_[ZERO], &fprs_[F0]);
+ uintptr_t gprs[kNumberOfCoreRegisters];
+ uint32_t fprs[kNumberOfFRegisters];
+ for (size_t i = 0; i < kNumberOfCoreRegisters; ++i) {
+ gprs[i] = gprs_[i] != NULL ? *gprs_[i] : MipsContext::kBadGprBase + i;
+ }
+ for (size_t i = 0; i < kNumberOfFRegisters; ++i) {
+ fprs[i] = fprs_[i] != NULL ? *fprs_[i] : MipsContext::kBadGprBase + i;
+ }
+ art_do_long_jump(gprs, fprs);
}
} // namespace mips
diff --git a/src/oat/runtime/mips/context_mips.h b/src/oat/runtime/mips/context_mips.h
index 1a86ca3..d4944a6 100644
--- a/src/oat/runtime/mips/context_mips.h
+++ b/src/oat/runtime/mips/context_mips.h
@@ -25,32 +25,38 @@
class MipsContext : public Context {
public:
- MipsContext();
+ MipsContext() {
+ Reset();
+ }
virtual ~MipsContext() {}
- // No callee saves on mips
+ virtual void Reset();
+
virtual void FillCalleeSaves(const StackVisitor& fr);
virtual void SetSP(uintptr_t new_sp) {
- gprs_[SP] = new_sp;
+ SetGPR(SP, new_sp);
}
virtual void SetPC(uintptr_t new_pc) {
- gprs_[RA] = new_pc;
+ SetGPR(RA, new_pc);
}
virtual uintptr_t GetGPR(uint32_t reg) {
- CHECK_GE(reg, 0u);
- CHECK_LT(reg, 32u);
+ CHECK_LT(reg, kNumberOfCoreRegisters);
return gprs_[reg];
}
+ virtual void SetGPR(uint32_t reg, uintptr_t value);
virtual void SmashCallerSaves();
virtual void DoLongJump();
private:
- uintptr_t gprs_[32];
- uint32_t fprs_[32];
+ // Pointers to registers in the stack, initialized to NULL except for the special cases below.
+ uintptr_t* gprs_[kNumberOfCoreRegisters];
+ uint32_t* fprs_[kNumberOfFRegisters];
+ // Hold values for sp and ra (return address) if they are not located within a stack frame.
+ uintptr_t sp_, ra_;
};
} // namespace mips
} // namespace art
diff --git a/src/oat/runtime/support_invoke.cc b/src/oat/runtime/support_invoke.cc
index 4f16afe..7a49489 100644
--- a/src/oat/runtime/support_invoke.cc
+++ b/src/oat/runtime/support_invoke.cc
@@ -137,10 +137,6 @@
AbstractMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type);
if (UNLIKELY(method == NULL)) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
- if (UNLIKELY(this_object == NULL && type != kDirect && type != kStatic)) {
- ThrowNullPointerExceptionForMethodAccess(caller_method, method_idx, type);
- return 0; // failure
- }
method = FindMethodFromCode(method_idx, this_object, caller_method, self, access_check, type);
if (UNLIKELY(method == NULL)) {
CHECK(self->IsExceptionPending());
diff --git a/src/oat/runtime/x86/context_x86.cc b/src/oat/runtime/x86/context_x86.cc
index 4ff2283..4efdf81 100644
--- a/src/oat/runtime/x86/context_x86.cc
+++ b/src/oat/runtime/x86/context_x86.cc
@@ -21,14 +21,16 @@
namespace art {
namespace x86 {
-X86Context::X86Context() {
-#ifndef NDEBUG
- // Initialize registers with easy to spot debug values.
- for (int i = 0; i < 8; i++) {
- gprs_[i] = kBadGprBase + i;
+static const uint32_t gZero = 0;
+
+void X86Context::Reset() {
+ for (int i = 0; i < kNumberOfCpuRegisters; i++) {
+ gprs_[i] = NULL;
}
- eip_ = 0xEBAD601F;
-#endif
+ gprs_[ESP] = &esp_;
+ // Initialize registers with easy to spot debug values.
+ esp_ = X86Context::kBadGprBase + ESP;
+ eip_ = X86Context::kBadGprBase + kNumberOfCpuRegisters;
}
void X86Context::FillCalleeSaves(const StackVisitor& fr) {
@@ -38,11 +40,11 @@
DCHECK_EQ(method->GetFpSpillMask(), 0u);
size_t frame_size = method->GetFrameSizeInBytes();
if (spill_count > 0) {
- // Lowest number spill is furthest away, walk registers and fill into context.
+ // Lowest number spill is farthest away, walk registers and fill into context.
int j = 2; // Offset j to skip return address spill.
- for (int i = 0; i < 8; i++) {
+ for (int i = 0; i < kNumberOfCpuRegisters; i++) {
if (((core_spills >> i) & 1) != 0) {
- gprs_[i] = fr.LoadCalleeSave(spill_count - j, frame_size);
+ gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size);
j++;
}
}
@@ -50,37 +52,40 @@
}
void X86Context::SmashCallerSaves() {
- gprs_[EAX] = 0; // This needs to be 0 because we want a null/zero return value.
- gprs_[ECX] = kBadGprBase + ECX;
- gprs_[EDX] = kBadGprBase + EDX;
- gprs_[EBX] = kBadGprBase + EBX;
+ // This needs to be 0 because we want a null/zero return value.
+ gprs_[EAX] = const_cast<uint32_t*>(&gZero);
+ gprs_[EDX] = const_cast<uint32_t*>(&gZero);
+ gprs_[ECX] = NULL;
+ gprs_[EBX] = NULL;
+}
+
+void X86Context::SetGPR(uint32_t reg, uintptr_t value){
+ CHECK_LT(reg, kNumberOfCpuRegisters);
+ CHECK_NE(gprs_[reg], &gZero);
+ CHECK(gprs_[reg] != NULL);
+ *gprs_[reg] = value;
}
void X86Context::DoLongJump() {
#if defined(__i386__)
- // We push all the registers using memory-memory pushes, we then pop-all to get the registers
- // set up, we then pop esp which will move us down the stack to the delivery address. At the frame
- // where the exception will be delivered, we push EIP so that the return will take us to the
- // correct delivery instruction.
- gprs_[ESP] -= 4;
- *(reinterpret_cast<uintptr_t*>(gprs_[ESP])) = eip_;
+ // Array of GPR values, filled from the context backward for the long jump pop. We add a slot at
+ // the top for the stack pointer that doesn't get popped in a pop-all.
+ volatile uintptr_t gprs[kNumberOfCpuRegisters + 1];
+ for (size_t i = 0; i < kNumberOfCpuRegisters; ++i) {
+ gprs[kNumberOfCpuRegisters - i - 1] = gprs_[i] != NULL ? *gprs_[i] : X86Context::kBadGprBase + i;
+ }
+ // We want to load the stack pointer one slot below so that the ret will pop eip.
+ uintptr_t esp = gprs[kNumberOfCpuRegisters - ESP - 1] - kWordSize;
+ gprs[kNumberOfCpuRegisters] = esp;
+ *(reinterpret_cast<uintptr_t*>(esp)) = eip_;
__asm__ __volatile__(
- "pushl %4\n\t"
- "pushl %0\n\t"
- "pushl %1\n\t"
- "pushl %2\n\t"
- "pushl %3\n\t"
- "pushl %4\n\t"
- "pushl %5\n\t"
- "pushl %6\n\t"
- "pushl %7\n\t"
- "popal\n\t"
- "popl %%esp\n\t"
- "ret\n\t"
- : //output
- : "g"(gprs_[EAX]), "g"(gprs_[ECX]), "g"(gprs_[EDX]), "g"(gprs_[EBX]),
- "g"(gprs_[ESP]), "g"(gprs_[EBP]), "g"(gprs_[ESI]), "g"(gprs_[EDI])
- :); // clobber
+ "movl %0, %%esp\n\t" // ESP points to gprs.
+ "popal\n\t" // Load all registers except ESP and EIP with values in gprs.
+ "popl %%esp\n\t" // Load stack pointer.
+ "ret\n\t" // From higher in the stack pop eip.
+ : // output.
+ : "g"(&gprs[0]) // input.
+ :); // clobber.
#else
UNIMPLEMENTED(FATAL);
#endif
diff --git a/src/oat/runtime/x86/context_x86.h b/src/oat/runtime/x86/context_x86.h
index 3d6b1d9..ad49f8d 100644
--- a/src/oat/runtime/x86/context_x86.h
+++ b/src/oat/runtime/x86/context_x86.h
@@ -25,14 +25,17 @@
class X86Context : public Context {
public:
- X86Context();
+ X86Context() {
+ Reset();
+ }
virtual ~X86Context() {}
- // No callee saves on X86
+ virtual void Reset();
+
virtual void FillCalleeSaves(const StackVisitor& fr);
virtual void SetSP(uintptr_t new_sp) {
- gprs_[ESP] = new_sp;
+ SetGPR(ESP, new_sp);
}
virtual void SetPC(uintptr_t new_pc) {
@@ -40,17 +43,23 @@
}
virtual uintptr_t GetGPR(uint32_t reg) {
- CHECK_GE(reg, 0u);
- CHECK_LT(reg, 8u);
- return gprs_[reg];
+ CHECK_LT(reg, kNumberOfCpuRegisters);
+ return *gprs_[reg];
}
+ virtual void SetGPR(uint32_t reg, uintptr_t value);
+
virtual void SmashCallerSaves();
virtual void DoLongJump();
private:
- uintptr_t gprs_[8];
- uintptr_t eip_;
+ // Pointers to register locations; floating point registers are all caller save. Values are
+ // initialized to NULL or the special registers below.
+ uintptr_t* gprs_[kNumberOfCpuRegisters];
+ // Hold values for esp and eip if they are not located within a stack frame. EIP is somewhat
+ // special in that it cannot be encoded normally as a register operand to an instruction (except
+ // in 64bit addressing modes).
+ uintptr_t esp_, eip_;
};
} // namespace x86
} // namespace art
diff --git a/src/runtime_support.cc b/src/runtime_support.cc
index 92c5e3a..856e877 100644
--- a/src/runtime_support.cc
+++ b/src/runtime_support.cc
@@ -208,7 +208,7 @@
}
// Slow path method resolution
-AbstractMethod* FindMethodFromCode(uint32_t method_idx, Object* this_object, const AbstractMethod* referrer,
+AbstractMethod* FindMethodFromCode(uint32_t method_idx, Object* this_object, AbstractMethod* referrer,
Thread* self, bool access_check, InvokeType type) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
bool is_direct = type == kStatic || type == kDirect;
@@ -216,6 +216,11 @@
if (UNLIKELY(resolved_method == NULL)) {
DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
return NULL; // Failure.
+ } else if (UNLIKELY(this_object == NULL && type != kStatic)) {
+ // Maintain interpreter-like semantics where NullPointerException is thrown
+ // after potential NoSuchMethodError from class linker.
+ ThrowNullPointerExceptionForMethodAccess(referrer, method_idx, type);
+ return NULL; // Failure.
} else {
if (!access_check) {
if (is_direct) {
diff --git a/src/runtime_support.h b/src/runtime_support.h
index d0a6209..adeedb7 100644
--- a/src/runtime_support.h
+++ b/src/runtime_support.h
@@ -223,7 +223,7 @@
}
}
-extern AbstractMethod* FindMethodFromCode(uint32_t method_idx, Object* this_object, const AbstractMethod* referrer,
+extern AbstractMethod* FindMethodFromCode(uint32_t method_idx, Object* this_object, AbstractMethod* referrer,
Thread* self, bool access_check, InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/src/stack.cc b/src/stack.cc
index be6fe45..ed44df9 100644
--- a/src/stack.cc
+++ b/src/stack.cc
@@ -99,16 +99,20 @@
uint32_t vmap_offset;
// TODO: IsInContext stops before spotting floating point registers.
if (vmap_table.IsInContext(vreg, vmap_offset, kind)) {
- UNIMPLEMENTED(FATAL);
+ bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
+ uint32_t spill_mask = is_float ? m->GetFpSpillMask() : m->GetCoreSpillMask();
+ const uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kReferenceVReg);
+ SetGPR(reg, new_value);
+ } else {
+ const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
+ DCHECK(code_item != NULL) << PrettyMethod(m); // Can't be NULL or how would we compile its instructions?
+ uint32_t core_spills = m->GetCoreSpillMask();
+ uint32_t fp_spills = m->GetFpSpillMask();
+ size_t frame_size = m->GetFrameSizeInBytes();
+ int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg);
+ byte* vreg_addr = reinterpret_cast<byte*>(GetCurrentQuickFrame()) + offset;
+ *reinterpret_cast<uint32_t*>(vreg_addr) = new_value;
}
- const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
- DCHECK(code_item != NULL) << PrettyMethod(m); // Can't be NULL or how would we compile its instructions?
- uint32_t core_spills = m->GetCoreSpillMask();
- uint32_t fp_spills = m->GetFpSpillMask();
- size_t frame_size = m->GetFrameSizeInBytes();
- int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg);
- byte* vreg_addr = reinterpret_cast<byte*>(GetCurrentQuickFrame()) + offset;
- *reinterpret_cast<uint32_t*>(vreg_addr) = new_value;
} else {
return cur_shadow_frame_->SetVReg(vreg, new_value);
}
@@ -119,6 +123,11 @@
return context_->GetGPR(reg);
}
+void StackVisitor::SetGPR(uint32_t reg, uintptr_t value) {
+ DCHECK (cur_quick_frame_ != NULL) << "This is a quick frame routine";
+ context_->SetGPR(reg, value);
+}
+
uintptr_t StackVisitor::GetReturnPc() const {
AbstractMethod** sp = GetCurrentQuickFrame();
DCHECK(sp != NULL);
diff --git a/src/stack.h b/src/stack.h
index ce84807..ecfa846 100644
--- a/src/stack.h
+++ b/src/stack.h
@@ -51,9 +51,10 @@
kUndefined,
};
-// ShadowFrame has 3 possible layouts: portable (VRegs & references overlap),
-// interpreter (VRegs and separate references array), JNI (just VRegs, but where
-// VRegs really => references).
+// ShadowFrame has 3 possible layouts:
+// - portable - a unified array of VRegs and references. Precise references need GC maps.
+// - interpreter - separate VRegs and reference arrays. References are in the reference array.
+// - JNI - just VRegs, but where every VReg holds a reference.
class ShadowFrame {
public:
// Create ShadowFrame for interpreter.
@@ -77,7 +78,6 @@
}
void SetNumberOfVRegs(uint32_t number_of_vregs) {
- DCHECK(number_of_vregs < kHasReferenceArray);
number_of_vregs_ = number_of_vregs | (number_of_vregs_ & kHasReferenceArray);
}
@@ -207,18 +207,21 @@
ShadowFrame(uint32_t num_vregs, ShadowFrame* link, AbstractMethod* method, uint32_t dex_pc,
bool has_reference_array)
: number_of_vregs_(num_vregs), link_(link), method_(method), dex_pc_(dex_pc) {
+ CHECK_LT(num_vregs, static_cast<uint32_t>(kHasReferenceArray));
if (has_reference_array) {
number_of_vregs_ |= kHasReferenceArray;
for (size_t i = 0; i < num_vregs; ++i) {
SetVRegReference(i, NULL);
}
- }
- for (size_t i = 0; i < num_vregs; ++i) {
- SetVReg(i, 0);
+ } else {
+ for (size_t i = 0; i < num_vregs; ++i) {
+ SetVReg(i, 0);
+ }
}
}
Object* const* References() const {
+ DCHECK(HasReferenceArray());
const uint32_t* vreg_end = &vregs_[NumberOfVRegs()];
return reinterpret_cast<Object* const*>(vreg_end);
}
@@ -362,16 +365,15 @@
size_t GetNativePcOffset() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- uintptr_t LoadCalleeSave(int num, size_t frame_size) const {
+ uintptr_t* CalleeSaveAddress(int num, size_t frame_size) const {
// Callee saves are held at the top of the frame
- AbstractMethod* method = GetMethod();
- DCHECK(method != NULL);
+ DCHECK(GetMethod() != NULL);
byte* save_addr =
reinterpret_cast<byte*>(cur_quick_frame_) + frame_size - ((num + 1) * kPointerSize);
#if defined(__i386__)
save_addr -= kPointerSize; // account for return address
#endif
- return *reinterpret_cast<uintptr_t*>(save_addr);
+ return reinterpret_cast<uintptr_t*>(save_addr);
}
// Returns the height of the stack in the managed stack frames, including transitions.
@@ -398,6 +400,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
uintptr_t GetGPR(uint32_t reg) const;
+ void SetGPR(uint32_t reg, uintptr_t value);
uint32_t GetVReg(AbstractMethod** cur_quick_frame, const DexFile::CodeItem* code_item,
uint32_t core_spills, uint32_t fp_spills, size_t frame_size,
diff --git a/src/thread.cc b/src/thread.cc
index 7490d2a..fb7aa5a 100644
--- a/src/thread.cc
+++ b/src/thread.cc
@@ -1725,8 +1725,6 @@
void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
AbstractMethod* catch_method = *handler_quick_frame_;
- Dbg::PostException(self_, throw_frame_id_, throw_method_, throw_dex_pc_,
- catch_method, handler_dex_pc_, exception_);
if (kDebugExceptionDelivery) {
if (catch_method == NULL) {
LOG(INFO) << "Handler is upcall";
@@ -1738,6 +1736,9 @@
}
self_->SetException(exception_); // Exception back in root set.
self_->EndAssertNoThreadSuspension(last_no_assert_suspension_cause_);
+ // Do debugger PostException after allowing thread suspension again.
+ Dbg::PostException(self_, throw_frame_id_, throw_method_, throw_dex_pc_,
+ catch_method, handler_dex_pc_, exception_);
// Place context back on thread so it will be available when we continue.
self_->ReleaseLongJumpContext(context_);
context_->SetSP(reinterpret_cast<uintptr_t>(handler_quick_frame_));
@@ -1793,6 +1794,7 @@
result = Context::Create();
} else {
long_jump_context_ = NULL; // Avoid context being shared.
+ result->Reset();
}
return result;
}
diff --git a/src/thread.h b/src/thread.h
index 7bd64c8..4d97315 100644
--- a/src/thread.h
+++ b/src/thread.h
@@ -128,9 +128,11 @@
}
static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts, Object* thread_peer)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts, jobject thread)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/src/thread_list.cc b/src/thread_list.cc
index d39d424..3834725 100644
--- a/src/thread_list.cc
+++ b/src/thread_list.cc
@@ -384,16 +384,18 @@
Thread* debug_thread = Dbg::GetDebugThread();
CHECK(debug_thread != NULL);
CHECK(self != debug_thread);
+ CHECK_NE(self->GetState(), kRunnable);
+ Locks::mutator_lock_->AssertNotHeld(self);
- // Collisions with other suspends aren't really interesting. We want
- // to ensure that we're the only one fiddling with the suspend count
- // though.
- MutexLock mu(self, *Locks::thread_suspend_count_lock_);
- self->ModifySuspendCount(self, +1, true);
+ {
+ // Collisions with other suspends aren't really interesting. We want
+ // to ensure that we're the only one fiddling with the suspend count
+ // though.
+ MutexLock mu(self, *Locks::thread_suspend_count_lock_);
+ self->ModifySuspendCount(self, +1, true);
+ CHECK_GT(self->suspend_count_, 0);
+ }
- // Suspend ourselves.
- CHECK_GT(self->suspend_count_, 0);
- self->SetState(kSuspended);
VLOG(threads) << *self << " self-suspending (debugger)";
// Tell JDWP that we've completed suspension. The JDWP thread can't
@@ -401,19 +403,22 @@
// suspend count lock.
Dbg::ClearWaitForEventThread();
- while (self->suspend_count_ != 0) {
- Thread::resume_cond_->Wait(self);
- if (self->suspend_count_ != 0) {
- // The condition was signaled but we're still suspended. This
- // can happen if the debugger lets go while a SIGQUIT thread
- // dump event is pending (assuming SignalCatcher was resumed for
- // just long enough to try to grab the thread-suspend lock).
- LOG(DEBUG) << *self << " still suspended after undo "
- << "(suspend count=" << self->suspend_count_ << ")";
+ {
+ MutexLock mu(self, *Locks::thread_suspend_count_lock_);
+ while (self->suspend_count_ != 0) {
+ Thread::resume_cond_->Wait(self);
+ if (self->suspend_count_ != 0) {
+ // The condition was signaled but we're still suspended. This
+ // can happen if the debugger lets go while a SIGQUIT thread
+ // dump event is pending (assuming SignalCatcher was resumed for
+ // just long enough to try to grab the thread-suspend lock).
+ LOG(DEBUG) << *self << " still suspended after undo "
+ << "(suspend count=" << self->suspend_count_ << ")";
+ }
}
+ CHECK_EQ(self->suspend_count_, 0);
}
- CHECK_EQ(self->suspend_count_, 0);
- self->SetState(kRunnable);
+
VLOG(threads) << *self << " self-reviving (debugger)";
}
diff --git a/test/084-class-init/expected.txt b/test/084-class-init/expected.txt
index 5b0b3ff..1389214 100644
--- a/test/084-class-init/expected.txt
+++ b/test/084-class-init/expected.txt
@@ -2,6 +2,7 @@
Got expected EIIE for FIELD0
Got expected NCDFE for FIELD0
Got expected NCDFE for FIELD1
+Got expected 'hello!' from Exploder
SlowInit static block pre-sleep
SlowInit static block post-sleep
MethodThread message
diff --git a/test/084-class-init/src/Exploder.java b/test/084-class-init/src/Exploder.java
new file mode 100644
index 0000000..911e5fe
--- /dev/null
+++ b/test/084-class-init/src/Exploder.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/**
+ * Throws an Error rather than an exception from its class initializer.
+ */
+public class Exploder {
+ public static final Object FIELD = new AssertThrower();
+ static class AssertThrower {
+ AssertThrower() {
+ throw new AssertionError("hello!");
+ }
+ }
+}
diff --git a/test/084-class-init/src/Main.java b/test/084-class-init/src/Main.java
index abad1f0..cf69570 100644
--- a/test/084-class-init/src/Main.java
+++ b/test/084-class-init/src/Main.java
@@ -67,6 +67,13 @@
} catch (NoClassDefFoundError ncdfe) {
System.out.println("Got expected NCDFE for FIELD1");
}
+
+ try {
+ System.out.println(Exploder.FIELD);
+ System.err.println("Load of FIELD succeeded unexpectedly");
+ } catch (AssertionError expected) {
+ System.out.println("Got expected '" + expected.getMessage() + "' from Exploder");
+ }
}
static void checkTiming() {