Merge "Address comments from aog/303658."
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index 31a7529..7c02384 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -879,7 +879,7 @@
elf_header.e_ident[EI_MAG2] = ELFMAG2;
elf_header.e_ident[EI_MAG3] = ELFMAG3;
elf_header.e_ident[EI_CLASS] = (sizeof(Elf_Addr) == sizeof(Elf32_Addr))
- ? ELFCLASS32 : ELFCLASS64;;
+ ? ELFCLASS32 : ELFCLASS64;
elf_header.e_ident[EI_DATA] = ELFDATA2LSB;
elf_header.e_ident[EI_VERSION] = EV_CURRENT;
elf_header.e_ident[EI_OSABI] = ELFOSABI_LINUX;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 046c2d8..7c3a2c6 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -489,14 +489,6 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location arg0, arg1;
- if (instruction_->IsInstanceOf()) {
- arg0 = locations->InAt(1);
- arg1 = locations->Out();
- } else {
- arg0 = locations->InAt(0);
- arg1 = locations->InAt(1);
- }
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -510,10 +502,10 @@
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- codegen->EmitParallelMoves(arg0,
+ codegen->EmitParallelMoves(locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Primitive::kPrimNot,
- arg1,
+ locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
@@ -521,7 +513,7 @@
instruction_,
instruction_->GetDexPc(),
this);
- CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
} else {
DCHECK(instruction_->IsCheckCast());
@@ -6114,16 +6106,15 @@
__ CompareAndBranchIfZero(obj, &zero);
}
- // /* HeapReference<Class> */ out = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction,
- out_loc,
- obj_loc,
- class_offset,
- maybe_temp_loc,
- kCompilerReadBarrierOption);
-
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
__ cmp(out, ShifterOperand(cls));
// Classes must be equal for the instanceof to succeed.
__ b(&zero, NE);
@@ -6133,6 +6124,13 @@
}
case TypeCheckKind::kAbstractClassCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
Label loop;
@@ -6155,6 +6153,13 @@
}
case TypeCheckKind::kClassHierarchyCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// Walk over the class hierarchy to find a match.
Label loop, success;
__ Bind(&loop);
@@ -6178,6 +6183,13 @@
}
case TypeCheckKind::kArrayObjectCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// Do an exact check.
Label exact_check;
__ cmp(out, ShifterOperand(cls));
@@ -6201,6 +6213,14 @@
}
case TypeCheckKind::kArrayCheck: {
+ // No read barrier since the slow path will retry upon failure.
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc,
+ kWithoutReadBarrier);
__ cmp(out, ShifterOperand(cls));
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(instruction,
@@ -6470,12 +6490,9 @@
iftable_offset,
maybe_temp2_loc,
kWithoutReadBarrier);
- // Null iftable means it is empty and will always fail the check.
- __ CompareAndBranchIfZero(temp, type_check_slow_path->GetEntryLabel());
-
- // Loop through the iftable and check if any class matches.
+ // Iftable is never null.
__ ldr(maybe_temp2_loc.AsRegister<Register>(), Address(temp, array_length_offset));
-
+ // Loop through the iftable and check if any class matches.
Label start_loop;
__ Bind(&start_loop);
__ CompareAndBranchIfZero(maybe_temp2_loc.AsRegister<Register>(),
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a085fea..6ec9c91 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -459,14 +459,6 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location arg0, arg1;
- if (instruction_->IsInstanceOf()) {
- arg0 = locations->InAt(1);
- arg1 = locations->Out();
- } else {
- arg0 = locations->InAt(0);
- arg1 = locations->InAt(1);
- }
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -482,15 +474,15 @@
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- codegen->EmitParallelMoves(arg0,
+ codegen->EmitParallelMoves(locations->InAt(0),
LocationFrom(calling_convention.GetRegisterAt(0)),
Primitive::kPrimNot,
- arg1,
+ locations->InAt(1),
LocationFrom(calling_convention.GetRegisterAt(1)),
Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
arm64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
- CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
Primitive::Type ret_type = instruction_->GetType();
Location ret_loc = calling_convention.GetReturnLocation(ret_type);
arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
@@ -3427,16 +3419,15 @@
__ Cbz(obj, &zero);
}
- // /* HeapReference<Class> */ out = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction,
- out_loc,
- obj_loc,
- class_offset,
- maybe_temp_loc,
- kCompilerReadBarrierOption);
-
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
__ Cmp(out, cls);
__ Cset(out, eq);
if (zero.IsLinked()) {
@@ -3446,6 +3437,13 @@
}
case TypeCheckKind::kAbstractClassCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
vixl::aarch64::Label loop, success;
@@ -3468,6 +3466,13 @@
}
case TypeCheckKind::kClassHierarchyCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// Walk over the class hierarchy to find a match.
vixl::aarch64::Label loop, success;
__ Bind(&loop);
@@ -3491,6 +3496,13 @@
}
case TypeCheckKind::kArrayObjectCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc,
+ kCompilerReadBarrierOption);
// Do an exact check.
vixl::aarch64::Label exact_check;
__ Cmp(out, cls);
@@ -3514,6 +3526,14 @@
}
case TypeCheckKind::kArrayCheck: {
+ // No read barrier since the slow path will retry upon failure.
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc,
+ kWithoutReadBarrier);
__ Cmp(out, cls);
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction,
@@ -3782,12 +3802,9 @@
iftable_offset,
maybe_temp2_loc,
kWithoutReadBarrier);
- // Null iftable means it is empty and will always fail the check.
- __ Cbz(temp, type_check_slow_path->GetEntryLabel());
-
- // Loop through the iftable and check if any class matches.
+ // Iftable is never null.
__ Ldr(WRegisterFrom(maybe_temp2_loc), HeapOperand(temp.W(), array_length_offset));
-
+ // Loop through the iftable and check if any class matches.
vixl::aarch64::Label start_loop;
__ Bind(&start_loop);
__ Cbz(WRegisterFrom(maybe_temp2_loc), type_check_slow_path->GetEntryLabel());
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index f7957d4..62bf7b9 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -444,14 +444,6 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location arg0, arg1;
- if (instruction_->IsInstanceOf()) {
- arg0 = locations->InAt(1);
- arg1 = locations->Out();
- } else {
- arg0 = locations->InAt(0);
- arg1 = locations->InAt(1);
- }
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -466,10 +458,10 @@
// move resolver.
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- codegen->EmitParallelMoves(arg0,
+ codegen->EmitParallelMoves(locations->InAt(0),
LocationFrom(calling_convention.GetRegisterAt(0)),
Primitive::kPrimNot,
- arg1,
+ locations->InAt(1),
LocationFrom(calling_convention.GetRegisterAt(1)),
Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
@@ -477,8 +469,7 @@
instruction_,
instruction_->GetDexPc(),
this);
- CheckEntrypointTypes<
- kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
arm_codegen->Move32(locations->Out(), LocationFrom(r0));
} else {
DCHECK(instruction_->IsCheckCast());
@@ -2025,7 +2016,7 @@
case Primitive::kPrimFloat: {
// Processing a Dex `float-to-int' instruction.
vixl32::SRegister temp = LowSRegisterFrom(locations->GetTemp(0));
- __ Vcvt(I32, F32, temp, InputSRegisterAt(conversion, 0));
+ __ Vcvt(S32, F32, temp, InputSRegisterAt(conversion, 0));
__ Vmov(OutputRegister(conversion), temp);
break;
}
@@ -2033,7 +2024,7 @@
case Primitive::kPrimDouble: {
// Processing a Dex `double-to-int' instruction.
vixl32::SRegister temp_s = LowSRegisterFrom(locations->GetTemp(0));
- __ Vcvt(I32, F64, temp_s, DRegisterFrom(in));
+ __ Vcvt(S32, F64, temp_s, DRegisterFrom(in));
__ Vmov(OutputRegister(conversion), temp_s);
break;
}
@@ -2109,7 +2100,7 @@
case Primitive::kPrimChar: {
// Processing a Dex `int-to-float' instruction.
__ Vmov(OutputSRegister(conversion), InputRegisterAt(conversion, 0));
- __ Vcvt(F32, I32, OutputSRegister(conversion), OutputSRegister(conversion));
+ __ Vcvt(F32, S32, OutputSRegister(conversion), OutputSRegister(conversion));
break;
}
@@ -2140,7 +2131,7 @@
case Primitive::kPrimChar: {
// Processing a Dex `int-to-double' instruction.
__ Vmov(LowSRegisterFrom(out), InputRegisterAt(conversion, 0));
- __ Vcvt(F64, I32, DRegisterFrom(out), LowSRegisterFrom(out));
+ __ Vcvt(F64, S32, DRegisterFrom(out), LowSRegisterFrom(out));
break;
}
@@ -2148,18 +2139,15 @@
// Processing a Dex `long-to-double' instruction.
vixl32::Register low = LowRegisterFrom(in);
vixl32::Register high = HighRegisterFrom(in);
-
vixl32::SRegister out_s = LowSRegisterFrom(out);
vixl32::DRegister out_d = DRegisterFrom(out);
-
vixl32::SRegister temp_s = LowSRegisterFrom(locations->GetTemp(0));
vixl32::DRegister temp_d = DRegisterFrom(locations->GetTemp(0));
-
- vixl32::DRegister constant_d = DRegisterFrom(locations->GetTemp(0));
+ vixl32::DRegister constant_d = DRegisterFrom(locations->GetTemp(1));
// temp_d = int-to-double(high)
__ Vmov(temp_s, high);
- __ Vcvt(F64, I32, temp_d, temp_s);
+ __ Vcvt(F64, S32, temp_d, temp_s);
// constant_d = k2Pow32EncodingForDouble
__ Vmov(constant_d, bit_cast<double, int64_t>(k2Pow32EncodingForDouble));
// out_d = unsigned-to-double(low)
@@ -5165,11 +5153,14 @@
__ Cbz(obj, &zero);
}
- // /* HeapReference<Class> */ out = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction, out_loc, obj_loc, class_offset, maybe_temp_loc);
-
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc);
__ Cmp(out, cls);
// Classes must be equal for the instanceof to succeed.
__ B(ne, &zero);
@@ -5179,6 +5170,12 @@
}
case TypeCheckKind::kAbstractClassCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
vixl32::Label loop;
@@ -5197,6 +5194,12 @@
}
case TypeCheckKind::kClassHierarchyCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc);
// Walk over the class hierarchy to find a match.
vixl32::Label loop, success;
__ Bind(&loop);
@@ -5216,6 +5219,12 @@
}
case TypeCheckKind::kArrayObjectCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc);
// Do an exact check.
vixl32::Label exact_check;
__ Cmp(out, cls);
@@ -5235,6 +5244,12 @@
}
case TypeCheckKind::kArrayCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ maybe_temp_loc);
__ Cmp(out, cls);
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction,
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index f169eb0..573bb50 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -378,14 +378,6 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location arg0, arg1;
- if (instruction_->IsInstanceOf()) {
- arg0 = locations->InAt(1);
- arg1 = locations->Out();
- } else {
- arg0 = locations->InAt(0);
- arg1 = locations->InAt(1);
- }
uint32_t dex_pc = instruction_->GetDexPc();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -397,15 +389,15 @@
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- codegen->EmitParallelMoves(arg0,
+ codegen->EmitParallelMoves(locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Primitive::kPrimNot,
- arg1,
+ locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
mips_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
- CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
Primitive::Type ret_type = instruction_->GetType();
Location ret_loc = calling_convention.GetReturnLocation(ret_type);
mips_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 7598740..1a54935 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -322,14 +322,6 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location arg0, arg1;
- if (instruction_->IsInstanceOf()) {
- arg0 = locations->InAt(1);
- arg1 = locations->Out();
- } else {
- arg0 = locations->InAt(0);
- arg1 = locations->InAt(1);
- }
uint32_t dex_pc = instruction_->GetDexPc();
DCHECK(instruction_->IsCheckCast()
@@ -342,16 +334,15 @@
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- codegen->EmitParallelMoves(arg0,
+ codegen->EmitParallelMoves(locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Primitive::kPrimNot,
- arg1,
+ locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
mips64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
- CheckEntrypointTypes<
- kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
Primitive::Type ret_type = instruction_->GetType();
Location ret_loc = calling_convention.GetReturnLocation(ret_type);
mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 45edff8..7e4ad26 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -312,14 +312,6 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location arg0, arg1;
- if (instruction_->IsInstanceOf()) {
- arg0 = locations->InAt(1);
- arg1 = locations->Out();
- } else {
- arg0 = locations->InAt(0);
- arg1 = locations->InAt(1);
- }
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -333,10 +325,10 @@
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- x86_codegen->EmitParallelMoves(arg0,
+ x86_codegen->EmitParallelMoves(locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Primitive::kPrimNot,
- arg1,
+ locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
@@ -344,7 +336,7 @@
instruction_,
instruction_->GetDexPc(),
this);
- CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
} else {
DCHECK(instruction_->IsCheckCast());
x86_codegen->InvokeRuntime(kQuickCheckInstanceOf,
@@ -6422,15 +6414,14 @@
__ j(kEqual, &zero);
}
- // /* HeapReference<Class> */ out = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction,
- out_loc,
- obj_loc,
- class_offset,
- kCompilerReadBarrierOption);
-
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ kCompilerReadBarrierOption);
if (cls.IsRegister()) {
__ cmpl(out, cls.AsRegister<Register>());
} else {
@@ -6446,6 +6437,12 @@
}
case TypeCheckKind::kAbstractClassCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ kCompilerReadBarrierOption);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
NearLabel loop;
@@ -6474,6 +6471,12 @@
}
case TypeCheckKind::kClassHierarchyCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ kCompilerReadBarrierOption);
// Walk over the class hierarchy to find a match.
NearLabel loop, success;
__ Bind(&loop);
@@ -6503,6 +6506,12 @@
}
case TypeCheckKind::kArrayObjectCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ kCompilerReadBarrierOption);
// Do an exact check.
NearLabel exact_check;
if (cls.IsRegister()) {
@@ -6531,6 +6540,13 @@
}
case TypeCheckKind::kArrayCheck: {
+ // No read barrier since the slow path will retry upon failure.
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ kWithoutReadBarrier);
if (cls.IsRegister()) {
__ cmpl(out, cls.AsRegister<Register>());
} else {
@@ -6833,24 +6849,24 @@
temp_loc,
iftable_offset,
kWithoutReadBarrier);
- // Null iftable means it is empty.
- __ testl(temp, temp);
- __ j(kZero, type_check_slow_path->GetEntryLabel());
-
- // Loop through the iftable and check if any class matches.
+ // Iftable is never null.
__ movl(maybe_temp2_loc.AsRegister<Register>(), Address(temp, array_length_offset));
-
+ // Loop through the iftable and check if any class matches.
NearLabel start_loop;
__ Bind(&start_loop);
- __ cmpl(cls.AsRegister<Register>(), Address(temp, object_array_data_offset));
- __ j(kEqual, &done); // Return if same class.
- // Go to next interface.
- __ addl(temp, Immediate(2 * kHeapReferenceSize));
+ // Need to subtract first to handle the empty array case.
__ subl(maybe_temp2_loc.AsRegister<Register>(), Immediate(2));
- __ j(kNotZero, &start_loop);
+ __ j(kNegative, type_check_slow_path->GetEntryLabel());
+ // Go to next interface if the classes do not match.
+ __ cmpl(cls.AsRegister<Register>(),
+ CodeGeneratorX86::ArrayAddress(temp,
+ maybe_temp2_loc,
+ TIMES_4,
+ object_array_data_offset));
+ __ j(kNotEqual, &start_loop);
+ } else {
+ __ jmp(type_check_slow_path->GetEntryLabel());
}
-
- __ jmp(type_check_slow_path->GetEntryLabel());
break;
}
}
@@ -7546,7 +7562,7 @@
// The value to patch is the distance from the offset in the constant area
// from the address computed by the HX86ComputeBaseMethodAddress instruction.
int32_t constant_offset = codegen_->ConstantAreaStart() + offset_into_constant_area_;
- int32_t relative_position = constant_offset - codegen_->GetMethodAddressOffset();;
+ int32_t relative_position = constant_offset - codegen_->GetMethodAddressOffset();
// Patch in the right value.
region.StoreUnaligned<int32_t>(pos - 4, relative_position);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index f7a2f40..19b3019 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -332,14 +332,6 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location arg0, arg1;
- if (instruction_->IsInstanceOf()) {
- arg0 = locations->InAt(1);
- arg1 = locations->Out();
- } else {
- arg0 = locations->InAt(0);
- arg1 = locations->InAt(1);
- }
uint32_t dex_pc = instruction_->GetDexPc();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -354,15 +346,15 @@
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- codegen->EmitParallelMoves(arg0,
+ codegen->EmitParallelMoves(locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Primitive::kPrimNot,
- arg1,
+ locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
x86_64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
- CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
} else {
DCHECK(instruction_->IsCheckCast());
x86_64_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this);
@@ -5826,15 +5818,14 @@
__ j(kEqual, &zero);
}
- // /* HeapReference<Class> */ out = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction,
- out_loc,
- obj_loc,
- class_offset,
- kCompilerReadBarrierOption);
-
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ kCompilerReadBarrierOption);
if (cls.IsRegister()) {
__ cmpl(out, cls.AsRegister<CpuRegister>());
} else {
@@ -5855,6 +5846,12 @@
}
case TypeCheckKind::kAbstractClassCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ kCompilerReadBarrierOption);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
NearLabel loop, success;
@@ -5883,6 +5880,12 @@
}
case TypeCheckKind::kClassHierarchyCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ kCompilerReadBarrierOption);
// Walk over the class hierarchy to find a match.
NearLabel loop, success;
__ Bind(&loop);
@@ -5912,6 +5915,12 @@
}
case TypeCheckKind::kArrayObjectCheck: {
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ kCompilerReadBarrierOption);
// Do an exact check.
NearLabel exact_check;
if (cls.IsRegister()) {
@@ -5940,6 +5949,13 @@
}
case TypeCheckKind::kArrayCheck: {
+ // No read barrier since the slow path will retry upon failure.
+ // /* HeapReference<Class> */ out = obj->klass_
+ GenerateReferenceLoadTwoRegisters(instruction,
+ out_loc,
+ obj_loc,
+ class_offset,
+ kWithoutReadBarrier);
if (cls.IsRegister()) {
__ cmpl(out, cls.AsRegister<CpuRegister>());
} else {
@@ -6243,23 +6259,24 @@
temp_loc,
iftable_offset,
kWithoutReadBarrier);
- // Null iftable means it is empty.
- __ testl(temp, temp);
- __ j(kZero, type_check_slow_path->GetEntryLabel());
-
- // Loop through the iftable and check if any class matches.
+ // Iftable is never null.
__ movl(maybe_temp2_loc.AsRegister<CpuRegister>(), Address(temp, array_length_offset));
-
+ // Loop through the iftable and check if any class matches.
NearLabel start_loop;
__ Bind(&start_loop);
- __ cmpl(cls.AsRegister<CpuRegister>(), Address(temp, object_array_data_offset));
- __ j(kEqual, &done); // Return if same class.
- // Go to next interface.
- __ addl(temp, Immediate(2 * kHeapReferenceSize));
+ // Need to subtract first to handle the empty array case.
__ subl(maybe_temp2_loc.AsRegister<CpuRegister>(), Immediate(2));
- __ j(kNotZero, &start_loop);
+ __ j(kNegative, type_check_slow_path->GetEntryLabel());
+ // Go to next interface if the classes do not match.
+ __ cmpl(cls.AsRegister<CpuRegister>(),
+ CodeGeneratorX86_64::ArrayAddress(temp,
+ maybe_temp2_loc,
+ TIMES_4,
+ object_array_data_offset));
+ __ j(kNotEqual, &start_loop);  // Fall through on a match; otherwise try the next interface.
+ } else {
+ __ jmp(type_check_slow_path->GetEntryLabel());
}
- __ jmp(type_check_slow_path->GetEntryLabel());
break;
}
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index 8a9fd90..23b2774 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -49,7 +49,7 @@
return dwarf::Reg::ArmFp(static_cast<int>(reg.GetCode()));
}
-static constexpr size_t kFramePointerSize = static_cast<size_t>(kArmPointerSize);;
+static constexpr size_t kFramePointerSize = static_cast<size_t>(kArmPointerSize);
void ArmVIXLJNIMacroAssembler::BuildFrame(size_t frame_size,
ManagedRegister method_reg,
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index c37c72b..65703a2 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -2361,6 +2361,11 @@
LOG(ERROR) << "Failed to create runtime";
return false;
}
+
+ // Runtime::Init will rename this thread to be "main". Prefer "dex2oat" so that "top" and
+ // "ps -a" don't change to non-descript "main."
+ SetThreadName(kIsDebugBuild ? "dex2oatd" : "dex2oat");
+
runtime_.reset(Runtime::Current());
runtime_->SetInstructionSet(instruction_set_);
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 3d208b5..4c01c14 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -2783,7 +2783,7 @@
bool result = klass->GetImt(pointer_size) == object_class->GetImt(pointer_size);
- if (klass->GetIfTable() == nullptr) {
+ if (klass->GetIfTable()->Count() == 0) {
DCHECK(result);
}
@@ -2889,25 +2889,23 @@
std::cerr << " Interfaces:" << std::endl;
// Run through iftable, find methods that slot here, see if they fit.
mirror::IfTable* if_table = klass->GetIfTable();
- if (if_table != nullptr) {
- for (size_t i = 0, num_interfaces = klass->GetIfTableCount(); i < num_interfaces; ++i) {
- mirror::Class* iface = if_table->GetInterface(i);
- std::string iface_name;
- std::cerr << " " << iface->GetDescriptor(&iface_name) << std::endl;
+ for (size_t i = 0, num_interfaces = klass->GetIfTableCount(); i < num_interfaces; ++i) {
+ mirror::Class* iface = if_table->GetInterface(i);
+ std::string iface_name;
+ std::cerr << " " << iface->GetDescriptor(&iface_name) << std::endl;
- for (ArtMethod& iface_method : iface->GetVirtualMethods(pointer_size)) {
- uint32_t class_hash, name_hash, signature_hash;
- ImTable::GetImtHashComponents(&iface_method, &class_hash, &name_hash, &signature_hash);
- uint32_t imt_slot = ImTable::GetImtIndex(&iface_method);
- std::cerr << " " << iface_method.PrettyMethod(true)
- << " slot=" << imt_slot
- << std::hex
- << " class_hash=0x" << class_hash
- << " name_hash=0x" << name_hash
- << " signature_hash=0x" << signature_hash
- << std::dec
- << std::endl;
- }
+ for (ArtMethod& iface_method : iface->GetVirtualMethods(pointer_size)) {
+ uint32_t class_hash, name_hash, signature_hash;
+ ImTable::GetImtHashComponents(&iface_method, &class_hash, &name_hash, &signature_hash);
+ uint32_t imt_slot = ImTable::GetImtIndex(&iface_method);
+ std::cerr << " " << iface_method.PrettyMethod(true)
+ << " slot=" << imt_slot
+ << std::hex
+ << " class_hash=0x" << class_hash
+ << " name_hash=0x" << name_hash
+ << " signature_hash=0x" << signature_hash
+ << std::dec
+ << std::endl;
}
}
}
@@ -2972,18 +2970,16 @@
} else {
// Run through iftable, find methods that slot here, see if they fit.
mirror::IfTable* if_table = klass->GetIfTable();
- if (if_table != nullptr) {
- for (size_t i = 0, num_interfaces = klass->GetIfTableCount(); i < num_interfaces; ++i) {
- mirror::Class* iface = if_table->GetInterface(i);
- size_t num_methods = iface->NumDeclaredVirtualMethods();
- if (num_methods > 0) {
- for (ArtMethod& iface_method : iface->GetMethods(pointer_size)) {
- if (ImTable::GetImtIndex(&iface_method) == index) {
- std::string i_name = iface_method.PrettyMethod(true);
- if (StartsWith(i_name, method.c_str())) {
- std::cerr << " Slot " << index << " (1)" << std::endl;
- std::cerr << " " << p_name << " (" << i_name << ")" << std::endl;
- }
+ for (size_t i = 0, num_interfaces = klass->GetIfTableCount(); i < num_interfaces; ++i) {
+ mirror::Class* iface = if_table->GetInterface(i);
+ size_t num_methods = iface->NumDeclaredVirtualMethods();
+ if (num_methods > 0) {
+ for (ArtMethod& iface_method : iface->GetMethods(pointer_size)) {
+ if (ImTable::GetImtIndex(&iface_method) == index) {
+ std::string i_name = iface_method.PrettyMethod(true);
+ if (StartsWith(i_name, method.c_str())) {
+ std::cerr << " Slot " << index << " (1)" << std::endl;
+ std::cerr << " " << p_name << " (" << i_name << ")" << std::endl;
}
}
}
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 3c8c1a3..5dc1457 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -762,16 +762,14 @@
if (vtable != nullptr) {
vtable->Fixup(RelocatedCopyOfFollowImages(vtable), pointer_size, native_visitor);
}
- auto* iftable = klass->GetIfTable();
- if (iftable != nullptr) {
- for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
- if (iftable->GetMethodArrayCount(i) > 0) {
- auto* method_array = iftable->GetMethodArray(i);
- CHECK(method_array != nullptr);
- method_array->Fixup(RelocatedCopyOfFollowImages(method_array),
- pointer_size,
- native_visitor);
- }
+ mirror::IfTable* iftable = klass->GetIfTable();
+ for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
+ if (iftable->GetMethodArrayCount(i) > 0) {
+ auto* method_array = iftable->GetMethodArray(i);
+ CHECK(method_array != nullptr);
+ method_array->Fixup(RelocatedCopyOfFollowImages(method_array),
+ pointer_size,
+ native_visitor);
}
}
} else if (object->GetClass() == mirror::Method::StaticClass() ||
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 5bd6b56..de72d3a 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -30,7 +30,7 @@
namespace art {
// Cast entrypoints.
-extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class);
+extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class);
// Read barrier entrypoints.
// art_quick_read_barrier_mark_regX uses a non-standard calling
@@ -86,7 +86,7 @@
DefaultInitEntryPoints(jpoints, qpoints);
// Cast
- qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
+ qpoints->pInstanceofNonTrivial = artInstanceOfFromCode;
qpoints->pCheckInstanceOf = art_quick_check_instance_of;
// Math
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index e7c9fef..6add107 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -30,7 +30,7 @@
namespace art {
// Cast entrypoints.
-extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class);
+extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class);
// Read barrier entrypoints.
// art_quick_read_barrier_mark_regX uses a non-standard calling
@@ -115,7 +115,7 @@
DefaultInitEntryPoints(jpoints, qpoints);
// Cast
- qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
+ qpoints->pInstanceofNonTrivial = artInstanceOfFromCode;
qpoints->pCheckInstanceOf = art_quick_check_instance_of;
// Math
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 6dca46f..cb0bdbf 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -30,7 +30,7 @@
namespace art {
// Cast entrypoints.
-extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class);
+extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class);
// Math entrypoints.
extern int32_t CmpgDouble(double a, double b);
@@ -74,7 +74,7 @@
ResetQuickAllocEntryPoints(qpoints);
// Cast
- qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
+ qpoints->pInstanceofNonTrivial = artInstanceOfFromCode;
static_assert(IsDirectEntrypoint(kQuickInstanceofNonTrivial), "Direct C stub not marked direct.");
qpoints->pCheckInstanceOf = art_quick_check_instance_of;
static_assert(!IsDirectEntrypoint(kQuickCheckInstanceOf), "Non-direct C stub marked direct.");
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 0e81906..bc17d47 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -30,7 +30,7 @@
namespace art {
// Cast entrypoints.
-extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class);
+extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class);
// Math entrypoints.
extern int32_t CmpgDouble(double a, double b);
@@ -67,7 +67,7 @@
DefaultInitEntryPoints(jpoints, qpoints);
// Cast
- qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
+ qpoints->pInstanceofNonTrivial = artInstanceOfFromCode;
qpoints->pCheckInstanceOf = art_quick_check_instance_of;
// Math
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index 94fea69..9cd4a3e 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -27,7 +27,7 @@
namespace art {
// Cast entrypoints.
-extern "C" size_t art_quick_is_assignable(mirror::Class* klass, mirror::Class* ref_class);
+extern "C" size_t art_quick_instance_of(mirror::Object* obj, mirror::Class* ref_class);
// Read barrier entrypoints.
// art_quick_read_barrier_mark_regX uses a non-standard calling
@@ -58,7 +58,7 @@
DefaultInitEntryPoints(jpoints, qpoints);
// Cast
- qpoints->pInstanceofNonTrivial = art_quick_is_assignable;
+ qpoints->pInstanceofNonTrivial = art_quick_instance_of;
qpoints->pCheckInstanceOf = art_quick_check_instance_of;
// More math.
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 761a510..fb405fa 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1351,15 +1351,15 @@
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_unlock_object_no_inline
-DEFINE_FUNCTION art_quick_is_assignable
+DEFINE_FUNCTION art_quick_instance_of
PUSH eax // alignment padding
PUSH ecx // pass arg2 - obj->klass
PUSH eax // pass arg1 - checked class
- call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
+ call SYMBOL(artInstanceOfFromCode) // (Object* obj, Class* ref_klass)
addl LITERAL(12), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-12)
ret
-END_FUNCTION art_quick_is_assignable
+END_FUNCTION art_quick_instance_of
DEFINE_FUNCTION art_quick_check_instance_of
PUSH eax // alignment padding
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 6b66e62..a326b4e 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -30,7 +30,7 @@
namespace art {
// Cast entrypoints.
-extern "C" size_t art_quick_assignable_from_code(mirror::Class* klass, mirror::Class* ref_class);
+extern "C" size_t art_quick_instance_of(mirror::Object* obj, mirror::Class* ref_class);
// Read barrier entrypoints.
// art_quick_read_barrier_mark_regX uses a non-standard calling
@@ -81,7 +81,7 @@
DefaultInitEntryPoints(jpoints, qpoints);
// Cast
- qpoints->pInstanceofNonTrivial = art_quick_assignable_from_code;
+ qpoints->pInstanceofNonTrivial = art_quick_instance_of;
qpoints->pCheckInstanceOf = art_quick_check_instance_of;
// More math.
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 20ee3f5..860b77e 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -2223,16 +2223,16 @@
UNIMPLEMENTED art_quick_memcmp16
-DEFINE_FUNCTION art_quick_assignable_from_code
+DEFINE_FUNCTION art_quick_instance_of
SETUP_FP_CALLEE_SAVE_FRAME
subq LITERAL(8), %rsp // Alignment padding.
CFI_ADJUST_CFA_OFFSET(8)
- call SYMBOL(artIsAssignableFromCode) // (const mirror::Class*, const mirror::Class*)
+ call SYMBOL(artInstanceOfFromCode) // (mirror::Object*, mirror::Class*)
addq LITERAL(8), %rsp
CFI_ADJUST_CFA_OFFSET(-8)
RESTORE_FP_CALLEE_SAVE_FRAME
ret
-END_FUNCTION art_quick_assignable_from_code
+END_FUNCTION art_quick_instance_of
// Return from a nested signal:
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 350855b..7359243 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -69,7 +69,7 @@
Thread::PoisonObjectPointersIfDebug();
ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
// MethodVerifier refuses methods with string_idx out of bounds.
- DCHECK_LT(string_idx, declaring_class->GetDexFile().NumStringIds());;
+ DCHECK_LT(string_idx, declaring_class->GetDexFile().NumStringIds());
ObjPtr<mirror::String> string =
mirror::StringDexCachePair::Lookup(declaring_class->GetDexCacheStrings(),
string_idx,
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index c8875f4..65e46c2 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -475,6 +475,9 @@
SetClassRoot(kJavaLangString, java_lang_String.Get());
SetClassRoot(kJavaLangRefReference, java_lang_ref_Reference.Get());
+ // Fill in the empty iftable. Needs to be done after the kObjectArrayClass root is set.
+ java_lang_Object->SetIfTable(AllocIfTable(self, 0));
+
// Setup the primitive type classes.
SetClassRoot(kPrimitiveBoolean, CreatePrimitiveClass(self, Primitive::kPrimBoolean));
SetClassRoot(kPrimitiveByte, CreatePrimitiveClass(self, Primitive::kPrimByte));
@@ -916,13 +919,11 @@
SanityCheckArtMethod(klass->GetEmbeddedVTableEntry(i, pointer_size), nullptr, image_spaces);
}
}
- auto* iftable = klass->GetIfTable();
- if (iftable != nullptr) {
- for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
- if (iftable->GetMethodArrayCount(i) > 0) {
- SanityCheckArtMethodPointerArray(
- iftable->GetMethodArray(i), nullptr, pointer_size, image_spaces);
- }
+ mirror::IfTable* iftable = klass->GetIfTable();
+ for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
+ if (iftable->GetMethodArrayCount(i) > 0) {
+ SanityCheckArtMethodPointerArray(
+ iftable->GetMethodArray(i), nullptr, pointer_size, image_spaces);
}
}
}
@@ -2828,6 +2829,13 @@
return true;
}
+ if (runtime->IsFullyDeoptable()) {
+ // We need to be able to deoptimize at any time so we should always just ignore precompiled
+ // code and go to the interpreter assuming we don't already have jitted code.
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ return (jit == nullptr) || !jit->GetCodeCache()->ContainsPc(quick_code);
+ }
+
if (runtime->IsNativeDebuggable()) {
DCHECK(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse());
// If we are doing native debugging, ignore application's AOT code,
@@ -3394,7 +3402,8 @@
}
mirror::Class* ClassLinker::CreatePrimitiveClass(Thread* self, Primitive::Type type) {
- ObjPtr<mirror::Class> klass = AllocClass(self, mirror::Class::PrimitiveClassSize(image_pointer_size_));
+ ObjPtr<mirror::Class> klass =
+ AllocClass(self, mirror::Class::PrimitiveClassSize(image_pointer_size_));
if (UNLIKELY(klass == nullptr)) {
self->AssertPendingOOMException();
return nullptr;
@@ -3412,10 +3421,12 @@
ObjectLock<mirror::Class> lock(self, h_class);
h_class->SetAccessFlags(kAccPublic | kAccFinal | kAccAbstract);
h_class->SetPrimitiveType(type);
+ h_class->SetIfTable(GetClassRoot(kJavaLangObject)->GetIfTable());
mirror::Class::SetStatus(h_class, mirror::Class::kStatusInitialized, self);
const char* descriptor = Primitive::Descriptor(type);
- ObjPtr<mirror::Class> existing = InsertClass(descriptor, h_class.Get(),
- ComputeModifiedUtf8Hash(descriptor));
+ ObjPtr<mirror::Class> existing = InsertClass(descriptor,
+ h_class.Get(),
+ ComputeModifiedUtf8Hash(descriptor));
CHECK(existing == nullptr) << "InitPrimitiveClass(" << type << ") failed";
return h_class.Get();
}
@@ -4114,6 +4125,8 @@
DCHECK_EQ(klass->GetPrimitiveType(), Primitive::kPrimNot);
klass->SetName(soa.Decode<mirror::String>(name));
klass->SetDexCache(GetClassRoot(kJavaLangReflectProxy)->GetDexCache());
+ // Object has an empty iftable, copy it for that reason.
+ klass->SetIfTable(GetClassRoot(kJavaLangObject)->GetIfTable());
mirror::Class::SetStatus(klass, mirror::Class::kStatusIdx, self);
std::string descriptor(GetDescriptorForProxy(klass.Get()));
const size_t hash = ComputeModifiedUtf8Hash(descriptor.c_str());
@@ -6374,16 +6387,18 @@
bool ClassLinker::SetupInterfaceLookupTable(Thread* self, Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces) {
StackHandleScope<1> hs(self);
- const size_t super_ifcount =
- klass->HasSuperClass() ? klass->GetSuperClass()->GetIfTableCount() : 0U;
+ const bool has_superclass = klass->HasSuperClass();
+ const size_t super_ifcount = has_superclass ? klass->GetSuperClass()->GetIfTableCount() : 0U;
const bool have_interfaces = interfaces.Get() != nullptr;
const size_t num_interfaces =
have_interfaces ? interfaces->GetLength() : klass->NumDirectInterfaces();
if (num_interfaces == 0) {
if (super_ifcount == 0) {
+ if (LIKELY(has_superclass)) {
+ klass->SetIfTable(klass->GetSuperClass()->GetIfTable());
+ }
// Class implements no interfaces.
DCHECK_EQ(klass->GetIfTableCount(), 0);
- DCHECK(klass->GetIfTable() == nullptr);
return true;
}
// Class implements same interfaces as parent, are any of these not marker interfaces?
@@ -6576,7 +6591,7 @@
} else {
// No imt in the super class, need to reconstruct from the iftable.
ObjPtr<mirror::IfTable> if_table = super_class->GetIfTable();
- if (if_table != nullptr) {
+ if (if_table->Count() != 0) {
// Ignore copied methods since we will handle these in LinkInterfaceMethods.
FillIMTFromIfTable(if_table,
unimplemented_method,
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index ab2d9d0..44590ba 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -101,7 +101,8 @@
EXPECT_EQ(0U, primitive->NumDirectInterfaces());
EXPECT_FALSE(primitive->HasVTable());
EXPECT_EQ(0, primitive->GetIfTableCount());
- EXPECT_TRUE(primitive->GetIfTable() == nullptr);
+ EXPECT_TRUE(primitive->GetIfTable() != nullptr);
+ EXPECT_EQ(primitive->GetIfTable()->Count(), 0u);
EXPECT_EQ(kAccPublic | kAccFinal | kAccAbstract, primitive->GetAccessFlags());
}
diff --git a/runtime/class_table.h b/runtime/class_table.h
index bc9eaf4..558c144 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -48,7 +48,7 @@
uint32_t operator()(const GcRoot<mirror::Class>& root) const NO_THREAD_SAFETY_ANALYSIS;
// Same class loader and descriptor.
bool operator()(const GcRoot<mirror::Class>& a, const GcRoot<mirror::Class>& b) const
- NO_THREAD_SAFETY_ANALYSIS;;
+ NO_THREAD_SAFETY_ANALYSIS;
// Same descriptor.
bool operator()(const GcRoot<mirror::Class>& a, const char* descriptor) const
NO_THREAD_SAFETY_ANALYSIS;
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index dd8fe55..a1c5082 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -33,7 +33,7 @@
V(AllocStringFromChars, void*, int32_t, int32_t, void*) \
V(AllocStringFromString, void*, void*) \
\
- V(InstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*) \
+ V(InstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*) \
V(CheckInstanceOf, void, mirror::Object*, mirror::Class*) \
\
V(InitializeStaticStorage, void*, uint32_t) \
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 0b602e9..6019540 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -1002,7 +1002,7 @@
mirror::IfTable* iftable = as_klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
// Ensure iftable arrays are fixed up since we need GetMethodArray to return the valid
// contents.
- if (iftable != nullptr && IsInAppImage(iftable)) {
+ if (IsInAppImage(iftable)) {
operator()(iftable);
for (int32_t i = 0, count = iftable->Count(); i < count; ++i) {
if (iftable->GetMethodArrayCount<kVerifyNone, kWithoutReadBarrier>(i) > 0) {
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index cb775cd..8c63a9e 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -1020,7 +1020,7 @@
} else {
DCHECK(!is_range);
ArtField* field = method_handle->GetTargetField();
- Primitive::Type field_type = field->GetTypeAsPrimitiveType();;
+ Primitive::Type field_type = field->GetTypeAsPrimitiveType();
switch (handle_kind) {
case kInstanceGet: {
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 2257fd6..a5b1038 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -424,7 +424,7 @@
std::unique_ptr<ZipArchive> zip_archive(ZipArchive::Open(jar_file.c_str(), error_msg));
if (zip_archive == nullptr) {
- return nullptr;;
+ return nullptr;
}
std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(entry_name, error_msg));
if (zip_entry == nullptr) {
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 4c10063..23a5ddd 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -114,7 +114,7 @@
} else {
jit_options->invoke_transition_weight_ = std::max(
jit_options->warmup_threshold_ / Jit::kDefaultInvokeTransitionWeightRatio,
- static_cast<size_t>(1));;
+ static_cast<size_t>(1));
}
return jit_options;
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index bbdb2af..9a6d60e 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -526,18 +526,17 @@
template<VerifyObjectFlags kVerifyFlags,
ReadBarrierOption kReadBarrierOption>
inline IfTable* Class::GetIfTable() {
- return GetFieldObject<IfTable, kVerifyFlags, kReadBarrierOption>(IfTableOffset());
+ ObjPtr<IfTable> ret = GetFieldObject<IfTable, kVerifyFlags, kReadBarrierOption>(IfTableOffset());
+ DCHECK(ret != nullptr) << PrettyClass(this);
+ return ret.Ptr();
}
inline int32_t Class::GetIfTableCount() {
- ObjPtr<IfTable> iftable = GetIfTable();
- if (iftable == nullptr) {
- return 0;
- }
- return iftable->Count();
+ return GetIfTable()->Count();
}
inline void Class::SetIfTable(ObjPtr<IfTable> new_iftable) {
+ DCHECK(new_iftable != nullptr) << PrettyClass(this);
SetFieldObject<false>(IfTableOffset(), new_iftable);
}
diff --git a/runtime/oat.h b/runtime/oat.h
index 814a493..8c84d42 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '9', '0', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '9', '2', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 651a6ee..5641459 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -403,9 +403,13 @@
DexFileAndClassPair pair(temp.top());
const DexFile* dex_file = pair.GetDexFile();
const std::string& dex_filename = dex_file->GetLocation();
+ if (dex_filename != shared_libraries_split[index]) {
+ break;
+ }
+ char* end;
+ size_t shared_lib_checksum = strtoul(shared_libraries_split[index + 1].c_str(), &end, 10);
uint32_t dex_checksum = dex_file->GetLocationChecksum();
- if (dex_filename != shared_libraries_split[index] ||
- dex_checksum != std::stoul(shared_libraries_split[index + 1])) {
+ if (*end != '\0' || dex_checksum != shared_lib_checksum) {
break;
}
temp.pop();
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 56eab5e..e1022b0 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -300,6 +300,8 @@
.Define("-Xplugin:_")
.WithType<std::vector<Plugin>>().AppendValues()
.IntoKey(M::Plugins)
+ .Define("-Xfully-deoptable")
+ .IntoKey(M::FullyDeoptable)
.Ignore({
"-ea", "-da", "-enableassertions", "-disableassertions", "--runtime-arg", "-esa",
"-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:_",
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index 16ed7fb..1c975a4 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -215,33 +215,87 @@
}
std::sort(sorted_entries.begin(), sorted_entries.end(), GcRootComparator());
+ class SummaryElement {
+ public:
+ GcRoot<mirror::Object> root;
+ size_t equiv;
+ size_t identical;
+
+ SummaryElement() : equiv(0), identical(0) {}
+ SummaryElement(SummaryElement&& ref) {
+ root = ref.root;
+ equiv = ref.equiv;
+ identical = ref.identical;
+ }
+ SummaryElement(const SummaryElement&) = default;
+ SummaryElement& operator=(SummaryElement&&) = default;
+
+ void Reset(GcRoot<mirror::Object>& _root) {
+ root = _root;
+ equiv = 0;
+ identical = 0;
+ }
+ };
+ std::vector<SummaryElement> sorted_summaries;
+ {
+ SummaryElement prev;
+
+ for (GcRoot<mirror::Object>& root : sorted_entries) {
+ ObjPtr<mirror::Object> current = root.Read<kWithoutReadBarrier>();
+
+ if (UNLIKELY(prev.root.IsNull())) {
+ prev.Reset(root);
+ continue;
+ }
+
+ ObjPtr<mirror::Object> prevObj = prev.root.Read<kWithoutReadBarrier>();
+ if (current == prevObj) {
+ // Same reference, added more than once.
+ ++prev.identical;
+ } else if (current->GetClass() == prevObj->GetClass() &&
+ GetElementCount(current) == GetElementCount(prevObj)) {
+ // Same class / element count, different object.
+ ++prev.equiv;
+ } else {
+ sorted_summaries.push_back(prev);
+ prev.Reset(root);
+ }
+ prev.root = root;
+ }
+ sorted_summaries.push_back(prev);
+
+ // Compare summary elements, first by combined count, then by identical (indicating leaks),
+ // then by class (and size and address).
+ struct SummaryElementComparator {
+ GcRootComparator gc_root_cmp;
+
+ bool operator()(SummaryElement& elem1, SummaryElement& elem2) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+
+ size_t count1 = elem1.equiv + elem1.identical;
+ size_t count2 = elem2.equiv + elem2.identical;
+ if (count1 != count2) {
+ return count1 > count2;
+ }
+
+ if (elem1.identical != elem2.identical) {
+ return elem1.identical > elem2.identical;
+ }
+
+ // Otherwise, compare the GC roots as before.
+ return gc_root_cmp(elem1.root, elem2.root);
+ }
+ };
+ std::sort(sorted_summaries.begin(), sorted_summaries.end(), SummaryElementComparator());
+ }
+
// Dump a summary of the whole table.
os << " Summary:\n";
- size_t equiv = 0;
- size_t identical = 0;
- ObjPtr<mirror::Object> prev = nullptr;
- for (GcRoot<mirror::Object>& root : sorted_entries) {
- ObjPtr<mirror::Object> current = root.Read<kWithoutReadBarrier>();
- if (prev != nullptr) {
- const size_t element_count = GetElementCount(prev);
- if (current == prev) {
- // Same reference, added more than once.
- ++identical;
- } else if (current->GetClass() == prev->GetClass() &&
- GetElementCount(current) == element_count) {
- // Same class / element count, different object.
- ++equiv;
- } else {
- // Different class.
- DumpSummaryLine(os, prev, element_count, identical, equiv);
- equiv = 0;
- identical = 0;
- }
- }
- prev = current;
+ for (SummaryElement& elem : sorted_summaries) {
+ ObjPtr<mirror::Object> elemObj = elem.root.Read<kWithoutReadBarrier>();
+ DumpSummaryLine(os, elemObj, GetElementCount(elemObj), elem.identical, elem.equiv);
}
- // Handle the last entry.
- DumpSummaryLine(os, prev, GetElementCount(prev), identical, equiv);
}
void ReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
diff --git a/runtime/reference_table_test.cc b/runtime/reference_table_test.cc
index 489db9a..d80a9b3 100644
--- a/runtime/reference_table_test.cc
+++ b/runtime/reference_table_test.cc
@@ -166,4 +166,77 @@
}
}
+static std::vector<size_t> FindAll(const std::string& haystack, const char* needle) {
+ std::vector<size_t> res;
+ size_t start = 0;
+ do {
+ size_t pos = haystack.find(needle, start);
+ if (pos == std::string::npos) {
+ break;
+ }
+ res.push_back(pos);
+ start = pos + 1;
+ } while (start < haystack.size());
+ return res;
+}
+
+TEST_F(ReferenceTableTest, SummaryOrder) {
+ // Check that the summary statistics are sorted.
+ ScopedObjectAccess soa(Thread::Current());
+
+ ReferenceTable rt("test", 0, 20);
+
+ {
+ mirror::Object* s1 = mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello");
+ mirror::Object* s2 = mirror::String::AllocFromModifiedUtf8(soa.Self(), "world");
+
+ // 3 copies of s1, 2 copies of s2, interleaved.
+ for (size_t i = 0; i != 2; ++i) {
+ rt.Add(s1);
+ rt.Add(s2);
+ }
+ rt.Add(s1);
+ }
+
+ {
+ // Differently sized byte arrays. Should be sorted by identical (non-unique count).
+ mirror::Object* b1_1 = mirror::ByteArray::Alloc(soa.Self(), 1);
+ rt.Add(b1_1);
+ rt.Add(mirror::ByteArray::Alloc(soa.Self(), 2));
+ rt.Add(b1_1);
+ rt.Add(mirror::ByteArray::Alloc(soa.Self(), 2));
+ rt.Add(mirror::ByteArray::Alloc(soa.Self(), 1));
+ rt.Add(mirror::ByteArray::Alloc(soa.Self(), 2));
+ }
+
+ rt.Add(mirror::CharArray::Alloc(soa.Self(), 0));
+
+ // Now dump, and ensure order.
+ std::ostringstream oss;
+ rt.Dump(oss);
+
+ // Only inspect the part of the output after "Summary:".
+ std::string base = oss.str();
+ size_t summary_pos = base.find("Summary:");
+ ASSERT_NE(summary_pos, std::string::npos);
+
+ std::string haystack = base.substr(summary_pos);
+
+ std::vector<size_t> strCounts = FindAll(haystack, "java.lang.String");
+ std::vector<size_t> b1Counts = FindAll(haystack, "byte[] (1 elements)");
+ std::vector<size_t> b2Counts = FindAll(haystack, "byte[] (2 elements)");
+ std::vector<size_t> cCounts = FindAll(haystack, "char[]");
+
+ // Only one each.
+ EXPECT_EQ(1u, strCounts.size());
+ EXPECT_EQ(1u, b1Counts.size());
+ EXPECT_EQ(1u, b2Counts.size());
+ EXPECT_EQ(1u, cCounts.size());
+
+ // Expect them to be in this order: most references first, ties broken by identical count.
+ EXPECT_LT(strCounts[0], b1Counts[0]);
+ EXPECT_LT(b1Counts[0], b2Counts[0]);
+ EXPECT_LT(b2Counts[0], cCounts[0]);
+}
+
} // namespace art
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a1b6866..09a0462 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -239,6 +239,7 @@
force_native_bridge_(false),
is_native_bridge_loaded_(false),
is_native_debuggable_(false),
+ is_fully_deoptable_(false),
zygote_max_failed_boots_(0),
experimental_flags_(ExperimentalFlags::kNone),
oat_file_manager_(nullptr),
@@ -764,6 +765,9 @@
}
bool Runtime::IsDebuggable() const {
+ if (IsFullyDeoptable()) {
+ return true;
+ }
const OatFile* oat_file = GetOatFileManager().GetPrimaryOatFile();
return oat_file != nullptr && oat_file->IsDebuggable();
}
@@ -987,6 +991,8 @@
verify_ = runtime_options.GetOrDefault(Opt::Verify);
allow_dex_file_fallback_ = !runtime_options.Exists(Opt::NoDexFileFallback);
+ is_fully_deoptable_ = runtime_options.Exists(Opt::FullyDeoptable);
+
no_sig_chain_ = runtime_options.Exists(Opt::NoSigChain);
force_native_bridge_ = runtime_options.Exists(Opt::ForceNativeBridge);
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 6806180..de5a356 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -592,6 +592,14 @@
bool IsDebuggable() const;
+ bool IsFullyDeoptable() const {
+ return is_fully_deoptable_;
+ }
+
+ void SetFullyDeoptable(bool value) {
+ is_fully_deoptable_ = value;
+ }
+
bool IsNativeDebuggable() const {
return is_native_debuggable_;
}
@@ -857,6 +865,9 @@
// Whether we are running under native debugger.
bool is_native_debuggable_;
+ // Whether we are expected to be deoptable at all points.
+ bool is_fully_deoptable_;
+
// The maximum number of failed boots we allow before pruning the dalvik cache
// and trying again. This option is only inspected when we're running as a
// zygote.
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index b01a570..d1970fe 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -121,6 +121,7 @@
RUNTIME_OPTIONS_KEY (std::vector<ti::Agent>, AgentLib) // -agentlib:<libname>=<options>, Requires -Xexperimental:agents
RUNTIME_OPTIONS_KEY (std::vector<ti::Agent>, AgentPath) // -agentpath:<libname>=<options>, Requires -Xexperimental:agents
RUNTIME_OPTIONS_KEY (std::vector<Plugin>, Plugins) // -Xplugin:<library> Requires -Xexperimental:runtime-plugins
+RUNTIME_OPTIONS_KEY (Unit, FullyDeoptable) // -Xfully-deoptable
// Not parse-able from command line, but can be provided explicitly.
// (Do not add anything here that is defined in ParsedOptions::MakeParser)
diff --git a/test/445-checker-licm/expected.txt b/test/445-checker-licm/expected.txt
index e69de29..b0aad4d 100644
--- a/test/445-checker-licm/expected.txt
+++ b/test/445-checker-licm/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/445-checker-licm/src/Main.java b/test/445-checker-licm/src/Main.java
index 061fe6e..00ce3a9 100644
--- a/test/445-checker-licm/src/Main.java
+++ b/test/445-checker-licm/src/Main.java
@@ -164,8 +164,43 @@
return result;
}
+ //
+ // All operations up to the null check can be hoisted out of the
+ // loop. The null check itself sees the induction in its environment.
+ //
+ /// CHECK-START: int Main.doWhile(int) licm (before)
+ /// CHECK-DAG: <<Add:i\d+>> Add loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: LoadClass loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:l\d+>> StaticFieldGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: NullCheck [<<Get>>] env:[[<<Add>>,<<Get>>,{{i\d+}}]] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayLength loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: BoundsCheck loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: int Main.doWhile(int) licm (after)
+ /// CHECK-NOT: LoadClass loop:{{B\d+}}
+ /// CHECK-NOT: StaticFieldGet loop:{{B\d+}}
+ //
+ /// CHECK-START: int Main.doWhile(int) licm (after)
+ /// CHECK-DAG: LoadClass loop:none
+ /// CHECK-DAG: <<Get:l\d+>> StaticFieldGet loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: NullCheck [<<Get>>] env:[[<<Add>>,<<Get>>,{{i\d+}}]] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayLength loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: BoundsCheck loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ public static int doWhile(int k) {
+ int i = k;
+ do {
+ i += 2;
+ } while (staticArray[i] == 0);
+ return i;
+ }
+
public static int staticField = 42;
+ public static int[] staticArray = null;
+
public static void assertEquals(int expected, int actual) {
if (expected != actual) {
throw new Error("Expected " + expected + ", got " + actual);
@@ -181,5 +216,24 @@
assertEquals(21, divAndIntrinsic(new int[] { 4, -2, 8, -3 }));
assertEquals(45, invariantBoundIntrinsic(-10));
assertEquals(30, invariantBodyIntrinsic(2, 3));
+
+ staticArray = null;
+ try {
+ doWhile(0);
+ throw new Error("Expected NPE");
+ } catch (NullPointerException e) {
+ }
+ staticArray = new int[5];
+ staticArray[4] = 1;
+ assertEquals(4, doWhile(-2));
+ assertEquals(4, doWhile(0));
+ assertEquals(4, doWhile(2));
+ try {
+ doWhile(1);
+ throw new Error("Expected IOOBE");
+ } catch (IndexOutOfBoundsException e) {
+ }
+
+ System.out.println("passed");
}
}
diff --git a/test/530-checker-loops3/src/Main.java b/test/530-checker-loops3/src/Main.java
index 6b5c657..209786a 100644
--- a/test/530-checker-loops3/src/Main.java
+++ b/test/530-checker-loops3/src/Main.java
@@ -246,7 +246,7 @@
oneConstantIndex(a, b);
for (int i = 0; i < a.length; i++) {
- expectEquals(2, a[i]);;
+ expectEquals(2, a[i]);
}
try {
oneConstantIndex(a, b1);
@@ -256,7 +256,7 @@
multipleConstantIndices(a, b);
for (int i = 0; i < a.length; i++) {
- expectEquals(6, a[i]);;
+ expectEquals(6, a[i]);
}
try {
multipleConstantIndices(a, b1);
@@ -266,7 +266,7 @@
oneInvariantIndex(a, b, 1);
for (int i = 0; i < a.length; i++) {
- expectEquals(2, a[i]);;
+ expectEquals(2, a[i]);
}
try {
oneInvariantIndex(a, b1, 1);
@@ -276,7 +276,7 @@
multipleInvariantIndices(a, b, 1);
for (int i = 0; i < a.length; i++) {
- expectEquals(6, a[i]);;
+ expectEquals(6, a[i]);
}
try {
multipleInvariantIndices(a, b1, 1);
@@ -286,18 +286,18 @@
oneUnitStride(a, b);
for (int i = 0; i < a.length; i++) {
- expectEquals(i + 1, a[i]);;
+ expectEquals(i + 1, a[i]);
}
try {
oneUnitStride(a, b1);
throw new Error("Should throw AIOOBE");
} catch (ArrayIndexOutOfBoundsException e) {
- expectEquals(100, a[0]);;
+ expectEquals(100, a[0]);
}
multipleUnitStrides(a, b);
for (int i = 1; i < a.length - 1; i++) {
- expectEquals(3 * i + 3, a[i]);;
+ expectEquals(3 * i + 3, a[i]);
}
try {
multipleUnitStrides(a, b1);
@@ -308,7 +308,7 @@
multipleUnitStridesConditional(a, b);
for (int i = 2; i < a.length - 2; i++) {
int e = 3 * i + 3 + (((i & 1) == 0) ? i + 2 : i);
- expectEquals(e, a[i]);;
+ expectEquals(e, a[i]);
}
try {
multipleUnitStridesConditional(a, b1);
diff --git a/test/586-checker-null-array-get/src/Main.java b/test/586-checker-null-array-get/src/Main.java
index e0782bc..0ea7d34 100644
--- a/test/586-checker-null-array-get/src/Main.java
+++ b/test/586-checker-null-array-get/src/Main.java
@@ -100,7 +100,7 @@
/// CHECK-DAG: Return [<<ArrayGet2>>]
public static float test1() {
Test1 test1 = getNullTest1();
- Test2 test2 = getNullTest2();;
+ Test2 test2 = getNullTest2();
int[] iarr = test1.iarr;
float[] farr = test2.farr;
iarr[0] = iarr[1];
diff --git a/test/611-checker-simplify-if/src/Main.java b/test/611-checker-simplify-if/src/Main.java
index 774f239..c1d75ec 100644
--- a/test/611-checker-simplify-if/src/Main.java
+++ b/test/611-checker-simplify-if/src/Main.java
@@ -144,7 +144,7 @@
/// CHECK-NOT: GreaterThanOrEqual
/// CHECK-NOT: If
public static void testGreaterCondition(String[] args) {
- int a = 42;;
+ int a = 42;
if (args.length == 42) {
a = 34;
} else {
diff --git a/test/625-checker-licm-regressions/src/Main.java b/test/625-checker-licm-regressions/src/Main.java
index cc1e07c..f372b1c 100644
--- a/test/625-checker-licm-regressions/src/Main.java
+++ b/test/625-checker-licm-regressions/src/Main.java
@@ -47,14 +47,83 @@
} while (j < arr.length);
}
+ //
+ // Similar situation to the one in foo(), but now a proper induction value
+ // is assigned to the field inside the do-while loop.
+ //
+ /// CHECK-START: void Main.bar(int[]) licm (before)
+ /// CHECK-DAG: LoadClass loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: StaticFieldSet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: NullCheck loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayLength loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.bar(int[]) licm (after)
+ /// CHECK-DAG: LoadClass loop:none
+ /// CHECK-DAG: StaticFieldSet loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: NullCheck loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayLength loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.bar(int[]) licm (after)
+ /// CHECK-NOT: LoadClass loop:{{B\d+}} outer_loop:none
+ static void bar(int[] arr) {
+ int j = 0;
+ do {
+ j++;
+ sA = j;
+ } while (j < arr.length);
+ }
+
+ //
+ // Similar situation to the one in bar(), but now an explicit catch
+ // statement may need the latest value of local j.
+ //
+ /// CHECK-START: int Main.catcher(int[]) licm (before)
+ /// CHECK-DAG: NullCheck loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayLength loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: int Main.catcher(int[]) licm (after)
+ /// CHECK-DAG: NullCheck loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayLength loop:<<Loop>> outer_loop:none
+ static int catcher(int[] arr) {
+ int j = 0;
+ try {
+ do {
+ j++;
+ } while (j < arr.length);
+ } catch (NullPointerException e) {
+ return -j; // flag exception with negative value
+ }
+ return j;
+ }
+
public static void main(String[] args) {
sA = 0;
try {
foo(null);
- } catch (Exception e) {
+ throw new Error("Expected NPE");
+ } catch (NullPointerException e) {
}
expectEquals(1, sA);
+ sA = 0;
+ try {
+ bar(null);
+ throw new Error("Expected NPE");
+ } catch (NullPointerException e) {
+ }
+ expectEquals(1, sA);
+
+ for (int i = 0; i < 5; i++) {
+ sA = 0;
+ bar(new int[i]);
+ expectEquals(i == 0 ? 1 : i, sA);
+ }
+
+ expectEquals(-1, catcher(null));
+ for (int i = 0; i < 5; i++) {
+ expectEquals(i == 0 ? 1 : i, catcher(new int[i]));
+ }
+
System.out.println("passed");
}
diff --git a/test/902-hello-transformation/run b/test/902-hello-transformation/run
index 204e4cc..3755d1d 100755
--- a/test/902-hello-transformation/run
+++ b/test/902-hello-transformation/run
@@ -39,5 +39,6 @@
--experimental runtime-plugins \
--runtime-option -agentpath:${agent}=902-hello-transformation,${arg} \
--android-runtime-option -Xplugin:${plugin} \
+ --android-runtime-option -Xfully-deoptable \
${other_args} \
--args ${lib}
diff --git a/test/913-heaps/heaps.cc b/test/913-heaps/heaps.cc
index 4087abd..0c627d6 100644
--- a/test/913-heaps/heaps.cc
+++ b/test/913-heaps/heaps.cc
@@ -27,9 +27,11 @@
#include "base/stringprintf.h"
#include "jit/jit.h"
#include "jni.h"
+#include "native_stack_dump.h"
#include "openjdkjvmti/jvmti.h"
#include "runtime.h"
#include "thread-inl.h"
+#include "thread_list.h"
#include "ti-agent/common_helper.h"
#include "ti-agent/common_load.h"
@@ -191,6 +193,20 @@
class_tag,
adapted_size,
length));
+
+ if (reference_kind == JVMTI_HEAP_REFERENCE_THREAD && *tag_ptr == 1000) {
+ DumpStacks();
+ }
+ }
+
+ static void DumpStacks() NO_THREAD_SAFETY_ANALYSIS {
+ auto dump_function = [](art::Thread* t, void* data ATTRIBUTE_UNUSED) {
+ std::string name;
+ t->GetThreadName(name);
+ LOG(ERROR) << name;
+ art::DumpNativeStack(LOG_STREAM(ERROR), t->GetTid());
+ };
+ art::Runtime::Current()->GetThreadList()->ForEach(dump_function, nullptr);
}
static std::string GetReferenceTypeStr(jvmtiHeapReferenceKind reference_kind,
diff --git a/test/Android.arm_vixl.mk b/test/Android.arm_vixl.mk
index 845545c..8ca4168 100644
--- a/test/Android.arm_vixl.mk
+++ b/test/Android.arm_vixl.mk
@@ -17,56 +17,27 @@
# Known broken tests for the ARM VIXL backend.
TEST_ART_BROKEN_OPTIMIZING_ARM_VIXL_RUN_TESTS := \
003-omnibus-opcodes \
- 004-ThreadStress \
- 028-array-write \
- 037-inherit \
- 042-new-instance \
+ 020-string \
+ 021-string2 \
044-proxy \
- 080-oom-throw \
082-inline-execute \
- 083-compiler-regressions \
096-array-copy-concurrent-gc \
- 099-vmdebug \
+ 100-reflect2 \
103-string-append \
- 114-ParallelGC \
122-npe \
- 123-inline-execute2 \
129-ThreadGetId \
137-cfi \
- 144-static-field-sigquit \
- 201-built-in-except-detail-messages \
- 412-new-array \
- 422-type-conversion \
- 437-inline \
439-npe \
- 442-checker-constant-folding \
- 450-checker-types \
- 458-checker-instruct-simplification \
- 458-long-to-fpu \
488-checker-inline-recursive-calls \
- 510-checker-try-catch \
- 515-dce-dominator \
520-equivalent-phi \
525-checker-arrays-fields1 \
525-checker-arrays-fields2 \
527-checker-array-access-split \
- 530-checker-loops2 \
- 530-checker-lse \
- 530-checker-lse2 \
- 535-regression-const-val \
- 536-checker-intrinsic-optimization \
538-checker-embed-constants \
550-checker-multiply-accumulate \
- 552-checker-primitive-typeprop \
552-checker-sharpening \
- 555-UnsafeGetLong-regression \
562-checker-no-intermediate \
564-checker-negbitwise \
570-checker-osr \
- 570-checker-select \
- 574-irreducible-and-constant-area \
- 580-checker-round \
- 594-checker-array-alias \
- 602-deoptimizeable \
- 700-LoadArgRegs \
- 800-smali \
+ 602-deoptimizeable
+
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 12e0338..2d26b48 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -19,7 +19,17 @@
exit 1
fi
-out_dir=${OUT_DIR-out}
+# Logic for setting out_dir from build/make/core/envsetup.mk:
+if [[ -z $OUT_DIR ]]; then
+ if [[ -z $OUT_DIR_COMMON_BASE ]]; then
+ out_dir=out
+ else
+ out_dir=${OUT_DIR_COMMON_BASE}/${PWD##*/}
+ fi
+else
+ out_dir=${OUT_DIR}
+fi
+
java_libraries_dir=${out_dir}/target/common/obj/JAVA_LIBRARIES
common_targets="vogar core-tests apache-harmony-jdwp-tests-hostdex jsr166-tests mockito-target ${out_dir}/host/linux-x86/bin/jack"
mode="target"