Merge "JDWP: attempt to fix failure on closed connection"
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 503187b..7c60026 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1071,12 +1071,6 @@
<< instruction->DebugName() << ((slow_path != nullptr) ? slow_path->GetDescription() : "");
}
-void SlowPathCode::RecordPcInfo(CodeGenerator* codegen,
- HInstruction* instruction,
- uint32_t dex_pc) {
- codegen->RecordPcInfo(instruction, dex_pc, this);
-}
-
void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
RegisterSet* register_set = locations->GetLiveRegisters();
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 938369b..cdd4675 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -81,7 +81,6 @@
virtual void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
virtual void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
- void RecordPcInfo(CodeGenerator* codegen, HInstruction* instruction, uint32_t dex_pc);
bool IsCoreRegisterSaved(int reg) const {
return saved_core_stack_offsets_[reg] != kRegisterNotSaved;
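Every backend change below applies the same refactoring: slow paths stop caching operand Locations (and, for type checks, the dex pc) in constructor-initialized members, and instead re-derive them in EmitNativeCode from the instruction's LocationSummary. That is why the extra arguments at every AddSlowPath call site disappear. A minimal, self-contained sketch of the idea, using simplified stand-in types rather than the real ART classes:

```cpp
// Sketch only: simplified stand-ins for the ART types.
struct Location { int reg = -1; };

struct LocationSummary {
  Location inputs[2];
  Location InAt(int i) const { return inputs[i]; }
};

struct HBoundsCheck {
  LocationSummary locations;
  LocationSummary* GetLocations() { return &locations; }
};

class BoundsCheckSlowPath {
 public:
  // Before: the constructor also took index/length Locations and cached them in
  // const members. After: the instruction alone is enough.
  explicit BoundsCheckSlowPath(HBoundsCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode() {
    // Re-derive the operands at emission time; the register allocator already
    // recorded them on the instruction's LocationSummary.
    LocationSummary* locations = instruction_->GetLocations();
    Location index = locations->InAt(0);   // was the cached index_location_
    Location length = locations->InAt(1);  // was the cached length_location_
    (void)index; (void)length;             // stand-in for the moves and runtime call
  }

 private:
  HBoundsCheck* const instruction_;  // the only state the slow path still needs
};
```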
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 9de9abf..0640179 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -142,24 +142,22 @@
class BoundsCheckSlowPathARM : public SlowPathCodeARM {
public:
- BoundsCheckSlowPathARM(HBoundsCheck* instruction,
- Location index_location,
- Location length_location)
- : instruction_(instruction),
- index_location_(index_location),
- length_location_(length_location) {}
+ explicit BoundsCheckSlowPathARM(HBoundsCheck* instruction)
+ : instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+ LocationSummary* locations = instruction_->GetLocations();
+
__ Bind(GetEntryLabel());
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
codegen->EmitParallelMoves(
- index_location_,
+ locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Primitive::kPrimInt,
- length_location_,
+ locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimInt);
arm_codegen->InvokeRuntime(
@@ -172,8 +170,6 @@
private:
HBoundsCheck* const instruction_;
- const Location index_location_;
- const Location length_location_;
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
};
@@ -263,17 +259,12 @@
class TypeCheckSlowPathARM : public SlowPathCodeARM {
public:
- TypeCheckSlowPathARM(HInstruction* instruction,
- Location class_to_check,
- Location object_class,
- uint32_t dex_pc)
- : instruction_(instruction),
- class_to_check_(class_to_check),
- object_class_(object_class),
- dex_pc_(dex_pc) {}
+ explicit TypeCheckSlowPathARM(HInstruction* instruction) : instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
+ Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
+ : locations->Out();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -285,20 +276,25 @@
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
codegen->EmitParallelMoves(
- class_to_check_,
+ locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Primitive::kPrimNot,
- object_class_,
+ object_class,
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
- arm_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
+ arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
} else {
DCHECK(instruction_->IsCheckCast());
- arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
+ arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
}
RestoreLiveRegisters(codegen, locations);
@@ -309,9 +305,6 @@
private:
HInstruction* const instruction_;
- const Location class_to_check_;
- const Location object_class_;
- uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
};
@@ -3899,8 +3892,8 @@
void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
- SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(
- instruction, locations->InAt(0), locations->InAt(1));
+ SlowPathCodeARM* slow_path =
+ new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(instruction);
codegen_->AddSlowPath(slow_path);
Register index = locations->InAt(0).AsRegister<Register>();
@@ -4344,6 +4337,7 @@
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
// The out register is used as a temporary, so it overlaps with the inputs.
+ // Note that TypeCheckSlowPathARM uses this register too.
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}
@@ -4373,8 +4367,7 @@
} else {
// If the classes are not equal, we go into a slow path.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
- instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(instruction);
codegen_->AddSlowPath(slow_path);
__ b(slow_path->GetEntryLabel(), NE);
__ LoadImmediate(out, 1);
@@ -4397,6 +4390,7 @@
instruction, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
+ // Note that TypeCheckSlowPathARM uses this register too.
locations->AddTemp(Location::RequiresRegister());
}
@@ -4407,8 +4401,8 @@
Register temp = locations->GetTemp(0).AsRegister<Register>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
- instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+ SlowPathCodeARM* slow_path =
+ new (GetGraph()->GetArena()) TypeCheckSlowPathARM(instruction);
codegen_->AddSlowPath(slow_path);
// Avoid null check if we know obj is not null.
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 25b3ea2..8035461 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -191,23 +191,19 @@
class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
public:
- BoundsCheckSlowPathARM64(HBoundsCheck* instruction,
- Location index_location,
- Location length_location)
- : instruction_(instruction),
- index_location_(index_location),
- length_location_(length_location) {}
-
+ explicit BoundsCheckSlowPathARM64(HBoundsCheck* instruction) : instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+
__ Bind(GetEntryLabel());
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
codegen->EmitParallelMoves(
- index_location_, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimInt,
- length_location_, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimInt);
+ locations->InAt(0), LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimInt,
+ locations->InAt(1), LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimInt);
arm64_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
@@ -219,8 +215,6 @@
private:
HBoundsCheck* const instruction_;
- const Location index_location_;
- const Location length_location_;
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
};
@@ -403,20 +397,17 @@
class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
public:
- TypeCheckSlowPathARM64(HInstruction* instruction,
- Location class_to_check,
- Location object_class,
- uint32_t dex_pc)
- : instruction_(instruction),
- class_to_check_(class_to_check),
- object_class_(object_class),
- dex_pc_(dex_pc) {}
+ explicit TypeCheckSlowPathARM64(HInstruction* instruction) : instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
+ Location class_to_check = locations->InAt(1);
+ Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
+ : locations->Out();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+ uint32_t dex_pc = instruction_->GetDexPc();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -425,12 +416,12 @@
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
codegen->EmitParallelMoves(
- class_to_check_, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot,
- object_class_, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimNot);
+ class_to_check, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot,
+ object_class, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
arm64_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
+ QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc, this);
Primitive::Type ret_type = instruction_->GetType();
Location ret_loc = calling_convention.GetReturnLocation(ret_type);
arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
@@ -438,7 +429,7 @@
const mirror::Class*, const mirror::Class*>();
} else {
DCHECK(instruction_->IsCheckCast());
- arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
+ arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc, this);
CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
}
@@ -450,9 +441,6 @@
private:
HInstruction* const instruction_;
- const Location class_to_check_;
- const Location object_class_;
- uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64);
};
@@ -1602,9 +1590,8 @@
}
void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary* locations = instruction->GetLocations();
- BoundsCheckSlowPathARM64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(
- instruction, locations->InAt(0), locations->InAt(1));
+ BoundsCheckSlowPathARM64* slow_path =
+ new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(instruction);
codegen_->AddSlowPath(slow_path);
__ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
@@ -1616,17 +1603,17 @@
instruction, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
+ // Note that TypeCheckSlowPathARM64 uses this register too.
locations->AddTemp(Location::RequiresRegister());
}
void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
- LocationSummary* locations = instruction->GetLocations();
Register obj = InputRegisterAt(instruction, 0);
Register cls = InputRegisterAt(instruction, 1);
Register obj_cls = WRegisterFrom(instruction->GetLocations()->GetTemp(0));
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
- instruction, locations->InAt(1), LocationFrom(obj_cls), instruction->GetDexPc());
+ SlowPathCodeARM64* slow_path =
+ new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction);
codegen_->AddSlowPath(slow_path);
// Avoid null check if we know obj is not null.
@@ -2240,6 +2227,7 @@
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
// The output overlaps the inputs.
+ // Note that TypeCheckSlowPathARM64 uses this register too.
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}
@@ -2269,8 +2257,7 @@
// If the classes are not equal, we go into a slow path.
DCHECK(locations->OnlyCallsOnSlowPath());
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
- instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
+ new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction);
codegen_->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
__ Mov(out, 1);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 093d786..e4188e4 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -112,23 +112,19 @@
class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
public:
- BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction,
- Location index_location,
- Location length_location)
- : instruction_(instruction),
- index_location_(index_location),
- length_location_(length_location) {}
+ explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- codegen->EmitParallelMoves(index_location_,
+ codegen->EmitParallelMoves(locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Primitive::kPrimInt,
- length_location_,
+ locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimInt);
mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
@@ -144,8 +140,6 @@
private:
HBoundsCheck* const instruction_;
- const Location index_location_;
- const Location length_location_;
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
};
@@ -334,17 +328,13 @@
class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
public:
- TypeCheckSlowPathMIPS64(HInstruction* instruction,
- Location class_to_check,
- Location object_class,
- uint32_t dex_pc)
- : instruction_(instruction),
- class_to_check_(class_to_check),
- object_class_(object_class),
- dex_pc_(dex_pc) {}
+ explicit TypeCheckSlowPathMIPS64(HInstruction* instruction) : instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
+ Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
+ : locations->Out();
+ uint32_t dex_pc = instruction_->GetDexPc();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
@@ -355,17 +345,17 @@
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- codegen->EmitParallelMoves(class_to_check_,
+ codegen->EmitParallelMoves(locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Primitive::kPrimNot,
- object_class_,
+ object_class,
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
instruction_,
- dex_pc_,
+ dex_pc,
this);
Primitive::Type ret_type = instruction_->GetType();
Location ret_loc = calling_convention.GetReturnLocation(ret_type);
@@ -376,7 +366,7 @@
const mirror::Class*>();
} else {
DCHECK(instruction_->IsCheckCast());
- mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
+ mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc, this);
CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
}
@@ -388,9 +378,6 @@
private:
HInstruction* const instruction_;
- const Location class_to_check_;
- const Location object_class_;
- uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS64);
};
@@ -1590,10 +1577,8 @@
void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
- BoundsCheckSlowPathMIPS64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(
- instruction,
- locations->InAt(0),
- locations->InAt(1));
+ BoundsCheckSlowPathMIPS64* slow_path =
+ new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(instruction);
codegen_->AddSlowPath(slow_path);
GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
@@ -1616,6 +1601,7 @@
LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
+ // Note that TypeCheckSlowPathMIPS64 uses this register too.
locations->AddTemp(Location::RequiresRegister());
}
@@ -1625,11 +1611,8 @@
GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
GpuRegister obj_cls = locations->GetTemp(0).AsRegister<GpuRegister>();
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(
- instruction,
- locations->InAt(1),
- Location::RegisterLocation(obj_cls),
- instruction->GetDexPc());
+ SlowPathCodeMIPS64* slow_path =
+ new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
codegen_->AddSlowPath(slow_path);
// TODO: avoid this check if we know obj is not null.
@@ -2270,6 +2253,7 @@
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
// The output overlaps the inputs.
+ // Note that TypeCheckSlowPathMIPS64 uses this register too.
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}
@@ -2296,10 +2280,7 @@
// If the classes are not equal, we go into a slow path.
DCHECK(locations->OnlyCallsOnSlowPath());
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
- locations->InAt(1),
- locations->Out(),
- instruction->GetDexPc());
+ new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
codegen_->AddSlowPath(slow_path);
__ Bnec(out, cls, slow_path->GetEntryLabel());
__ LoadConst32(out, 1);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 72c690d..e8aa61d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -116,24 +116,20 @@
class BoundsCheckSlowPathX86 : public SlowPathCodeX86 {
public:
- BoundsCheckSlowPathX86(HBoundsCheck* instruction,
- Location index_location,
- Location length_location)
- : instruction_(instruction),
- index_location_(index_location),
- length_location_(length_location) {}
+ explicit BoundsCheckSlowPathX86(HBoundsCheck* instruction) : instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
x86_codegen->EmitParallelMoves(
- index_location_,
+ locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Primitive::kPrimInt,
- length_location_,
+ locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimInt);
x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
@@ -148,8 +144,6 @@
private:
HBoundsCheck* const instruction_;
- const Location index_location_;
- const Location length_location_;
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86);
};
@@ -280,15 +274,12 @@
class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
public:
- TypeCheckSlowPathX86(HInstruction* instruction,
- Location class_to_check,
- Location object_class)
- : instruction_(instruction),
- class_to_check_(class_to_check),
- object_class_(object_class) {}
+ explicit TypeCheckSlowPathX86(HInstruction* instruction) : instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
+ Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
+ : locations->Out();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -300,10 +291,10 @@
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
x86_codegen->EmitParallelMoves(
- class_to_check_,
+ locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Primitive::kPrimNot,
- object_class_,
+ object_class,
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimNot);
@@ -332,8 +323,6 @@
private:
HInstruction* const instruction_;
- const Location class_to_check_;
- const Location object_class_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86);
};
@@ -4357,7 +4346,7 @@
Location index_loc = locations->InAt(0);
Location length_loc = locations->InAt(1);
SlowPathCodeX86* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathX86(instruction, index_loc, length_loc);
+ new (GetGraph()->GetArena()) BoundsCheckSlowPathX86(instruction);
if (length_loc.IsConstant()) {
int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
@@ -4830,6 +4819,7 @@
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::Any());
+ // Note that TypeCheckSlowPathX86 uses this register too.
locations->SetOut(Location::RequiresRegister());
}
@@ -4866,8 +4856,7 @@
} else {
// If the classes are not equal, we go into a slow path.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
- instruction, locations->InAt(1), locations->Out());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction);
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -4890,6 +4879,7 @@
instruction, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::Any());
+ // Note that TypeCheckSlowPathX86 uses this register too.
locations->AddTemp(Location::RequiresRegister());
}
@@ -4899,8 +4889,8 @@
Location cls = locations->InAt(1);
Register temp = locations->GetTemp(0).AsRegister<Register>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
- instruction, locations->InAt(1), locations->GetTemp(0));
+ SlowPathCodeX86* slow_path =
+ new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction);
codegen_->AddSlowPath(slow_path);
// Avoid null check if we know obj is not null.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 820ec78..ff52f4f 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -170,24 +170,21 @@
class BoundsCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
- BoundsCheckSlowPathX86_64(HBoundsCheck* instruction,
- Location index_location,
- Location length_location)
- : instruction_(instruction),
- index_location_(index_location),
- length_location_(length_location) {}
+ explicit BoundsCheckSlowPathX86_64(HBoundsCheck* instruction)
+ : instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
codegen->EmitParallelMoves(
- index_location_,
+ locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Primitive::kPrimInt,
- length_location_,
+ locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimInt);
x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
@@ -200,8 +197,6 @@
private:
HBoundsCheck* const instruction_;
- const Location index_location_;
- const Location length_location_;
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86_64);
};
@@ -293,17 +288,14 @@
class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
- TypeCheckSlowPathX86_64(HInstruction* instruction,
- Location class_to_check,
- Location object_class,
- uint32_t dex_pc)
- : instruction_(instruction),
- class_to_check_(class_to_check),
- object_class_(object_class),
- dex_pc_(dex_pc) {}
+ explicit TypeCheckSlowPathX86_64(HInstruction* instruction)
+ : instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
+ Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
+ : locations->Out();
+ uint32_t dex_pc = instruction_->GetDexPc();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -315,23 +307,23 @@
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
codegen->EmitParallelMoves(
- class_to_check_,
+ locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Primitive::kPrimNot,
- object_class_,
+ object_class,
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
instruction_,
- dex_pc_,
+ dex_pc,
this);
} else {
DCHECK(instruction_->IsCheckCast());
x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
instruction_,
- dex_pc_,
+ dex_pc,
this);
}
@@ -347,9 +339,6 @@
private:
HInstruction* const instruction_;
- const Location class_to_check_;
- const Location object_class_;
- const uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86_64);
};
@@ -4195,7 +4184,7 @@
Location index_loc = locations->InAt(0);
Location length_loc = locations->InAt(1);
SlowPathCodeX86_64* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(instruction, index_loc, length_loc);
+ new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(instruction);
if (length_loc.IsConstant()) {
int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
@@ -4653,6 +4642,7 @@
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::Any());
+ // Note that TypeCheckSlowPathX86_64 uses this register too.
locations->SetOut(Location::RequiresRegister());
}
@@ -4688,8 +4678,7 @@
} else {
// If the classes are not equal, we go into a slow path.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
- instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction);
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -4712,6 +4701,7 @@
instruction, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::Any());
+ // Note that TypeCheckSlowPathX86_64 uses this register too.
locations->AddTemp(Location::RequiresRegister());
}
@@ -4721,8 +4711,8 @@
Location cls = locations->InAt(1);
CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
- instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+ SlowPathCodeX86_64* slow_path =
+ new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction);
codegen_->AddSlowPath(slow_path);
// Avoid null check if we know obj is not null.
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 806fd7a..69a3e62 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -103,7 +103,7 @@
if (invoke_->IsInvokeStaticOrDirect()) {
codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
Location::RegisterLocation(kArtMethodRegister));
- RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
+ codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
} else {
UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
UNREACHABLE();
@@ -989,10 +989,7 @@
DCHECK_ALIGNED(value_offset, 4);
static_assert(IsAligned<4>(kObjectAlignment), "String of odd length is not zero padded");
- // temp cannot overflow because we cannot allocate a String object with size 4GiB or greater.
- __ add(temp, temp, ShifterOperand(temp));
__ LoadImmediate(temp1, value_offset);
- __ add(temp, temp, ShifterOperand(value_offset));
// Loop to compare strings 2 characters at a time starting at the front of the string.
// Ok to do this because strings with an odd length are zero-padded.
@@ -1002,8 +999,8 @@
__ cmp(out, ShifterOperand(temp2));
__ b(&return_false, NE);
__ add(temp1, temp1, ShifterOperand(sizeof(uint32_t)));
- __ cmp(temp1, ShifterOperand(temp));
- __ b(&loop, LO);
+ __ subs(temp, temp, ShifterOperand(sizeof(uint32_t) / sizeof(uint16_t)));
+ __ b(&loop, GT);
// Return true and exit the function.
// If loop does not result in returning false, we return true.
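The String.equals change above replaces the end-pointer loop bound (two adds to compute an end address, then `cmp temp1, temp` / `b LO`) with a signed down-count on the remaining char count (`subs` / `b GT`). A self-contained C++ model of the new loop shape; it assumes, as the surrounding code guarantees but this excerpt does not show, that len is positive on entry and that both strings are zero-padded out to a 4-byte boundary:

```cpp
#include <cstdint>
#include <cstring>

// lhs/rhs point at the UTF-16 character data; len is the length in uint16_t chars.
// The byte offset below plays the role of temp1 (which starts at value_offset in
// the real code); value_offset is folded into lhs/rhs here.
static bool EqualsBody(const uint8_t* lhs, const uint8_t* rhs, int32_t len) {
  int32_t offset = 0;
  do {
    uint32_t a, b;
    std::memcpy(&a, lhs + offset, sizeof(a));    // ldr out, [str, temp1]
    std::memcpy(&b, rhs + offset, sizeof(b));    // ldr temp2, [arg, temp1]
    if (a != b) {
      return false;                              // b return_false, NE
    }
    offset += sizeof(uint32_t);                  // add temp1, temp1, #4
    len -= sizeof(uint32_t) / sizeof(uint16_t);  // subs temp, temp, #2
  } while (len > 0);                             // b loop, GT
  return true;
}
```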
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index a5332ea..0171d69 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -112,7 +112,7 @@
if (invoke_->IsInvokeStaticOrDirect()) {
codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
LocationFrom(kArtMethodRegister));
- RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
+ codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
} else {
UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
UNREACHABLE();
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index b7126b2..be076cd 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -141,7 +141,7 @@
if (invoke_->IsInvokeStaticOrDirect()) {
codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
Location::RegisterLocation(EAX));
- RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
+ codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
} else {
UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
UNREACHABLE();
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 15fbac1..1f35b59 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -132,7 +132,7 @@
if (invoke_->IsInvokeStaticOrDirect()) {
codegen->GenerateStaticOrDirectCall(
invoke_->AsInvokeStaticOrDirect(), Location::RegisterLocation(RDI));
- RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
+ codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
} else {
UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
UNREACHABLE();
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index e78914c..287aca9 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2543,15 +2543,12 @@
bool allow_failure) {
// Search assuming uniqueness of the dex file.
JavaVMExt* const vm = self->GetJniEnv()->vm;
- {
- MutexLock mu(self, vm->WeakGlobalsLock());
- for (jobject weak_root : dex_caches_) {
- DCHECK_EQ(GetIndirectRefKind(weak_root), kWeakGlobal);
- mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(
- vm->DecodeWeakGlobalLocked(self, weak_root));
- if (dex_cache != nullptr && dex_cache->GetDexFile() == &dex_file) {
- return dex_cache;
- }
+ for (jobject weak_root : dex_caches_) {
+ DCHECK_EQ(GetIndirectRefKind(weak_root), kWeakGlobal);
+ mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(
+ vm->DecodeWeakGlobal(self, weak_root));
+ if (dex_cache != nullptr && dex_cache->GetDexFile() == &dex_file) {
+ return dex_cache;
}
}
if (allow_failure) {
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 213f25d..29413bf 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -1132,7 +1132,7 @@
return ProcessAnnotationSet(field_class, annotation_set, kDexVisibilityRuntime);
}
-mirror::ObjectArray<mirror::Object>* DexFile::GetSignatureAnnotationForField(ArtField* field)
+mirror::ObjectArray<mirror::String>* DexFile::GetSignatureAnnotationForField(ArtField* field)
const {
const AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
if (annotation_set == nullptr) {
@@ -1253,7 +1253,7 @@
return ProcessAnnotationSet(method_class, annotation_set, kDexVisibilityRuntime);
}
-mirror::ObjectArray<mirror::Object>* DexFile::GetExceptionTypesForMethod(ArtMethod* method) const {
+mirror::ObjectArray<mirror::Class>* DexFile::GetExceptionTypesForMethod(ArtMethod* method) const {
const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
if (annotation_set == nullptr) {
return nullptr;
@@ -1289,7 +1289,7 @@
Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass()));
const AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
method_class, annotation_set, kDexVisibilityRuntime, annotation_class);
- return (annotation_item != nullptr);
+ return annotation_item != nullptr;
}
const DexFile::AnnotationSetItem* DexFile::FindAnnotationSetForClass(Handle<mirror::Class> klass)
@@ -1317,6 +1317,153 @@
return ProcessAnnotationSet(klass, annotation_set, kDexVisibilityRuntime);
}
+mirror::ObjectArray<mirror::Class>* DexFile::GetDeclaredClasses(Handle<mirror::Class> klass) const {
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+ if (annotation_set == nullptr) {
+ return nullptr;
+ }
+ const AnnotationItem* annotation_item = SearchAnnotationSet(
+ annotation_set, "Ldalvik/annotation/MemberClasses;", kDexVisibilitySystem);
+ if (annotation_item == nullptr) {
+ return nullptr;
+ }
+ StackHandleScope<1> hs(Thread::Current());
+ mirror::Class* class_class = mirror::Class::GetJavaLangClass();
+ Handle<mirror::Class> class_array_class(hs.NewHandle(
+ Runtime::Current()->GetClassLinker()->FindArrayClass(hs.Self(), &class_class)));
+ if (class_array_class.Get() == nullptr) {
+ return nullptr;
+ }
+ mirror::Object* obj = GetAnnotationValue(
+ klass, annotation_item, "value", class_array_class, kDexAnnotationArray);
+ if (obj == nullptr) {
+ return nullptr;
+ }
+ return obj->AsObjectArray<mirror::Class>();
+}
+
+mirror::Class* DexFile::GetDeclaringClass(Handle<mirror::Class> klass) const {
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+ if (annotation_set == nullptr) {
+ return nullptr;
+ }
+ const AnnotationItem* annotation_item = SearchAnnotationSet(
+ annotation_set, "Ldalvik/annotation/EnclosingClass;", kDexVisibilitySystem);
+ if (annotation_item == nullptr) {
+ return nullptr;
+ }
+ mirror::Object* obj = GetAnnotationValue(
+ klass, annotation_item, "value", NullHandle<mirror::Class>(), kDexAnnotationType);
+ if (obj == nullptr) {
+ return nullptr;
+ }
+ return obj->AsClass();
+}
+
+mirror::Class* DexFile::GetEnclosingClass(Handle<mirror::Class> klass) const {
+ mirror::Class* declaring_class = GetDeclaringClass(klass);
+ if (declaring_class != nullptr) {
+ return declaring_class;
+ }
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+ if (annotation_set == nullptr) {
+ return nullptr;
+ }
+ const AnnotationItem* annotation_item = SearchAnnotationSet(
+ annotation_set, "Ldalvik/annotation/EnclosingMethod;", kDexVisibilitySystem);
+ if (annotation_item == nullptr) {
+ return nullptr;
+ }
+ const uint8_t* annotation = SearchEncodedAnnotation(annotation_item->annotation_, "value");
+ if (annotation == nullptr) {
+ return nullptr;
+ }
+ AnnotationValue annotation_value;
+ if (!ProcessAnnotationValue(
+ klass, &annotation, &annotation_value, NullHandle<mirror::Class>(), kAllRaw)) {
+ return nullptr;
+ }
+ if (annotation_value.type_ != kDexAnnotationMethod) {
+ return nullptr;
+ }
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
+ ArtMethod* method = Runtime::Current()->GetClassLinker()->ResolveMethodWithoutInvokeType(
+ klass->GetDexFile(), annotation_value.value_.GetI(), dex_cache, class_loader);
+ if (method == nullptr) {
+ return nullptr;
+ }
+ return method->GetDeclaringClass();
+}
+
+mirror::Object* DexFile::GetEnclosingMethod(Handle<mirror::Class> klass) const {
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+ if (annotation_set == nullptr) {
+ return nullptr;
+ }
+ const AnnotationItem* annotation_item = SearchAnnotationSet(
+ annotation_set, "Ldalvik/annotation/EnclosingMethod;", kDexVisibilitySystem);
+ if (annotation_item == nullptr) {
+ return nullptr;
+ }
+ return GetAnnotationValue(
+ klass, annotation_item, "value", NullHandle<mirror::Class>(), kDexAnnotationMethod);
+}
+
+bool DexFile::GetInnerClass(Handle<mirror::Class> klass, mirror::String** name) const {
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+ if (annotation_set == nullptr) {
+ return false;
+ }
+ const AnnotationItem* annotation_item = SearchAnnotationSet(
+ annotation_set, "Ldalvik/annotation/InnerClass;", kDexVisibilitySystem);
+ if (annotation_item == nullptr) {
+ return false;
+ }
+ const uint8_t* annotation = SearchEncodedAnnotation(annotation_item->annotation_, "name");
+ if (annotation == nullptr) {
+ return false;
+ }
+ AnnotationValue annotation_value;
+ if (!ProcessAnnotationValue(
+ klass, &annotation, &annotation_value, NullHandle<mirror::Class>(), kAllObjects)) {
+ return false;
+ }
+ if (annotation_value.type_ != kDexAnnotationNull &&
+ annotation_value.type_ != kDexAnnotationString) {
+ return false;
+ }
+ *name = down_cast<mirror::String*>(annotation_value.value_.GetL());
+ return true;
+}
+
+bool DexFile::GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags) const {
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+ if (annotation_set == nullptr) {
+ return false;
+ }
+ const AnnotationItem* annotation_item = SearchAnnotationSet(
+ annotation_set, "Ldalvik/annotation/InnerClass;", kDexVisibilitySystem);
+ if (annotation_item == nullptr) {
+ return false;
+ }
+ const uint8_t* annotation = SearchEncodedAnnotation(annotation_item->annotation_, "accessFlags");
+ if (annotation == nullptr) {
+ return false;
+ }
+ AnnotationValue annotation_value;
+ if (!ProcessAnnotationValue(
+ klass, &annotation, &annotation_value, NullHandle<mirror::Class>(), kAllRaw)) {
+ return false;
+ }
+ if (annotation_value.type_ != kDexAnnotationInt) {
+ return false;
+ }
+ *flags = annotation_value.value_.GetI();
+ return true;
+}
+
bool DexFile::IsClassAnnotationPresent(Handle<mirror::Class> klass,
Handle<mirror::Class> annotation_class) const {
const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
@@ -1325,7 +1472,7 @@
}
const AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
klass, annotation_set, kDexVisibilityRuntime, annotation_class);
- return (annotation_item != nullptr);
+ return annotation_item != nullptr;
}
mirror::Object* DexFile::CreateAnnotationMember(Handle<mirror::Class> klass,
@@ -1440,7 +1587,7 @@
return annotation_value.value_.GetL();
}
-mirror::ObjectArray<mirror::Object>* DexFile::GetSignatureValue(Handle<mirror::Class> klass,
+mirror::ObjectArray<mirror::String>* DexFile::GetSignatureValue(Handle<mirror::Class> klass,
const AnnotationSetItem* annotation_set) const {
StackHandleScope<1> hs(Thread::Current());
const AnnotationItem* annotation_item =
@@ -1451,15 +1598,18 @@
mirror::Class* string_class = mirror::String::GetJavaLangString();
Handle<mirror::Class> string_array_class(hs.NewHandle(
Runtime::Current()->GetClassLinker()->FindArrayClass(Thread::Current(), &string_class)));
+ if (string_array_class.Get() == nullptr) {
+ return nullptr;
+ }
mirror::Object* obj =
GetAnnotationValue(klass, annotation_item, "value", string_array_class, kDexAnnotationArray);
if (obj == nullptr) {
return nullptr;
}
- return obj->AsObjectArray<mirror::Object>();
+ return obj->AsObjectArray<mirror::String>();
}
-mirror::ObjectArray<mirror::Object>* DexFile::GetThrowsValue(Handle<mirror::Class> klass,
+mirror::ObjectArray<mirror::Class>* DexFile::GetThrowsValue(Handle<mirror::Class> klass,
const AnnotationSetItem* annotation_set) const {
StackHandleScope<1> hs(Thread::Current());
const AnnotationItem* annotation_item =
@@ -1470,12 +1620,15 @@
mirror::Class* class_class = mirror::Class::GetJavaLangClass();
Handle<mirror::Class> class_array_class(hs.NewHandle(
Runtime::Current()->GetClassLinker()->FindArrayClass(Thread::Current(), &class_class)));
+ if (class_array_class.Get() == nullptr) {
+ return nullptr;
+ }
mirror::Object* obj =
GetAnnotationValue(klass, annotation_item, "value", class_array_class, kDexAnnotationArray);
if (obj == nullptr) {
return nullptr;
}
- return obj->AsObjectArray<mirror::Object>();
+ return obj->AsObjectArray<mirror::Class>();
}
mirror::ObjectArray<mirror::Object>* DexFile::ProcessAnnotationSet(Handle<mirror::Class> klass,
@@ -1507,6 +1660,8 @@
if (annotation_obj != nullptr) {
result->SetWithoutChecks<false>(dest_index, annotation_obj);
++dest_index;
+ } else if (self->IsExceptionPending()) {
+ return nullptr;
}
}
@@ -1516,6 +1671,10 @@
mirror::ObjectArray<mirror::Object>* trimmed_result =
mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_array_class.Get(), dest_index);
+ if (trimmed_result == nullptr) {
+ return nullptr;
+ }
+
for (uint32_t i = 0; i < dest_index; ++i) {
mirror::Object* obj = result->GetWithoutChecks(i);
trimmed_result->SetWithoutChecks<false>(i, obj);
@@ -1533,6 +1692,9 @@
soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array);
mirror::Class* annotation_array_array_class =
Runtime::Current()->GetClassLinker()->FindArrayClass(self, &annotation_array_class);
+ if (annotation_array_array_class == nullptr) {
+ return nullptr;
+ }
Handle<mirror::ObjectArray<mirror::Object>> annotation_array_array(hs.NewHandle(
mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_array_array_class, size)));
if (annotation_array_array.Get() == nullptr) {
@@ -1625,9 +1787,8 @@
klass->GetDexFile(), index, klass.Get());
set_object = true;
if (element_object == nullptr) {
- self->ClearException();
- const char* msg = StringByTypeIdx(index);
- self->ThrowNewException("Ljava/lang/TypeNotPresentException;", msg);
+ // TODO: Put a TypeNotFoundExceptionProxy here instead of throwing.
+ return false;
}
}
break;
@@ -1831,8 +1992,10 @@
soa.Decode<mirror::Class*>(WellKnownClasses::libcore_reflect_AnnotationMember);
mirror::Class* annotation_member_array_class =
class_linker->FindArrayClass(self, &annotation_member_class);
+ if (annotation_member_array_class == nullptr) {
+ return nullptr;
+ }
mirror::ObjectArray<mirror::Object>* element_array = nullptr;
-
if (size > 0) {
element_array =
mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_member_array_class, size);
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 8928321..98d4e59 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -931,7 +931,7 @@
const SHARED_REQUIRES(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* GetAnnotationsForField(ArtField* field) const
SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::ObjectArray<mirror::Object>* GetSignatureAnnotationForField(ArtField* field) const
+ mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForField(ArtField* field) const
SHARED_REQUIRES(Locks::mutator_lock_);
bool IsFieldAnnotationPresent(ArtField* field, Handle<mirror::Class> annotation_class) const
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -946,7 +946,7 @@
const SHARED_REQUIRES(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* GetAnnotationsForMethod(ArtMethod* method) const
SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::ObjectArray<mirror::Object>* GetExceptionTypesForMethod(ArtMethod* method) const
+ mirror::ObjectArray<mirror::Class>* GetExceptionTypesForMethod(ArtMethod* method) const
SHARED_REQUIRES(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* GetParameterAnnotations(ArtMethod* method) const
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -960,6 +960,18 @@
SHARED_REQUIRES(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* GetAnnotationsForClass(Handle<mirror::Class> klass) const
SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> klass) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Class* GetDeclaringClass(Handle<mirror::Class> klass) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* GetEnclosingMethod(Handle<mirror::Class> klass) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ bool GetInnerClass(Handle<mirror::Class> klass, mirror::String** name) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ bool GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool IsClassAnnotationPresent(Handle<mirror::Class> klass, Handle<mirror::Class> annotation_class)
const SHARED_REQUIRES(Locks::mutator_lock_);
@@ -983,11 +995,11 @@
Handle<mirror::Class> array_class,
uint32_t expected_type) const
SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::ObjectArray<mirror::Object>* GetSignatureValue(Handle<mirror::Class> klass,
+ mirror::ObjectArray<mirror::String>* GetSignatureValue(Handle<mirror::Class> klass,
const AnnotationSetItem* annotation_set)
const SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::ObjectArray<mirror::Object>* GetThrowsValue(Handle<mirror::Class> klass,
- const AnnotationSetItem* annotation_set) const
+ mirror::ObjectArray<mirror::Class>* GetThrowsValue(Handle<mirror::Class> klass,
+ const AnnotationSetItem* annotation_set) const
SHARED_REQUIRES(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* ProcessAnnotationSet(Handle<mirror::Class> klass,
const AnnotationSetItem* annotation_set,
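The bool-returning GetInnerClass and GetInnerClassFlags use an out-parameter so that a missing or malformed InnerClass annotation is distinguishable from a legitimately null name or zero flag value, while the pointer-returning getters simply yield nullptr on absence. A hypothetical call site, for illustration only; the fallback policy here is invented, not part of this change:

```cpp
// Assumes a DexFile& dex_file and a Handle<mirror::Class> h_klass in scope.
uint32_t access_flags = 0;
if (dex_file.GetInnerClassFlags(h_klass, &access_flags)) {
  // The InnerClass annotation was present and carried an int accessFlags value.
} else {
  // Annotation absent (or not an int): fall back to the class's own flags.
  access_flags = h_klass->GetAccessFlags();
}
```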
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index aec8d63..4bc44d3 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1900,6 +1900,11 @@
CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
}
+bool Heap::SupportHomogeneousSpaceCompactAndCollectorTransitions() const {
+ return main_space_backup_.get() != nullptr && main_space_ != nullptr &&
+ foreground_collector_type_ == kCollectorTypeCMS;
+}
+
HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
Thread* self = Thread::Current();
// Inc requested homogeneous space compaction.
@@ -1919,7 +1924,10 @@
// exit.
if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
!main_space_->CanMoveObjects()) {
- return HomogeneousSpaceCompactResult::kErrorReject;
+ return kErrorReject;
+ }
+ if (!SupportHomogeneousSpaceCompactAndCollectorTransitions()) {
+ return kErrorUnsupported;
}
collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 85688ae..8bffe5e 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -105,6 +105,8 @@
kSuccess,
// Reject due to disabled moving GC.
kErrorReject,
+ // Unsupported due to the current configuration.
+ kErrorUnsupported,
// System is shutting down.
kErrorVMShuttingDown,
};
@@ -753,6 +755,10 @@
void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);
+ // Create a new alloc space and compact default alloc space to it.
+ HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact() REQUIRES(!*gc_complete_lock_);
+ bool SupportHomogeneousSpaceCompactAndCollectorTransitions() const;
+
private:
class ConcurrentGCTask;
class CollectorTransitionTask;
@@ -905,9 +911,6 @@
// Find a collector based on GC type.
collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
- // Create a new alloc space and compact default alloc space to it.
- HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact() REQUIRES(!*gc_complete_lock_);
-
// Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
void CreateMainMallocSpace(MemMap* mem_map,
size_t initial_size,
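With kErrorUnsupported added and PerformHomogeneousSpaceCompact promoted to the public section, callers can distinguish a transient rejection (moving GC temporarily disabled) from a configuration that can never compact (no backup main space, or a foreground collector other than CMS). A hedged sketch of how a caller might branch on the result; the dispatch itself is illustrative, only the case semantics come from this change:

```cpp
switch (heap->PerformHomogeneousSpaceCompact()) {
  case HomogeneousSpaceCompactResult::kSuccess:
    break;  // Compaction ran.
  case HomogeneousSpaceCompactResult::kErrorReject:
    // Moving GC is disabled right now; retrying later may succeed.
    break;
  case HomogeneousSpaceCompactResult::kErrorUnsupported:
    // SupportHomogeneousSpaceCompactAndCollectorTransitions() is false for this
    // configuration; retrying is pointless.
    break;
  case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
    break;  // Nothing to do.
}
```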
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index c398555..d13526b 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -290,9 +290,7 @@
// Synchronized get which reads a reference, acquiring a lock if necessary.
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- mirror::Object* SynchronizedGet(Thread* /*self*/, ReaderWriterMutex* /*mutex*/,
- IndirectRef iref) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Object* SynchronizedGet(IndirectRef iref) const SHARED_REQUIRES(Locks::mutator_lock_) {
return Get<kReadBarrierOption>(iref);
}
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index ef7a924..8060e3d 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -375,7 +375,7 @@
unchecked_functions_(&gJniInvokeInterface),
weak_globals_lock_("JNI weak global reference table lock", kJniWeakGlobalsLock),
weak_globals_(kWeakGlobalsInitial, kWeakGlobalsMax, kWeakGlobal),
- allow_new_weak_globals_(true),
+ allow_accessing_weak_globals_(true),
weak_globals_add_condition_("weak globals add condition", weak_globals_lock_) {
functions = unchecked_functions_;
SetCheckJniEnabled(runtime_options.Exists(RuntimeArgumentMap::CheckJni));
@@ -473,8 +473,7 @@
return nullptr;
}
MutexLock mu(self, weak_globals_lock_);
- while (UNLIKELY((!kUseReadBarrier && !allow_new_weak_globals_) ||
- (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+ while (UNLIKELY(!MayAccessWeakGlobals(self))) {
weak_globals_add_condition_.WaitHoldingLocks(self);
}
IndirectRef ref = weak_globals_.Add(IRT_FIRST_SEGMENT, obj);
@@ -542,14 +541,19 @@
}
void JavaVMExt::DisallowNewWeakGlobals() {
- MutexLock mu(Thread::Current(), weak_globals_lock_);
- allow_new_weak_globals_ = false;
+ Thread* const self = Thread::Current();
+ MutexLock mu(self, weak_globals_lock_);
+ // DisallowNewWeakGlobals is only called by CMS during the pause. The mutator lock must be
+ // held exclusively so that no thread is in the middle of DecodeWeakGlobal.
+ Locks::mutator_lock_->AssertExclusiveHeld(self);
+ allow_accessing_weak_globals_.StoreSequentiallyConsistent(false);
}
void JavaVMExt::AllowNewWeakGlobals() {
Thread* self = Thread::Current();
MutexLock mu(self, weak_globals_lock_);
- allow_new_weak_globals_ = true;
+ allow_accessing_weak_globals_.StoreSequentiallyConsistent(true);
weak_globals_add_condition_.Broadcast(self);
}
@@ -557,7 +561,7 @@
// Lock and unlock once to ensure that no threads are still in the
// middle of adding new weak globals.
MutexLock mu(Thread::Current(), weak_globals_lock_);
- CHECK(!allow_new_weak_globals_);
+ CHECK(!allow_accessing_weak_globals_.LoadSequentiallyConsistent());
}
void JavaVMExt::BroadcastForNewWeakGlobals() {
@@ -567,8 +571,8 @@
weak_globals_add_condition_.Broadcast(self);
}
-mirror::Object* JavaVMExt::DecodeGlobal(Thread* self, IndirectRef ref) {
- return globals_.SynchronizedGet(self, &globals_lock_, ref);
+mirror::Object* JavaVMExt::DecodeGlobal(IndirectRef ref) {
+ return globals_.SynchronizedGet(ref);
}
void JavaVMExt::UpdateGlobal(Thread* self, IndirectRef ref, mirror::Object* result) {
@@ -576,7 +580,25 @@
globals_.Update(ref, result);
}
+inline bool JavaVMExt::MayAccessWeakGlobals(Thread* self) const {
+ return MayAccessWeakGlobalsUnlocked(self);
+}
+
+inline bool JavaVMExt::MayAccessWeakGlobalsUnlocked(Thread* self) const {
+ return kUseReadBarrier ? self->GetWeakRefAccessEnabled() :
+ allow_accessing_weak_globals_.LoadSequentiallyConsistent();
+}
+
mirror::Object* JavaVMExt::DecodeWeakGlobal(Thread* self, IndirectRef ref) {
+ // It is safe to access GetWeakRefAccessEnabled without the lock since CC uses checkpoints to call
+ // SetWeakRefAccessEnabled, and the other collectors only modify allow_accessing_weak_globals_
+ // when the mutators are paused.
+ // This only applies in the case where MayAccessWeakGlobals goes from false to true. The other
+ // case may be racy, but that is benign since DecodeWeakGlobalLocked behaves correctly when
+ // MayAccessWeakGlobals is false.
+ if (LIKELY(MayAccessWeakGlobalsUnlocked(self))) {
+ return weak_globals_.SynchronizedGet(ref);
+ }
MutexLock mu(self, weak_globals_lock_);
return DecodeWeakGlobalLocked(self, ref);
}
@@ -585,8 +607,7 @@
if (kDebugLocking) {
weak_globals_lock_.AssertHeld(self);
}
- while (UNLIKELY((!kUseReadBarrier && !allow_new_weak_globals_) ||
- (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+ while (UNLIKELY(!MayAccessWeakGlobals(self))) {
weak_globals_add_condition_.WaitHoldingLocks(self);
}
return weak_globals_.Get(ref);
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index e80266f..d68a85f 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -126,7 +126,7 @@
void SweepJniWeakGlobals(IsMarkedVisitor* visitor)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
- mirror::Object* DecodeGlobal(Thread* self, IndirectRef ref)
+ mirror::Object* DecodeGlobal(IndirectRef ref)
SHARED_REQUIRES(Locks::mutator_lock_);
void UpdateGlobal(Thread* self, IndirectRef ref, mirror::Object* result)
@@ -155,6 +155,12 @@
REQUIRES(!globals_lock_);
private:
+ // Return true if self can currently access weak globals.
+ bool MayAccessWeakGlobalsUnlocked(Thread* self) const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool MayAccessWeakGlobals(Thread* self) const
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(weak_globals_lock_);
+
Runtime* const runtime_;
// Used for testing. By default, we'll LOG(FATAL) the reason.
@@ -184,8 +190,10 @@
// Since weak_globals_ contain weak roots, be careful not to
// directly access the object references in it. Use Get() with the
// read barrier enabled.
- IndirectReferenceTable weak_globals_ GUARDED_BY(weak_globals_lock_);
- bool allow_new_weak_globals_ GUARDED_BY(weak_globals_lock_);
+ // Not guarded by weak_globals_lock since we may use SynchronizedGet in DecodeWeakGlobal.
+ IndirectReferenceTable weak_globals_;
+ // Not guarded by weak_globals_lock since we may use SynchronizedGet in DecodeWeakGlobal.
+ Atomic<bool> allow_accessing_weak_globals_;
ConditionVariable weak_globals_add_condition_ GUARDED_BY(weak_globals_lock_);
DISALLOW_COPY_AND_ASSIGN(JavaVMExt);
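Taken together, the java_vm_ext changes turn DecodeWeakGlobal into a double-checked pattern: a lock-free atomic read guards the common case, and only when weak-global access is disallowed does the caller take weak_globals_lock_ and wait on weak_globals_add_condition_. A self-contained model using standard-library primitives; ART uses its own Mutex, ConditionVariable, and Atomic types, so this illustrates the pattern rather than the real implementation:

```cpp
#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <mutex>

class WeakTable {
 public:
  void* Decode(uintptr_t ref) {
    // Fast path: a lock-free load. Safe because the flag is only cleared while
    // all mutators are paused (or via checkpoints for the CC collector), so a
    // running thread cannot observe a stale 'true'.
    if (allow_access_.load()) {
      return Get(ref);
    }
    // Slow path: block until access is re-enabled, as DecodeWeakGlobalLocked does.
    std::unique_lock<std::mutex> lock(lock_);
    cond_.wait(lock, [this] { return allow_access_.load(); });
    return Get(ref);
  }

  // Like DisallowNewWeakGlobals: only called while mutators are suspended.
  void Disallow() {
    std::lock_guard<std::mutex> lock(lock_);
    allow_access_.store(false);
  }

  // Like AllowNewWeakGlobals: re-enable access and wake any blocked decoders.
  void Allow() {
    std::lock_guard<std::mutex> lock(lock_);
    allow_access_.store(true);
    cond_.notify_all();
  }

 private:
  void* Get(uintptr_t /*ref*/) { return nullptr; }  // stand-in for the IRT lookup
  std::atomic<bool> allow_access_{true};
  std::mutex lock_;
  std::condition_variable cond_;
};
```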
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 7e464e9..8fd6849 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -430,7 +430,7 @@
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<2> hs(soa.Self());
Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
- if (klass->IsProxyClass()) {
+ if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
return nullptr;
}
Handle<mirror::Class> annotation_class(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
@@ -442,7 +442,7 @@
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<1> hs(soa.Self());
Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
- if (klass->IsProxyClass()) {
+ if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
// Return an empty array instead of a null pointer.
mirror::Class* annotation_array_class =
soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array);
@@ -453,18 +453,141 @@
return soa.AddLocalReference<jobjectArray>(klass->GetDexFile().GetAnnotationsForClass(klass));
}
+static jobjectArray Class_getDeclaredClasses(JNIEnv* env, jobject javaThis) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+ mirror::ObjectArray<mirror::Class>* classes = nullptr;
+ if (!klass->IsProxyClass() && klass->GetDexCache() != nullptr) {
+ classes = klass->GetDexFile().GetDeclaredClasses(klass);
+ }
+ if (classes == nullptr) {
+ // Return an empty array instead of a null pointer.
+ mirror::Class* class_class = mirror::Class::GetJavaLangClass();
+ mirror::Class* class_array_class =
+ Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(), &class_class);
+ if (class_array_class == nullptr) {
+ return nullptr;
+ }
+ mirror::ObjectArray<mirror::Class>* empty_array =
+ mirror::ObjectArray<mirror::Class>::Alloc(soa.Self(), class_array_class, 0);
+ return soa.AddLocalReference<jobjectArray>(empty_array);
+ }
+ return soa.AddLocalReference<jobjectArray>(classes);
+}
+
+static jclass Class_getEnclosingClass(JNIEnv* env, jobject javaThis) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+ if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+ return nullptr;
+ }
+ return soa.AddLocalReference<jclass>(klass->GetDexFile().GetEnclosingClass(klass));
+}
+
+static jobject Class_getEnclosingConstructorNative(JNIEnv* env, jobject javaThis) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+ if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+ return nullptr;
+ }
+ mirror::Object* method = klass->GetDexFile().GetEnclosingMethod(klass);
+ if (method != nullptr) {
+ if (method->GetClass() ==
+ soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_reflect_Constructor)) {
+ return soa.AddLocalReference<jobject>(method);
+ }
+ }
+ return nullptr;
+}
+
+static jobject Class_getEnclosingMethodNative(JNIEnv* env, jobject javaThis) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+ if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+ return nullptr;
+ }
+ mirror::Object* method = klass->GetDexFile().GetEnclosingMethod(klass);
+ if (method != nullptr) {
+ if (method->GetClass() ==
+ soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_reflect_Method)) {
+ return soa.AddLocalReference<jobject>(method);
+ }
+ }
+ return nullptr;
+}
+
+static jint Class_getInnerClassFlags(JNIEnv* env, jobject javaThis, jint defaultValue) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+ if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+ return defaultValue;
+ }
+ uint32_t flags;
+ if (!klass->GetDexFile().GetInnerClassFlags(klass, &flags)) {
+ return defaultValue;
+ }
+ return flags;
+}
+
+static jstring Class_getInnerClassName(JNIEnv* env, jobject javaThis) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+ if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+ return nullptr;
+ }
+ mirror::String* class_name = nullptr;
+ if (!klass->GetDexFile().GetInnerClass(klass, &class_name)) {
+ return nullptr;
+ }
+ return soa.AddLocalReference<jstring>(class_name);
+}
+
+static jboolean Class_isAnonymousClass(JNIEnv* env, jobject javaThis) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+ if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+ return false;
+ }
+ mirror::String* class_name = nullptr;
+ if (!klass->GetDexFile().GetInnerClass(klass, &class_name)) {
+ return false;
+ }
+ return class_name == nullptr;
+}
+
static jboolean Class_isDeclaredAnnotationPresent(JNIEnv* env, jobject javaThis,
jclass annotationType) {
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<2> hs(soa.Self());
Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
- if (klass->IsProxyClass()) {
+ if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
return false;
}
Handle<mirror::Class> annotation_class(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
return klass->GetDexFile().IsClassAnnotationPresent(klass, annotation_class);
}
+
+static jclass Class_getDeclaringClass(JNIEnv* env, jobject javaThis) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+ if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+ return nullptr;
+ }
+ // Return null for anonymous classes.
+ if (Class_isAnonymousClass(env, javaThis)) {
+ return nullptr;
+ }
+ return soa.AddLocalReference<jclass>(klass->GetDexFile().GetDeclaringClass(klass));
+}
+
static jobject Class_newInstance(JNIEnv* env, jobject javaThis) {
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<4> hs(soa.Self());
@@ -550,6 +673,7 @@
NATIVE_METHOD(Class, getDeclaredAnnotation,
"!(Ljava/lang/Class;)Ljava/lang/annotation/Annotation;"),
NATIVE_METHOD(Class, getDeclaredAnnotations, "!()[Ljava/lang/annotation/Annotation;"),
+ NATIVE_METHOD(Class, getDeclaredClasses, "!()[Ljava/lang/Class;"),
NATIVE_METHOD(Class, getDeclaredConstructorInternal,
"!([Ljava/lang/Class;)Ljava/lang/reflect/Constructor;"),
NATIVE_METHOD(Class, getDeclaredConstructorsInternal, "!(Z)[Ljava/lang/reflect/Constructor;"),
@@ -561,9 +685,16 @@
"!(Ljava/lang/String;[Ljava/lang/Class;)Ljava/lang/reflect/Method;"),
NATIVE_METHOD(Class, getDeclaredMethodsUnchecked,
"!(Z)[Ljava/lang/reflect/Method;"),
+ NATIVE_METHOD(Class, getDeclaringClass, "!()Ljava/lang/Class;"),
+ NATIVE_METHOD(Class, getEnclosingClass, "!()Ljava/lang/Class;"),
+ NATIVE_METHOD(Class, getEnclosingConstructorNative, "!()Ljava/lang/reflect/Constructor;"),
+ NATIVE_METHOD(Class, getEnclosingMethodNative, "!()Ljava/lang/reflect/Method;"),
+ NATIVE_METHOD(Class, getInnerClassFlags, "!(I)I"),
+ NATIVE_METHOD(Class, getInnerClassName, "!()Ljava/lang/String;"),
NATIVE_METHOD(Class, getNameNative, "!()Ljava/lang/String;"),
NATIVE_METHOD(Class, getProxyInterfaces, "!()[Ljava/lang/Class;"),
NATIVE_METHOD(Class, getPublicDeclaredFields, "!()[Ljava/lang/reflect/Field;"),
+ NATIVE_METHOD(Class, isAnonymousClass, "!()Z"),
NATIVE_METHOD(Class, isDeclaredAnnotationPresent, "!(Ljava/lang/Class;)Z"),
NATIVE_METHOD(Class, newInstance, "!()Ljava/lang/Object;"),
};
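
The NATIVE_METHOD lines above expand to standard JNINativeMethod entries (method name, JNI type signature, C++ function pointer); the leading '!' in the signature is ART's marker for fast native calls, and registration goes through the usual JNI RegisterNatives mechanism. A minimal sketch of that mechanism with a made-up class and method (com/example/MyClass is purely illustrative):

#include <jni.h>

// Hypothetical native backing `static native boolean isSpecial()` on a made-up class.
static jboolean MyClass_isSpecial(JNIEnv*, jclass) {
  return JNI_FALSE;
}

// Register the native against com/example/MyClass (illustrative name only).
jint RegisterMyClassNatives(JNIEnv* env) {
  static const JNINativeMethod kMethods[] = {
      // Name, JNI signature, function pointer: the same triple NATIVE_METHOD builds.
      {"isSpecial", "()Z", reinterpret_cast<void*>(MyClass_isSpecial)},
  };
  jclass klass = env->FindClass("com/example/MyClass");
  if (klass == nullptr) {
    return JNI_ERR;  // Lookup failed; a ClassNotFoundException is pending.
  }
  return env->RegisterNatives(klass, kMethods,
                              sizeof(kMethods) / sizeof(kMethods[0]));
}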
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index b4b77e7..e1e9ceb 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -48,15 +48,18 @@
static jobjectArray Constructor_getExceptionTypes(JNIEnv* env, jobject javaMethod) {
ScopedFastNativeObjectAccess soa(env);
ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
- mirror::ObjectArray<mirror::Object>* result_array =
+ mirror::ObjectArray<mirror::Class>* result_array =
method->GetDexFile()->GetExceptionTypesForMethod(method);
if (result_array == nullptr) {
// Return an empty array instead of a null pointer.
mirror::Class* class_class = mirror::Class::GetJavaLangClass();
mirror::Class* class_array_class =
Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(), &class_class);
- mirror::ObjectArray<mirror::Object>* empty_array =
- mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), class_array_class, 0);
+ if (class_array_class == nullptr) {
+ return nullptr;
+ }
+ mirror::ObjectArray<mirror::Class>* empty_array =
+ mirror::ObjectArray<mirror::Class>::Alloc(soa.Self(), class_array_class, 0);
return soa.AddLocalReference<jobjectArray>(empty_array);
} else {
return soa.AddLocalReference<jobjectArray>(result_array);
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index 1219f85..caacba6 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -82,15 +82,18 @@
mirror::ObjectArray<mirror::Class>* declared_exceptions = klass->GetThrows()->Get(throws_index);
return soa.AddLocalReference<jobjectArray>(declared_exceptions->Clone(soa.Self()));
} else {
- mirror::ObjectArray<mirror::Object>* result_array =
+ mirror::ObjectArray<mirror::Class>* result_array =
method->GetDexFile()->GetExceptionTypesForMethod(method);
if (result_array == nullptr) {
// Return an empty array instead of a null pointer
mirror::Class* class_class = mirror::Class::GetJavaLangClass();
mirror::Class* class_array_class =
Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(), &class_class);
- mirror::ObjectArray<mirror::Object>* empty_array =
- mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), class_array_class, 0);
+ if (class_array_class == nullptr) {
+ return nullptr;
+ }
+ mirror::ObjectArray<mirror::Class>* empty_array =
+ mirror::ObjectArray<mirror::Class>::Alloc(soa.Self(), class_array_class, 0);
return soa.AddLocalReference<jobjectArray>(empty_array);
} else {
return soa.AddLocalReference<jobjectArray>(result_array);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a9dc16d..25bb827 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -20,6 +20,7 @@
#include <sys/mount.h>
#ifdef __linux__
#include <linux/fs.h>
+#include <sys/prctl.h>
#endif
#define ATRACE_TAG ATRACE_TAG_DALVIK
@@ -493,6 +494,14 @@
CHECK(!no_sig_chain_) << "A started runtime should have sig chain enabled";
+ // On a debug host build, disable the ptrace restriction for debugging and the test-timeout
+ // thread dump. 64-bit only, as prctl() may fail in 32-bit userspace on a 64-bit kernel.
+#if defined(__linux__) && !defined(__ANDROID__) && defined(__x86_64__)
+ if (kIsDebugBuild) {
+ CHECK_EQ(prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY), 0);
+ }
+#endif
+
// Restore main thread state to kNative as expected by native code.
Thread* self = Thread::Current();
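
The prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY) call above opts the process out of the Yama ptrace-scope restriction, so an external debugger (here, the gdb spawned on test timeout) can attach without being the parent process. A hedged sketch of the same call with a softer failure mode than CHECK_EQ; the preprocessor guards are assumptions mirroring the ones above:

#include <cstdio>
#if defined(__linux__)
#include <sys/prctl.h>
#endif

// Best-effort: allow any process to ptrace-attach to us (subject to Yama policy).
void AllowAnyTracer() {
#if defined(__linux__) && defined(PR_SET_PTRACER) && defined(PR_SET_PTRACER_ANY)
  // Fails with EINVAL on kernels without the option; harmless to ignore in a sketch.
  if (prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0) != 0) {
    std::perror("prctl(PR_SET_PTRACER)");
  }
#endif
}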
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 63534b1..9929487 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1728,7 +1728,7 @@
result = nullptr;
}
} else if (kind == kGlobal) {
- result = tlsPtr_.jni_env->vm->DecodeGlobal(const_cast<Thread*>(this), ref);
+ result = tlsPtr_.jni_env->vm->DecodeGlobal(ref);
} else {
DCHECK_EQ(kind, kWeakGlobal);
result = tlsPtr_.jni_env->vm->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
diff --git a/test/1337-gc-coverage/check b/test/1337-gc-coverage/check
new file mode 100755
index 0000000..842bdc6
--- /dev/null
+++ b/test/1337-gc-coverage/check
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Check that the string "error" isn't present in the test output.
+if grep error "$2"; then
+ exit 1
+else
+ exit 0
+fi
diff --git a/test/1337-gc-coverage/expected.txt b/test/1337-gc-coverage/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/1337-gc-coverage/expected.txt
diff --git a/test/1337-gc-coverage/gc_coverage.cc b/test/1337-gc-coverage/gc_coverage.cc
new file mode 100644
index 0000000..7cf30bd
--- /dev/null
+++ b/test/1337-gc-coverage/gc_coverage.cc
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gc/heap.h"
+#include "jni.h"
+#include "runtime.h"
+#include "scoped_thread_state_change.h"
+#include "thread-inl.h"
+
+namespace art {
+namespace {
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_performHomogeneousSpaceCompact(JNIEnv*, jclass) {
+ return Runtime::Current()->GetHeap()->PerformHomogeneousSpaceCompact() == gc::kSuccess ?
+ JNI_TRUE : JNI_FALSE;
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_supportHomogeneousSpaceCompact(JNIEnv*, jclass) {
+ return Runtime::Current()->GetHeap()->SupportHomogeneousSpaceCompactAndCollectorTransitions() ?
+ JNI_TRUE : JNI_FALSE;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_incrementDisableMovingGC(JNIEnv*, jclass) {
+ Runtime::Current()->GetHeap()->IncrementDisableMovingGC(Thread::Current());
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_decrementDisableMovingGC(JNIEnv*, jclass) {
+ Runtime::Current()->GetHeap()->DecrementDisableMovingGC(Thread::Current());
+}
+
+extern "C" JNIEXPORT jlong JNICALL Java_Main_objectAddress(JNIEnv* env, jclass, jobject object) {
+ ScopedObjectAccess soa(env);
+ return reinterpret_cast<jlong>(soa.Decode<mirror::Object*>(object));
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_supportCollectorTransition(JNIEnv*, jclass) {
+ // Same as supportHomogeneousSpaceCompact for now.
+ return Runtime::Current()->GetHeap()->SupportHomogeneousSpaceCompactAndCollectorTransitions() ?
+ JNI_TRUE : JNI_FALSE;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_transitionToSS(JNIEnv*, jclass) {
+ Runtime::Current()->GetHeap()->TransitionCollector(gc::kCollectorTypeSS);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_transitionToCMS(JNIEnv*, jclass) {
+ Runtime::Current()->GetHeap()->TransitionCollector(gc::kCollectorTypeCMS);
+}
+
+} // namespace
+} // namespace art
diff --git a/test/1337-gc-coverage/info.txt b/test/1337-gc-coverage/info.txt
new file mode 100644
index 0000000..7e3acd3
--- /dev/null
+++ b/test/1337-gc-coverage/info.txt
@@ -0,0 +1 @@
+Tests internal GC functions which are not exposed through normal APIs.
\ No newline at end of file
diff --git a/test/1337-gc-coverage/src/Main.java b/test/1337-gc-coverage/src/Main.java
new file mode 100644
index 0000000..7875eb1
--- /dev/null
+++ b/test/1337-gc-coverage/src/Main.java
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.TreeMap;
+
+public class Main {
+ private static TreeMap treeMap = new TreeMap();
+
+ public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+ testHomogeneousCompaction();
+ testCollectorTransitions();
+ System.out.println("Done.");
+ }
+
+ private static void allocateStuff() {
+ for (int i = 0; i < 1000; ++i) {
+ Object o = new Object();
+ treeMap.put(o.hashCode(), o);
+ }
+ }
+
+ public static void testHomogeneousCompaction() {
+ System.out.println("Attempting homogeneous compaction");
+ final boolean supportHSC = supportHomogeneousSpaceCompact();
+ Object o = new Object();
+ long addressBefore = objectAddress(o);
+ long addressAfter;
+ allocateStuff();
+ final boolean success = performHomogeneousSpaceCompact();
+ allocateStuff();
+ System.out.println("Homogeneous compaction support=" + supportHSC + " success=" + success);
+ if (supportHSC != success) {
+ System.out.println("error: Expected " + supportHSC + " but got " + success);
+ }
+ if (success) {
+ allocateStuff();
+ addressAfter = objectAddress(o);
+ // This relies on the compaction copying objects from one space to another, with no overlap
+ // between the two spaces.
+ if (addressBefore == addressAfter) {
+ System.out.println("error: Expected different address " + addressBefore + " vs " +
+ addressAfter);
+ }
+ }
+ if (supportHSC) {
+ incrementDisableMovingGC();
+ if (performHomogeneousSpaceCompact()) {
+ System.out.println("error: Compaction succeeded when moving GC is disabled");
+ }
+ decrementDisableMovingGC();
+ if (!performHomogeneousSpaceCompact()) {
+ System.out.println("error: Compaction failed when moving GC is enabled");
+ }
+ }
+ }
+
+ private static void testCollectorTransitions() {
+ if (supportCollectorTransition()) {
+ Object o = new Object();
+ // Transition to semi-space collector.
+ allocateStuff();
+ transitionToSS();
+ allocateStuff();
+ long addressBefore = objectAddress(o);
+ Runtime.getRuntime().gc();
+ long addressAfter = objectAddress(o);
+ if (addressBefore == addressAfter) {
+ System.out.println("error: Expected different address " + addressBefore + " vs " +
+ addressAfter);
+ }
+ // Transition back to CMS.
+ transitionToCMS();
+ allocateStuff();
+ addressBefore = objectAddress(o);
+ Runtime.getRuntime().gc();
+ addressAfter = objectAddress(o);
+ if (addressBefore != addressAfter) {
+ System.out.println("error: Expected same address " + addressBefore + " vs " +
+ addressAfter);
+ }
+ }
+ }
+
+ // Methods to get access to ART internals.
+ private static native boolean supportHomogeneousSpaceCompact();
+ private static native boolean performHomogeneousSpaceCompact();
+ private static native void incrementDisableMovingGC();
+ private static native void decrementDisableMovingGC();
+ private static native long objectAddress(Object object);
+ private static native boolean supportCollectorTransition();
+ private static native void transitionToSS();
+ private static native void transitionToCMS();
+}
diff --git a/test/999-jni-perf/check b/test/999-jni-perf/check
new file mode 100755
index 0000000..ffbb8cf
--- /dev/null
+++ b/test/999-jni-perf/check
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Only compare the last line.
+tail -n 1 "$2" | diff --strip-trailing-cr -q "$1" - >/dev/null
\ No newline at end of file
diff --git a/test/999-jni-perf/expected.txt b/test/999-jni-perf/expected.txt
new file mode 100644
index 0000000..a965a70
--- /dev/null
+++ b/test/999-jni-perf/expected.txt
@@ -0,0 +1 @@
+Done
diff --git a/test/999-jni-perf/info.txt b/test/999-jni-perf/info.txt
new file mode 100644
index 0000000..010b57b
--- /dev/null
+++ b/test/999-jni-perf/info.txt
@@ -0,0 +1 @@
+Tests for measuring performance of JNI state changes.
diff --git a/test/999-jni-perf/perf-jni.cc b/test/999-jni-perf/perf-jni.cc
new file mode 100644
index 0000000..51eeb83
--- /dev/null
+++ b/test/999-jni-perf/perf-jni.cc
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include "jni.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+
+namespace art {
+
+namespace {
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_perfJniEmptyCall(JNIEnv*, jobject) {
+ return 0;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_perfSOACall(JNIEnv*, jobject) {
+ ScopedObjectAccess soa(Thread::Current());
+ return 0;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_perfSOAUncheckedCall(JNIEnv*, jobject) {
+ ScopedObjectAccessUnchecked soa(Thread::Current());
+ return 0;
+}
+
+} // namespace
+
+} // namespace art
diff --git a/test/999-jni-perf/src/Main.java b/test/999-jni-perf/src/Main.java
new file mode 100644
index 0000000..032e700
--- /dev/null
+++ b/test/999-jni-perf/src/Main.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public Main() {
+ }
+
+ private static final String MSG = "ABCDE";
+
+ native int perfJniEmptyCall();
+ native int perfSOACall();
+ native int perfSOAUncheckedCall();
+
+ int runPerfTest(long N) {
+ long start = System.nanoTime();
+ for (long i = 0; i < N; i++) {
+ char c = MSG.charAt(2);
+ }
+ long elapse = System.nanoTime() - start;
+ System.out.println("Fast JNI (charAt): " + (double)elapse / N);
+
+ start = System.nanoTime();
+ for (long i = 0; i < N; i++) {
+ perfJniEmptyCall();
+ }
+ elapse = System.nanoTime() - start;
+ System.out.println("Empty call: " + (double)elapse / N);
+
+ start = System.nanoTime();
+ for (long i = 0; i < N; i++) {
+ perfSOACall();
+ }
+ elapse = System.nanoTime() - start;
+ System.out.println("SOA call: " + (double)elapse / N);
+
+ start = System.nanoTime();
+ for (long i = 0; i < N; i++) {
+ perfSOAUncheckedCall();
+ }
+ elapse = System.nanoTime() - start;
+ System.out.println("SOA unchecked call: " + (double)elapse / N);
+
+ return 0;
+ }
+
+ public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+ long iterations = 1000000;
+ if (args.length > 1) {
+ iterations = Long.parseLong(args[1], 10);
+ }
+ Main m = new Main();
+ m.runPerfTest(iterations);
+ System.out.println("Done");
+ }
+}
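
The Java harness above uses the standard micro-benchmark skeleton: read a monotonic clock, run N calls, and report elapsed nanoseconds divided by N. For comparison, the same skeleton in C++ (NoOpCall is a hypothetical stand-in for the JNI entry points; a serious harness would also add warm-up iterations before timing):

#include <chrono>
#include <cstdio>

// Hypothetical stand-in for the native call under measurement.
__attribute__((noinline)) int NoOpCall() { return 0; }

int main() {
  constexpr long kIterations = 1000000;
  volatile long sink = 0;  // Keeps the compiler from eliding the loop.
  auto start = std::chrono::steady_clock::now();
  for (long i = 0; i < kIterations; ++i) {
    sink += NoOpCall();
  }
  auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
                std::chrono::steady_clock::now() - start).count();
  std::printf("Empty call: %.2f ns/iter\n",
              static_cast<double>(ns) / kIterations);
  return 0;
}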
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index 82f8c79..90bf5b5 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -29,6 +29,7 @@
116-nodex2oat/nodex2oat.cc \
117-nopatchoat/nopatchoat.cc \
118-noimage-dex2oat/noimage-dex2oat.cc \
+ 1337-gc-coverage/gc_coverage.cc \
137-cfi/cfi.cc \
139-register-natives/regnative.cc \
454-get-vreg/get_vreg_jni.cc \
@@ -36,7 +37,8 @@
457-regs/regs_jni.cc \
461-get-reference-vreg/get_reference_vreg_jni.cc \
466-get-live-vreg/get_live_vreg_jni.cc \
- 497-inlining-and-class-loader/clear_dex_cache.cc
+ 497-inlining-and-class-loader/clear_dex_cache.cc \
+ 999-jni-perf/perf-jni.cc
ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttestd.so
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 39dc030..6b57f2b 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -37,9 +37,9 @@
QUIET="n"
RELOCATE="y"
SECONDARY_DEX=""
-TIME_OUT="y"
-# Value in minutes.
-TIME_OUT_VALUE=10
+TIME_OUT="gdb" # "n" (disabled), "timeout" (use timeout), "gdb" (use gdb)
+# Value in seconds
+TIME_OUT_VALUE=600 # 10 minutes.
USE_GDB="n"
USE_JVM="n"
VERIFY="y" # y=yes,n=no,s=softfail
@@ -459,15 +459,32 @@
cmdline="$dalvikvm_cmdline"
- if [ "$TIME_OUT" = "y" ]; then
+ if [ "$TIME_OUT" = "gdb" ]; then
+ if [ `uname` = "Darwin" ]; then
+ # Fall back to timeout on Mac.
+ TIME_OUT="timeout"
+ elif [ "$ISA" = "x86" ]; then
+ # The prctl call may fail in 32-bit userspace on an older (3.2) 64-bit Linux kernel. Fall back to timeout.
+ TIME_OUT="timeout"
+ else
+ # Check if gdb is available.
+ gdb --eval-command="quit" > /dev/null 2>&1
+ if [ $? != 0 ]; then
+ # gdb isn't available. Fall back to timeout.
+ TIME_OUT="timeout"
+ fi
+ fi
+ fi
+
+ if [ "$TIME_OUT" = "timeout" ]; then
# Add timeout command if time out is desired.
#
# Note: We use nested timeouts. The inner timeout sends SIGRTMIN+2 (usually 36) to ART, which
# will induce a full thread dump before abort. However, dumping threads might deadlock,
# so the outer timeout sends the regular SIGTERM after an additional minute to ensure
# termination (without dumping all threads).
- TIME_PLUS_ONE=$(($TIME_OUT_VALUE + 1))
- cmdline="timeout ${TIME_PLUS_ONE}m timeout -s SIGRTMIN+2 ${TIME_OUT_VALUE}m $cmdline"
+ TIME_PLUS_ONE=$(($TIME_OUT_VALUE + 60))
+ cmdline="timeout ${TIME_PLUS_ONE}s timeout -s SIGRTMIN+2 ${TIME_OUT_VALUE}s $cmdline"
fi
if [ "$DEV_MODE" = "y" ]; then
@@ -502,12 +519,37 @@
# When running under gdb, we cannot do piping and grepping...
$cmdline "$@"
else
- trap 'kill -INT -$pid' INT
- $cmdline "$@" 2>&1 & pid=$!
- wait $pid
- # Add extra detail if time out is enabled.
- if [ ${PIPESTATUS[0]} = 124 ] && [ "$TIME_OUT" = "y" ]; then
- echo -e "\e[91mTEST TIMED OUT!\e[0m" >&2
+ if [ "$TIME_OUT" != "gdb" ]; then
+ trap 'kill -INT -$pid' INT
+ $cmdline "$@" 2>&1 & pid=$!
+ wait $pid
+ # Add extra detail if time out is enabled.
+ if [ ${PIPESTATUS[0]} = 124 ] && [ "$TIME_OUT" = "timeout" ]; then
+ echo -e "\e[91mTEST TIMED OUT!\e[0m" >&2
+ fi
+ else
+ # Otherwise, run with a watcher that uses gdb to produce a thread dump on timeout.
+ trap 'kill -INT -$pid' INT
+ $cmdline "$@" 2>&1 & pid=$!
+ # Spawn a watcher process.
+ ( sleep $TIME_OUT_VALUE && \
+ echo "##### Thread dump using gdb on test timeout" && \
+ ( gdb -q -p $pid --eval-command="info thread" --eval-command="thread apply all bt" \
+ --eval-command="call exit(124)" --eval-command=quit || \
+ kill $pid )) 2> /dev/null & watcher=$!
+ wait $pid
+ test_exit_status=$?
+ pkill -P $watcher 2> /dev/null # Kill the sleep, which in turn ends the watcher as well.
+ if [ $test_exit_status = 0 ]; then
+ # The test finished normally.
+ exit 0
+ else
+ # The test failed or timed out.
+ if [ $test_exit_status = 124 ]; then
+ # The test timed out.
+ echo -e "\e[91mTEST TIMED OUT!\e[0m" >&2
+ fi
+ fi
fi
fi
fi
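
Both timeout paths above assume the process under test reacts to SIGRTMIN+2 by dumping its threads before dying, which is what makes the inner timeout (or the gdb watcher's exit(124)) useful for diagnosis. A stripped-down sketch of a process installing such a handler; a real runtime's dump is far more involved, and only async-signal-safe calls belong in the handler:

#include <csignal>
#include <cstdlib>
#include <unistd.h>

// On SIGRTMIN+2, write a marker (a stand-in for a full thread dump), then
// abort so the outer timeout's SIGTERM is never needed.
void DumpAndAbort(int) {
  static const char kMsg[] = "SIGRTMIN+2 received: dumping threads\n";
  write(STDERR_FILENO, kMsg, sizeof(kMsg) - 1);  // write() is async-signal-safe.
  abort();
}

int main() {
  struct sigaction sa = {};
  sa.sa_handler = DumpAndAbort;
  sigaction(SIGRTMIN + 2, &sa, nullptr);
  pause();  // e.g. run under: timeout -s SIGRTMIN+2 5s ./a.out
  return 0;
}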
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 728991d..7ada189 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -130,28 +130,7 @@
description: "Crypto failures",
result: EXEC_FAILED,
names: ["libcore.javax.crypto.CipherTest#testCipher_ShortBlock_Failure",
- "libcore.javax.crypto.CipherTest#testCipher_Success",
- "libcore.javax.crypto.spec.AlgorithmParametersTestDESede#testAlgorithmParameters",
- "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#testDoFinalbyteArrayintintbyteArrayint",
- "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#testUpdatebyteArrayintintbyteArrayint",
- "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_doFinal$BI",
- "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_doFinal$BII$B",
- "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_doFinalLjava_nio_ByteBufferLjava_nio_ByteBuffer",
- "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_getAlgorithm",
- "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_getBlockSize",
- "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_getInstanceLjava_lang_String",
- "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_getOutputSizeI",
- "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_initWithAlgorithmParameterSpec",
- "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_initWithKey",
- "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_initWithKeyAlgorithmParameterSpecSecureRandom",
- "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_initWithSecureRandom",
- "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_unwrap$BLjava_lang_StringI",
- "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_updateLjava_nio_ByteBufferLjava_nio_ByteBuffer",
- "org.apache.harmony.crypto.tests.javax.crypto.func.CipherAesWrapTest#test_AesWrap",
- "org.apache.harmony.crypto.tests.javax.crypto.func.CipherDESedeTest#test_DESedeISO",
- "org.apache.harmony.crypto.tests.javax.crypto.func.CipherDESedeTest#test_DESedeNoISO",
- "org.apache.harmony.crypto.tests.javax.crypto.func.CipherDESedeWrapTest#test_DESedeWrap",
- "org.apache.harmony.crypto.tests.javax.crypto.func.CipherPBETest#test_PBEWithMD5AndDES"]
+ "libcore.javax.crypto.CipherTest#testCipher_Success"]
},
{
description: "Flake when running with libartd.so or interpreter",