Merge "Fixup JDWP for obsolete methods"
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 98dcf20..0683577 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -117,25 +117,6 @@
std::list<std::vector<uint8_t>> header_code_and_maps_chunks_;
};
-// TODO: When read barrier works with all Optimizing back ends, get rid of this.
-#define TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS() \
- if (kUseReadBarrier && GetCompilerKind() == Compiler::kOptimizing) { \
- switch (GetInstructionSet()) { \
- case kArm64: \
- case kThumb2: \
- case kX86: \
- case kX86_64: \
- /* Instruction set has read barrier support. */ \
- break; \
- \
- default: \
- /* Instruction set does not have barrier support. */ \
- printf("WARNING: TEST DISABLED FOR READ BARRIER WITH OPTIMIZING " \
- "FOR THIS INSTRUCTION SET\n"); \
- return; \
- } \
- }
-
} // namespace art
#endif // ART_COMPILER_COMMON_COMPILER_TEST_H_
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 35aa1ee..fa1b3a3 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -148,7 +148,6 @@
}
TEST_F(CompilerDriverTest, AbstractMethodErrorStub) {
- TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS();
jobject class_loader;
{
ScopedObjectAccess soa(Thread::Current());
@@ -191,7 +190,6 @@
};
TEST_F(CompilerDriverMethodsTest, Selection) {
- TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS();
Thread* self = Thread::Current();
jobject class_loader;
{
@@ -299,7 +297,6 @@
};
TEST_F(CompilerDriverProfileTest, ProfileGuidedCompilation) {
- TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS();
Thread* self = Thread::Current();
jobject class_loader;
{
diff --git a/compiler/optimizing/code_generator_vector_arm.cc b/compiler/optimizing/code_generator_vector_arm.cc
index ba2b2cb..e7f7b30 100644
--- a/compiler/optimizing/code_generator_vector_arm.cc
+++ b/compiler/optimizing/code_generator_vector_arm.cc
@@ -81,6 +81,14 @@
LOG(FATAL) << "No SIMD for " << instruction->GetId();
}
+void LocationsBuilderARM::VisitVecAbs(HVecAbs* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitVecAbs(HVecAbs* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
void LocationsBuilderARM::VisitVecNot(HVecNot* instruction) {
CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
}
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc
index 96d0021..f4874fe 100644
--- a/compiler/optimizing/code_generator_vector_arm64.cc
+++ b/compiler/optimizing/code_generator_vector_arm64.cc
@@ -169,6 +169,37 @@
}
}
+void LocationsBuilderARM64::VisitVecAbs(HVecAbs* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARM64::VisitVecAbs(HVecAbs* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ FPRegister src = DRegisterFrom(locations->InAt(0));
+ FPRegister dst = DRegisterFrom(locations->Out());
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ Abs(dst.V8B(), src.V8B());
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Abs(dst.V4H(), src.V4H());
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Abs(dst.V2S(), src.V2S());
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Fabs(dst.V2S(), src.V2S());
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ }
+}
+
void LocationsBuilderARM64::VisitVecNot(HVecNot* instruction) {
CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
}
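A note on the lane counts asserted in the ARM64 VisitVecAbs above: this path operates on 64-bit NEON D registers, so the vector length is simply 8 bytes divided by the element size, which is what the V8B/V4H/V2S arrangements encode. A tiny standalone check of that arithmetic, illustration only and not part of the patch:

#include <cstddef>
#include <cstdint>

// Lanes packed into a 64-bit NEON D register: 8 bytes / element size.
constexpr std::size_t kDRegisterBytes = 8;
constexpr std::size_t LaneCount(std::size_t element_bytes) {
  return kDRegisterBytes / element_bytes;
}

static_assert(LaneCount(sizeof(int8_t)) == 8, "V8B: 8 x i8");
static_assert(LaneCount(sizeof(int16_t)) == 4, "V4H: 4 x i16");
static_assert(LaneCount(sizeof(int32_t)) == 2, "V2S: 2 x i32");
static_assert(LaneCount(sizeof(float)) == 2, "V2S: 2 x f32");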
diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc
index 1711989..74fa584 100644
--- a/compiler/optimizing/code_generator_vector_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc
@@ -81,6 +81,14 @@
LOG(FATAL) << "No SIMD for " << instruction->GetId();
}
+void LocationsBuilderARMVIXL::VisitVecAbs(HVecAbs* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecAbs(HVecAbs* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
void LocationsBuilderARMVIXL::VisitVecNot(HVecNot* instruction) {
CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
}
diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc
index 6f5fe0d..6969abd 100644
--- a/compiler/optimizing/code_generator_vector_mips.cc
+++ b/compiler/optimizing/code_generator_vector_mips.cc
@@ -81,6 +81,14 @@
LOG(FATAL) << "No SIMD for " << instruction->GetId();
}
+void LocationsBuilderMIPS::VisitVecAbs(HVecAbs* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecAbs(HVecAbs* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
void LocationsBuilderMIPS::VisitVecNot(HVecNot* instruction) {
CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
}
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
index 2ee7ac9..87118ce 100644
--- a/compiler/optimizing/code_generator_vector_mips64.cc
+++ b/compiler/optimizing/code_generator_vector_mips64.cc
@@ -81,6 +81,14 @@
LOG(FATAL) << "No SIMD for " << instruction->GetId();
}
+void LocationsBuilderMIPS64::VisitVecAbs(HVecAbs* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecAbs(HVecAbs* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
void LocationsBuilderMIPS64::VisitVecNot(HVecNot* instruction) {
CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
}
diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc
index 4f3988e..8dabb4d 100644
--- a/compiler/optimizing/code_generator_vector_x86.cc
+++ b/compiler/optimizing/code_generator_vector_x86.cc
@@ -199,6 +199,46 @@
}
}
+void LocationsBuilderX86::VisitVecAbs(HVecAbs* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ if (instruction->GetPackedType() == Primitive::kPrimInt) {
+ instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
+ }
+}
+
+void InstructionCodeGeneratorX86::VisitVecAbs(HVecAbs* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimInt: {
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ __ movaps(dst, src);
+ __ pxor(tmp, tmp);
+ __ pcmpgtd(tmp, dst);
+ __ pxor(dst, tmp);
+ __ psubd(dst, tmp);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pcmpeqb(dst, dst); // all ones
+ __ psrld(dst, Immediate(1));
+ __ andps(dst, src);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ pcmpeqb(dst, dst); // all ones
+ __ psrlq(dst, Immediate(1));
+ __ andpd(dst, src);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
void LocationsBuilderX86::VisitVecNot(HVecNot* instruction) {
CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
// Boolean-not requires a temporary to construct the 16 x one.
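For reference, the SSE sequences in VisitVecAbs above implement two standard identities: integer abs without a dedicated instruction (pcmpgtd against zero builds a per-lane sign mask, then pxor and psubd compute (x ^ mask) - mask), and floating-point abs by ANDing with an all-ones value shifted right by one, which clears only the sign bit. A scalar C++ sketch of the same identities, illustration only and not part of the patch:

#include <cstdint>
#include <cstring>

// Branch-free integer abs, as done per 32-bit lane above:
// mask = (0 > x) ? ~0 : 0; |x| = (x ^ mask) - mask.
// Note abs(INT_MIN) wraps back to INT_MIN, matching the new 645-checker-abs-simd test.
int32_t AbsInt32(int32_t x) {
  uint32_t ux = static_cast<uint32_t>(x);
  uint32_t mask = (x < 0) ? 0xffffffffu : 0u;  // what pcmpgtd(0, x) yields per lane
  return static_cast<int32_t>((ux ^ mask) - mask);
}

// Float abs: clear the sign bit with 0x7fffffff (all ones shifted right by 1).
float AbsFloat(float x) {
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits &= 0x7fffffffu;
  std::memcpy(&x, &bits, sizeof(x));
  return x;
}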
diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc
index b1c1494..e956088 100644
--- a/compiler/optimizing/code_generator_vector_x86_64.cc
+++ b/compiler/optimizing/code_generator_vector_x86_64.cc
@@ -192,6 +192,46 @@
}
}
+void LocationsBuilderX86_64::VisitVecAbs(HVecAbs* instruction) {
+ CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ if (instruction->GetPackedType() == Primitive::kPrimInt) {
+ instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
+ }
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecAbs(HVecAbs* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimInt: {
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ __ movaps(dst, src);
+ __ pxor(tmp, tmp);
+ __ pcmpgtd(tmp, dst);
+ __ pxor(dst, tmp);
+ __ psubd(dst, tmp);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pcmpeqb(dst, dst); // all ones
+ __ psrld(dst, Immediate(1));
+ __ andps(dst, src);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ pcmpeqb(dst, dst); // all ones
+ __ psrlq(dst, Immediate(1));
+ __ andpd(dst, src);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
void LocationsBuilderX86_64::VisitVecNot(HVecNot* instruction) {
CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
// Boolean-not requires a temporary to construct the 16 x one.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 80776e8..08a752f 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -186,10 +186,10 @@
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, locations); // only saves full width XMM for SIMD
+ SaveLiveRegisters(codegen, locations); // Only saves full width XMM for SIMD.
x86_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, locations); // only saves full width XMM for SIMD
+ RestoreLiveRegisters(codegen, locations); // Only restores full width XMM for SIMD.
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
} else {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 49f099f..ff6e099 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -143,10 +143,10 @@
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, locations); // only saves full width XMM for SIMD
+ SaveLiveRegisters(codegen, locations); // Only saves full width XMM for SIMD.
x86_64_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, locations); // only saves full width XMM for SIMD
+ RestoreLiveRegisters(codegen, locations); // Only restores full width XMM for SIMD.
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
} else {
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 79cd704..298ae5c 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -371,6 +371,12 @@
// invoke-virtual because a proxy method doesn't have a real dex file.
return nullptr;
}
+ if (!single_impl->GetDeclaringClass()->IsResolved()) {
+ // There's a race with the class loading, which updates the CHA info
+ // before setting the class to resolved. So we just bail for this
+ // rare occurrence.
+ return nullptr;
+ }
return single_impl;
}
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 42ed04d..1a79601 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -533,29 +533,26 @@
kNoRegNumber,
0,
HPhi::ToPhiType(induc_type));
- // Generate header.
+ // Generate header and prepare body.
// for (i = lo; i < hi; i += step)
// <loop-body>
HInstruction* cond = new (global_allocator_) HAboveOrEqual(vector_phi_, hi);
vector_header_->AddPhi(vector_phi_);
vector_header_->AddInstruction(cond);
vector_header_->AddInstruction(new (global_allocator_) HIf(cond));
- // Suspend check and environment.
- HInstruction* suspend = vector_header_->GetFirstInstruction();
- suspend->CopyEnvironmentFromWithLoopPhiAdjustment(
- node->loop_info->GetSuspendCheck()->GetEnvironment(), vector_header_);
- // Generate body.
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
bool vectorized_def = VectorizeDef(node, it.Current(), /*generate_code*/ true);
DCHECK(vectorized_def);
}
+ // Generate body from the instruction map, but in original program order.
+ HEnvironment* env = vector_header_->GetFirstInstruction()->GetEnvironment();
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
auto i = vector_map_->find(it.Current());
if (i != vector_map_->end() && !i->second->IsInBlock()) {
- Insert(vector_body_, i->second); // lays out in original order
+ Insert(vector_body_, i->second);
+ // Deal with instructions that need an environment, such as the scalar intrinsics.
if (i->second->NeedsEnvironment()) {
- i->second->CopyEnvironmentFromWithLoopPhiAdjustment(
- suspend->GetEnvironment(), vector_header_);
+ i->second->CopyEnvironmentFromWithLoopPhiAdjustment(env, vector_header_);
}
}
}
@@ -735,8 +732,32 @@
return true;
}
} else if (instruction->IsInvokeStaticOrDirect()) {
- // TODO: coming soon.
- return false;
+ // Accept particular intrinsics.
+ HInvokeStaticOrDirect* invoke = instruction->AsInvokeStaticOrDirect();
+ switch (invoke->GetIntrinsic()) {
+ case Intrinsics::kMathAbsInt:
+ case Intrinsics::kMathAbsLong:
+ case Intrinsics::kMathAbsFloat:
+ case Intrinsics::kMathAbsDouble: {
+ // Deal with vector restrictions.
+ if (HasVectorRestrictions(restrictions, kNoAbs) ||
+ HasVectorRestrictions(restrictions, kNoHiBits)) {
+ // TODO: we can do better for some hibits cases.
+ return false;
+ }
+ // Accept ABS(x) for vectorizable operand.
+ HInstruction* opa = instruction->InputAt(0);
+ if (VectorizeUse(node, opa, generate_code, type, restrictions)) {
+ if (generate_code) {
+ GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
+ }
+ return true;
+ }
+ return false;
+ }
+ default:
+ return false;
+ } // switch
}
return false;
}
@@ -754,11 +775,11 @@
switch (type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
- *restrictions |= kNoDiv;
+ *restrictions |= kNoDiv | kNoAbs;
return TrySetVectorLength(8);
case Primitive::kPrimChar:
case Primitive::kPrimShort:
- *restrictions |= kNoDiv;
+ *restrictions |= kNoDiv | kNoAbs;
return TrySetVectorLength(4);
case Primitive::kPrimInt:
*restrictions |= kNoDiv;
@@ -775,17 +796,17 @@
switch (type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
- *restrictions |= kNoMul | kNoDiv | kNoShift;
+ *restrictions |= kNoMul | kNoDiv | kNoShift | kNoAbs;
return TrySetVectorLength(16);
case Primitive::kPrimChar:
case Primitive::kPrimShort:
- *restrictions |= kNoDiv;
+ *restrictions |= kNoDiv | kNoAbs;
return TrySetVectorLength(8);
case Primitive::kPrimInt:
*restrictions |= kNoDiv;
return TrySetVectorLength(4);
case Primitive::kPrimLong:
- *restrictions |= kNoMul | kNoDiv | kNoShr;
+ *restrictions |= kNoMul | kNoDiv | kNoShr | kNoAbs;
return TrySetVectorLength(2);
case Primitive::kPrimFloat:
return TrySetVectorLength(4);
@@ -956,7 +977,42 @@
new (global_allocator_) HVecUShr(global_allocator_, opa, opb, type, vector_length_),
new (global_allocator_) HUShr(type, opa, opb));
case HInstruction::kInvokeStaticOrDirect: {
- // TODO: coming soon.
+ HInvokeStaticOrDirect* invoke = org->AsInvokeStaticOrDirect();
+ if (vector_mode_ == kVector) {
+ switch (invoke->GetIntrinsic()) {
+ case Intrinsics::kMathAbsInt:
+ case Intrinsics::kMathAbsLong:
+ case Intrinsics::kMathAbsFloat:
+ case Intrinsics::kMathAbsDouble:
+ DCHECK(opb == nullptr);
+ vector = new (global_allocator_) HVecAbs(global_allocator_, opa, type, vector_length_);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD intrinsic";
+ UNREACHABLE();
+ } // switch invoke
+ } else {
+ // In scalar code, simply clone the method invoke, and replace its operands with the
+ // corresponding new scalar instructions in the loop. The instruction will get an
+ // environment while being inserted from the instruction map in original program order.
+ DCHECK(vector_mode_ == kSequential);
+ HInvokeStaticOrDirect* new_invoke = new (global_allocator_) HInvokeStaticOrDirect(
+ global_allocator_,
+ invoke->GetNumberOfArguments(),
+ invoke->GetType(),
+ invoke->GetDexPc(),
+ invoke->GetDexMethodIndex(),
+ invoke->GetResolvedMethod(),
+ invoke->GetDispatchInfo(),
+ invoke->GetInvokeType(),
+ invoke->GetTargetMethod(),
+ invoke->GetClinitCheckRequirement());
+ HInputsRef inputs = invoke->GetInputs();
+ for (size_t index = 0; index < inputs.size(); ++index) {
+ new_invoke->SetArgumentAt(index, vector_map_->Get(inputs[index]));
+ }
+ vector = new_invoke;
+ }
break;
}
default:
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 16f7691..d8f50aa 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -68,6 +68,7 @@
kNoShift = 4, // no shift
kNoShr = 8, // no arithmetic shift right
kNoHiBits = 16, // "wider" operations cannot bring in higher order bits
+ kNoAbs = 32, // no absolute value
};
/*
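The restriction values above are bit flags that each backend ORs together per packed type (see the loop_optimization.cc hunk earlier, e.g. kNoDiv | kNoAbs for byte and short elements), and VectorizeUse() then rejects an idiom when any tested bit is set. A minimal sketch of that pattern, assuming HasVectorRestrictions is a plain bit test and inferring the kNoMul/kNoDiv values from the power-of-two sequence:

#include <cstdint>

// Flags mirroring the enum above; kNoAbs (32) is the newly added one.
enum : uint64_t {
  kNoMul = 1,
  kNoDiv = 2,
  kNoShift = 4,
  kNoShr = 8,
  kNoHiBits = 16,
  kNoAbs = 32,
};

// Assumed shape of the check used in VectorizeUse(): true if any tested restriction applies.
static bool HasVectorRestrictions(uint64_t restrictions, uint64_t tested) {
  return (restrictions & tested) != 0;
}

// Example: a backend that set kNoDiv | kNoAbs for byte elements rejects the ABS idiom:
//   HasVectorRestrictions(kNoDiv | kNoAbs, kNoAbs)  // -> true, so VectorizeUse() bails.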
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 5617e4b..e71fea9 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2380,11 +2380,13 @@
MakeRoomFor(&reverse_post_order_, 1, index_of_body - 1);
reverse_post_order_[index_of_body] = new_body;
- // Add gotos and suspend check (client must add conditional in header and copy environment).
+ // Add gotos and suspend check (client must add conditional in header).
new_pre_header->AddInstruction(new (arena_) HGoto());
HSuspendCheck* suspend_check = new (arena_) HSuspendCheck(header->GetDexPc());
new_header->AddInstruction(suspend_check);
new_body->AddInstruction(new (arena_) HGoto());
+ suspend_check->CopyEnvironmentFromWithLoopPhiAdjustment(
+ loop->GetSuspendCheck()->GetEnvironment(), header);
// Update loop information.
new_header->AddBackEdge(new_body);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 52a02c2..671f950 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1374,6 +1374,7 @@
M(VecSumReduce, VecUnaryOperation) \
M(VecCnv, VecUnaryOperation) \
M(VecNeg, VecUnaryOperation) \
+ M(VecAbs, VecUnaryOperation) \
M(VecNot, VecUnaryOperation) \
M(VecAdd, VecBinaryOperation) \
M(VecSub, VecBinaryOperation) \
@@ -4224,6 +4225,10 @@
dispatch_info_ = dispatch_info;
}
+ DispatchInfo GetDispatchInfo() const {
+ return dispatch_info_;
+ }
+
void AddSpecialInput(HInstruction* input) {
// We allow only one special input.
DCHECK(!IsStringInit() && !HasCurrentMethodInput());
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 9f9b918..0cbbf2a 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -278,6 +278,25 @@
DISALLOW_COPY_AND_ASSIGN(HVecNeg);
};
+// Takes absolute value of every component in the vector,
+// viz. abs[ x1, .. , xn ] = [ |x1|, .. , |xn| ].
+class HVecAbs FINAL : public HVecUnaryOperation {
+ public:
+ HVecAbs(ArenaAllocator* arena,
+ HInstruction* input,
+ Primitive::Type packed_type,
+ size_t vector_length,
+ uint32_t dex_pc = kNoDexPc)
+ : HVecUnaryOperation(arena, packed_type, vector_length, dex_pc) {
+ DCHECK(input->IsVecOperation());
+ DCHECK_EQ(input->AsVecOperation()->GetPackedType(), packed_type);
+ SetRawInputAt(0, input);
+ }
+ DECLARE_INSTRUCTION(VecAbs);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HVecAbs);
+};
+
// Bitwise- or boolean-nots every component in the vector,
// viz. not[ x1, .. , xn ] = [ ~x1, .. , ~xn ], or
// not[ x1, .. , xn ] = [ !x1, .. , !xn ] for boolean.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index eb88fde..e542cbb 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -449,17 +449,6 @@
|| instruction_set == kX86_64;
}
-// Read barrier are supported on ARM, ARM64, x86 and x86-64 at the moment.
-// TODO: Add support for other architectures and remove this function
-static bool InstructionSetSupportsReadBarrier(InstructionSet instruction_set) {
- return instruction_set == kArm64
- || instruction_set == kThumb2
- || instruction_set == kMips
- || instruction_set == kMips64
- || instruction_set == kX86
- || instruction_set == kX86_64;
-}
-
// Strip pass name suffix to get optimization name.
static std::string ConvertPassNameToOptimizationName(const std::string& pass_name) {
size_t pos = pass_name.find(kPassNameSeparator);
@@ -914,12 +903,6 @@
return nullptr;
}
- // When read barriers are enabled, do not attempt to compile for
- // instruction sets that have no read barrier support.
- if (kEmitCompilerReadBarrier && !InstructionSetSupportsReadBarrier(instruction_set)) {
- return nullptr;
- }
-
if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
MaybeRecordStat(MethodCompilationStat::kNotCompiledPathological);
return nullptr;
@@ -1110,13 +1093,10 @@
if (kIsDebugBuild &&
IsCompilingWithCoreImage() &&
- IsInstructionSetSupported(compiler_driver->GetInstructionSet()) &&
- (!kEmitCompilerReadBarrier ||
- InstructionSetSupportsReadBarrier(compiler_driver->GetInstructionSet()))) {
+ IsInstructionSetSupported(compiler_driver->GetInstructionSet())) {
// For testing purposes, we put a special marker on method names
- // that should be compiled with this compiler (when the the
- // instruction set is supported -- and has support for read
- // barriers, if they are enabled). This makes sure we're not
+ // that should be compiled with this compiler (when the
+ // instruction set is supported). This makes sure we're not
// regressing.
std::string method_name = dex_file.PrettyMethod(method_idx);
bool shouldCompile = method_name.find("$opt$") != std::string::npos;
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
index 4502626..4905b5c 100644
--- a/dexlayout/dex_ir.cc
+++ b/dexlayout/dex_ir.cc
@@ -50,10 +50,11 @@
DebugInfoItem* debug_info = reinterpret_cast<DebugInfoItem*>(context);
LocalInfoVector& locals = debug_info->GetLocalInfo();
const char* name = entry.name_ != nullptr ? entry.name_ : "(null)";
+ const char* descriptor = entry.descriptor_ != nullptr ? entry.descriptor_ : "";
const char* signature = entry.signature_ != nullptr ? entry.signature_ : "";
locals.push_back(std::unique_ptr<LocalInfo>(
- new LocalInfo(name, entry.descriptor_, signature, entry.start_address_,
- entry.end_address_, entry.reg_)));
+ new LocalInfo(name, descriptor, signature, entry.start_address_, entry.end_address_,
+ entry.reg_)));
}
static uint32_t GetCodeItemSize(const DexFile::CodeItem& disk_code_item) {
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index 4f70404..e988aac 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -191,6 +191,20 @@
"QS5qYXZhAAFJAANMQTsAEkxqYXZhL2xhbmcvT2JqZWN0OwABVgABYQABYgABYwABAAcOAAMABw4A"
"BgAHDgAJAAcOAA==";
+// Dex file with local info containing a null type descriptor.
+// Constructed with a debug info sequence containing DBG_RESTART_LOCAL without any preceding
+// DBG_START_LOCAL to give the local a declared type.
+static const char kUnknownTypeDebugInfoInputDex[] =
+ "ZGV4CjAzNQBtKqZfzjHLNSNwW2A6Bz9FuCEX0sL+FF38AQAAcAAAAHhWNBIAAAAAAAAAAHQBAAAI"
+ "AAAAcAAAAAQAAACQAAAAAgAAAKAAAAAAAAAAAAAAAAMAAAC4AAAAAQAAANAAAAAMAQAA8AAAABwB"
+ "AAAkAQAALAEAAC8BAAA0AQAASAEAAEsBAABOAQAAAgAAAAMAAAAEAAAABQAAAAIAAAAAAAAAAAAA"
+ "AAUAAAADAAAAAAAAAAEAAQAAAAAAAQAAAAYAAAACAAEAAAAAAAEAAAABAAAAAgAAAAAAAAABAAAA"
+ "AAAAAGMBAAAAAAAAAQABAAEAAABUAQAABAAAAHAQAgAAAA4AAgABAAAAAABZAQAAAgAAABIQDwAG"
+ "PGluaXQ+AAZBLmphdmEAAUkAA0xBOwASTGphdmEvbGFuZy9PYmplY3Q7AAFWAAFhAAR0aGlzAAEA"
+ "Bw4AAwAHDh4GAAYAAAAAAQEAgYAE8AEBAYgCAAAACwAAAAAAAAABAAAAAAAAAAEAAAAIAAAAcAAA"
+ "AAIAAAAEAAAAkAAAAAMAAAACAAAAoAAAAAUAAAADAAAAuAAAAAYAAAABAAAA0AAAAAEgAAACAAAA"
+ "8AAAAAIgAAAIAAAAHAEAAAMgAAACAAAAVAEAAAAgAAABAAAAYwEAAAAQAAABAAAAdAEAAA==";
+
static void WriteBase64ToFile(const char* base64, File* file) {
// Decode base64.
CHECK(base64 != nullptr);
@@ -290,7 +304,7 @@
return true;
}
- // Runs DexFileOutput test.
+ // Runs DexFileLayout test.
bool DexFileLayoutExec(std::string* error_msg) {
ScratchFile tmp_file;
std::string tmp_name = tmp_file.GetFilename();
@@ -356,6 +370,26 @@
}
return true;
}
+
+ bool DexLayoutExec(ScratchFile* dex_file,
+ const char* dex_filename,
+ ScratchFile* profile_file,
+ const char* profile_filename,
+ std::vector<std::string>& dexlayout_exec_argv) {
+ WriteBase64ToFile(dex_filename, dex_file->GetFile());
+ EXPECT_EQ(dex_file->GetFile()->Flush(), 0);
+ if (profile_file != nullptr) {
+ WriteBase64ToFile(profile_filename, profile_file->GetFile());
+ EXPECT_EQ(profile_file->GetFile()->Flush(), 0);
+ }
+ std::string error_msg;
+ const bool result = ::art::Exec(dexlayout_exec_argv, &error_msg);
+ if (!result) {
+ LOG(ERROR) << "Error: " << error_msg;
+ return false;
+ }
+ return true;
+ }
};
@@ -405,94 +439,84 @@
}
TEST_F(DexLayoutTest, DuplicateOffset) {
- ScratchFile temp;
- WriteBase64ToFile(kDexFileDuplicateOffset, temp.GetFile());
- EXPECT_EQ(temp.GetFile()->Flush(), 0);
+ ScratchFile temp_dex;
std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
- std::vector<std::string> dexlayout_exec_argv = {
- dexlayout,
- "-a",
- "-i",
- "-o",
- "/dev/null",
- temp.GetFilename()};
- std::string error_msg;
- const bool result = ::art::Exec(dexlayout_exec_argv, &error_msg);
- EXPECT_TRUE(result);
- if (!result) {
- LOG(ERROR) << "Error " << error_msg;
- }
+ std::vector<std::string> dexlayout_exec_argv =
+ { dexlayout, "-a", "-i", "-o", "/dev/null", temp_dex.GetFilename() };
+ ASSERT_TRUE(DexLayoutExec(&temp_dex,
+ kDexFileDuplicateOffset,
+ nullptr /* profile_file */,
+ nullptr /* profile_filename */,
+ dexlayout_exec_argv));
}
TEST_F(DexLayoutTest, NullSetRefListElement) {
- ScratchFile temp;
- WriteBase64ToFile(kNullSetRefListElementInputDex, temp.GetFile());
- EXPECT_EQ(temp.GetFile()->Flush(), 0);
+ ScratchFile temp_dex;
std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
std::vector<std::string> dexlayout_exec_argv =
- { dexlayout, "-o", "/dev/null", temp.GetFilename() };
- std::string error_msg;
- const bool result = ::art::Exec(dexlayout_exec_argv, &error_msg);
- EXPECT_TRUE(result);
- if (!result) {
- LOG(ERROR) << "Error " << error_msg;
- }
+ { dexlayout, "-o", "/dev/null", temp_dex.GetFilename() };
+ ASSERT_TRUE(DexLayoutExec(&temp_dex,
+ kNullSetRefListElementInputDex,
+ nullptr /* profile_file */,
+ nullptr /* profile_filename */,
+ dexlayout_exec_argv));
}
TEST_F(DexLayoutTest, MultiClassData) {
- ScratchFile temp;
- WriteBase64ToFile(kMultiClassDataInputDex, temp.GetFile());
- ScratchFile temp2;
- WriteBase64ToFile(kDexFileLayoutInputProfile, temp2.GetFile());
- EXPECT_EQ(temp.GetFile()->Flush(), 0);
+ ScratchFile temp_dex;
+ ScratchFile temp_profile;
std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
std::vector<std::string> dexlayout_exec_argv =
- { dexlayout, "-p", temp2.GetFilename(), "-o", "/dev/null", temp.GetFilename() };
- std::string error_msg;
- const bool result = ::art::Exec(dexlayout_exec_argv, &error_msg);
- EXPECT_TRUE(result);
- if (!result) {
- LOG(ERROR) << "Error " << error_msg;
- }
+ { dexlayout, "-p", temp_profile.GetFilename(), "-o", "/dev/null", temp_dex.GetFilename() };
+ ASSERT_TRUE(DexLayoutExec(&temp_dex,
+ kMultiClassDataInputDex,
+ &temp_profile,
+ kDexFileLayoutInputProfile,
+ dexlayout_exec_argv));
}
TEST_F(DexLayoutTest, UnalignedCodeInfo) {
- ScratchFile temp;
- WriteBase64ToFile(kUnalignedCodeInfoInputDex, temp.GetFile());
- ScratchFile temp2;
- WriteBase64ToFile(kDexFileLayoutInputProfile, temp2.GetFile());
- EXPECT_EQ(temp.GetFile()->Flush(), 0);
+ ScratchFile temp_dex;
+ ScratchFile temp_profile;
std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
std::vector<std::string> dexlayout_exec_argv =
- { dexlayout, "-p", temp2.GetFilename(), "-o", "/dev/null", temp.GetFilename() };
- std::string error_msg;
- const bool result = ::art::Exec(dexlayout_exec_argv, &error_msg);
- EXPECT_TRUE(result);
- if (!result) {
- LOG(ERROR) << "Error " << error_msg;
- }
+ { dexlayout, "-p", temp_profile.GetFilename(), "-o", "/dev/null", temp_dex.GetFilename() };
+ ASSERT_TRUE(DexLayoutExec(&temp_dex,
+ kUnalignedCodeInfoInputDex,
+ &temp_profile,
+ kDexFileLayoutInputProfile,
+ dexlayout_exec_argv));
}
TEST_F(DexLayoutTest, ClassDataBeforeCode) {
- ScratchFile temp;
- WriteBase64ToFile(kClassDataBeforeCodeInputDex, temp.GetFile());
- ScratchFile temp2;
- WriteBase64ToFile(kDexFileLayoutInputProfile, temp2.GetFile());
- EXPECT_EQ(temp.GetFile()->Flush(), 0);
+ ScratchFile temp_dex;
+ ScratchFile temp_profile;
std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
std::vector<std::string> dexlayout_exec_argv =
- { dexlayout, "-p", temp2.GetFilename(), "-o", "/dev/null", temp.GetFilename() };
- std::string error_msg;
- const bool result = ::art::Exec(dexlayout_exec_argv, &error_msg);
- EXPECT_TRUE(result);
- if (!result) {
- LOG(ERROR) << "Error " << error_msg;
- }
+ { dexlayout, "-p", temp_profile.GetFilename(), "-o", "/dev/null", temp_dex.GetFilename() };
+ ASSERT_TRUE(DexLayoutExec(&temp_dex,
+ kClassDataBeforeCodeInputDex,
+ &temp_profile,
+ kDexFileLayoutInputProfile,
+ dexlayout_exec_argv));
+}
+
+TEST_F(DexLayoutTest, UnknownTypeDebugInfo) {
+ ScratchFile temp_dex;
+ std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
+ EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
+ std::vector<std::string> dexlayout_exec_argv =
+ { dexlayout, "-o", "/dev/null", temp_dex.GetFilename() };
+ ASSERT_TRUE(DexLayoutExec(&temp_dex,
+ kUnknownTypeDebugInfoInputDex,
+ nullptr /* profile_file */,
+ nullptr /* profile_filename */,
+ dexlayout_exec_argv));
}
} // namespace art
diff --git a/runtime/arch/arm/instruction_set_features_arm_test.cc b/runtime/arch/arm/instruction_set_features_arm_test.cc
index 6d5dd6d..3582351 100644
--- a/runtime/arch/arm/instruction_set_features_arm_test.cc
+++ b/runtime/arch/arm/instruction_set_features_arm_test.cc
@@ -34,6 +34,18 @@
EXPECT_STREQ("div,atomic_ldrd_strd,-armv8a", krait_features->GetFeatureString().c_str());
EXPECT_EQ(krait_features->AsBitmap(), 3U);
+ // Build features for a 32-bit ARM kryo processor.
+ std::unique_ptr<const InstructionSetFeatures> kryo_features(
+ InstructionSetFeatures::FromVariant(kArm, "kryo", &error_msg));
+ ASSERT_TRUE(kryo_features.get() != nullptr) << error_msg;
+
+ ASSERT_EQ(kryo_features->GetInstructionSet(), kArm);
+ EXPECT_TRUE(kryo_features->Equals(kryo_features.get()));
+ EXPECT_TRUE(kryo_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+ EXPECT_TRUE(kryo_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
+ EXPECT_STREQ("div,atomic_ldrd_strd,armv8a", kryo_features->GetFeatureString().c_str());
+ EXPECT_EQ(kryo_features->AsBitmap(), 7U);
+
// Build features for a 32-bit ARM denver processor.
std::unique_ptr<const InstructionSetFeatures> denver_features(
InstructionSetFeatures::FromVariant(kArm, "denver", &error_msg));
@@ -86,6 +98,18 @@
EXPECT_STREQ("div,atomic_ldrd_strd,-armv8a", krait_features->GetFeatureString().c_str());
EXPECT_EQ(krait_features->AsBitmap(), 3U);
+ // Build features for a 32-bit ARM processor with LPAE and div.
+ std::unique_ptr<const InstructionSetFeatures> kryo_features(
+ base_features->AddFeaturesFromString("atomic_ldrd_strd,div", &error_msg));
+ ASSERT_TRUE(kryo_features.get() != nullptr) << error_msg;
+
+ ASSERT_EQ(kryo_features->GetInstructionSet(), kArm);
+ EXPECT_TRUE(kryo_features->Equals(krait_features.get()));
+ EXPECT_TRUE(kryo_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+ EXPECT_TRUE(kryo_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
+ EXPECT_STREQ("div,atomic_ldrd_strd,-armv8a", kryo_features->GetFeatureString().c_str());
+ EXPECT_EQ(kryo_features->AsBitmap(), 3U);
+
// Build features for a 32-bit ARM processor with LPAE and div flipped.
std::unique_ptr<const InstructionSetFeatures> denver_features(
base_features->AddFeaturesFromString("div,atomic_ldrd_strd,armv8a", &error_msg));
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index e254dfe..2f70ded 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -509,7 +509,6 @@
};
TEST_F(ReflectionTest, StaticMainMethod) {
- TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS();
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("Main");
StackHandleScope<1> hs(soa.Self());
diff --git a/sigchainlib/OWNERS b/sigchainlib/OWNERS
new file mode 100644
index 0000000..450fc12
--- /dev/null
+++ b/sigchainlib/OWNERS
@@ -0,0 +1,4 @@
+# Default maintainers and code reviewers:
+jmgao@google.com
+dimitry@google.com
+sehr@google.com
diff --git a/test/640-checker-float-simd/src/Main.java b/test/640-checker-float-simd/src/Main.java
index 80c3112..4bcb7e2 100644
--- a/test/640-checker-float-simd/src/Main.java
+++ b/test/640-checker-float-simd/src/Main.java
@@ -107,8 +107,10 @@
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.abs() loop_optimization (after)
- //
- // TODO: fill in when supported
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAbs loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void abs() {
for (int i = 0; i < 128; i++)
a[i] = Math.abs(a[i]);
diff --git a/test/645-checker-abs-simd/expected.txt b/test/645-checker-abs-simd/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/645-checker-abs-simd/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/645-checker-abs-simd/info.txt b/test/645-checker-abs-simd/info.txt
new file mode 100644
index 0000000..8fa4066
--- /dev/null
+++ b/test/645-checker-abs-simd/info.txt
@@ -0,0 +1 @@
+Functional tests on abs SIMD vectorization.
diff --git a/test/645-checker-abs-simd/src/Main.java b/test/645-checker-abs-simd/src/Main.java
new file mode 100644
index 0000000..3111350
--- /dev/null
+++ b/test/645-checker-abs-simd/src/Main.java
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for ABS vectorization.
+ */
+public class Main {
+
+ private static final int SPQUIET = 1 << 22;
+ private static final long DPQUIET = 1L << 51;
+
+ private static void doitInt(int[] x) {
+ for (int i = 0; i < x.length; i++) {
+ x[i] = Math.abs(x[i]);
+ }
+ }
+
+ private static void doitLong(long[] x) {
+ for (int i = 0; i < x.length; i++) {
+ x[i] = Math.abs(x[i]);
+ }
+ }
+
+ private static void doitFloat(float[] x) {
+ for (int i = 0; i < x.length; i++) {
+ x[i] = Math.abs(x[i]);
+ }
+ }
+
+ private static void doitDouble(double[] x) {
+ for (int i = 0; i < x.length; i++) {
+ x[i] = Math.abs(x[i]);
+ }
+ }
+
+ public static void main(String[] args) {
+ // Set up minint32, maxint32 and some others.
+ int[] xi = new int[8];
+ xi[0] = 0x80000000;
+ xi[1] = 0x7fffffff;
+ xi[2] = 0x80000001;
+ xi[3] = -13;
+ xi[4] = -1;
+ xi[5] = 0;
+ xi[6] = 1;
+ xi[7] = 999;
+ doitInt(xi);
+ expectEquals32(0x80000000, xi[0]);
+ expectEquals32(0x7fffffff, xi[1]);
+ expectEquals32(0x7fffffff, xi[2]);
+ expectEquals32(13, xi[3]);
+ expectEquals32(1, xi[4]);
+ expectEquals32(0, xi[5]);
+ expectEquals32(1, xi[6]);
+ expectEquals32(999, xi[7]);
+
+ // Set up minint64, maxint64 and some others.
+ long[] xl = new long[8];
+ xl[0] = 0x8000000000000000L;
+ xl[1] = 0x7fffffffffffffffL;
+ xl[2] = 0x8000000000000001L;
+ xl[3] = -13;
+ xl[4] = -1;
+ xl[5] = 0;
+ xl[6] = 1;
+ xl[7] = 999;
+ doitLong(xl);
+ expectEquals64(0x8000000000000000L, xl[0]);
+ expectEquals64(0x7fffffffffffffffL, xl[1]);
+ expectEquals64(0x7fffffffffffffffL, xl[2]);
+ expectEquals64(13, xl[3]);
+ expectEquals64(1, xl[4]);
+ expectEquals64(0, xl[5]);
+ expectEquals64(1, xl[6]);
+ expectEquals64(999, xl[7]);
+
+ // Set up float NaN and some others.
+ float[] xf = new float[16];
+ xf[0] = Float.intBitsToFloat(0x7f800001);
+ xf[1] = Float.intBitsToFloat(0x7fa00000);
+ xf[2] = Float.intBitsToFloat(0x7fc00000);
+ xf[3] = Float.intBitsToFloat(0x7fffffff);
+ xf[4] = Float.intBitsToFloat(0xff800001);
+ xf[5] = Float.intBitsToFloat(0xffa00000);
+ xf[6] = Float.intBitsToFloat(0xffc00000);
+ xf[7] = Float.intBitsToFloat(0xffffffff);
+ xf[8] = Float.NEGATIVE_INFINITY;
+ xf[9] = -99.2f;
+ xf[10] = -1.0f;
+ xf[11] = -0.0f;
+ xf[12] = +0.0f;
+ xf[13] = +1.0f;
+ xf[14] = +99.2f;
+ xf[15] = Float.POSITIVE_INFINITY;
+ doitFloat(xf);
+ expectEqualsNaN32(0x7f800001, Float.floatToRawIntBits(xf[0]));
+ expectEqualsNaN32(0x7fa00000, Float.floatToRawIntBits(xf[1]));
+ expectEqualsNaN32(0x7fc00000, Float.floatToRawIntBits(xf[2]));
+ expectEqualsNaN32(0x7fffffff, Float.floatToRawIntBits(xf[3]));
+ expectEqualsNaN32(0x7f800001, Float.floatToRawIntBits(xf[4]));
+ expectEqualsNaN32(0x7fa00000, Float.floatToRawIntBits(xf[5]));
+ expectEqualsNaN32(0x7fc00000, Float.floatToRawIntBits(xf[6]));
+ expectEqualsNaN32(0x7fffffff, Float.floatToRawIntBits(xf[7]));
+ expectEquals32(
+ Float.floatToRawIntBits(Float.POSITIVE_INFINITY),
+ Float.floatToRawIntBits(xf[8]));
+ expectEquals32(
+ Float.floatToRawIntBits(99.2f),
+ Float.floatToRawIntBits(xf[9]));
+ expectEquals32(
+ Float.floatToRawIntBits(1.0f),
+ Float.floatToRawIntBits(xf[10]));
+ expectEquals32(0, Float.floatToRawIntBits(xf[11]));
+ expectEquals32(0, Float.floatToRawIntBits(xf[12]));
+ expectEquals32(
+ Float.floatToRawIntBits(1.0f),
+ Float.floatToRawIntBits(xf[13]));
+ expectEquals32(
+ Float.floatToRawIntBits(99.2f),
+ Float.floatToRawIntBits(xf[14]));
+ expectEquals32(
+ Float.floatToRawIntBits(Float.POSITIVE_INFINITY),
+ Float.floatToRawIntBits(xf[15]));
+
+ // Set up double NaN and some others.
+ double[] xd = new double[16];
+ xd[0] = Double.longBitsToDouble(0x7ff0000000000001L);
+ xd[1] = Double.longBitsToDouble(0x7ff4000000000000L);
+ xd[2] = Double.longBitsToDouble(0x7ff8000000000000L);
+ xd[3] = Double.longBitsToDouble(0x7fffffffffffffffL);
+ xd[4] = Double.longBitsToDouble(0xfff0000000000001L);
+ xd[5] = Double.longBitsToDouble(0xfff4000000000000L);
+ xd[6] = Double.longBitsToDouble(0xfff8000000000000L);
+ xd[7] = Double.longBitsToDouble(0xffffffffffffffffL);
+ xd[8] = Double.NEGATIVE_INFINITY;
+ xd[9] = -99.2f;
+ xd[10] = -1.0f;
+ xd[11] = -0.0f;
+ xd[12] = +0.0f;
+ xd[13] = +1.0f;
+ xd[14] = +99.2f;
+ xd[15] = Double.POSITIVE_INFINITY;
+ doitDouble(xd);
+ expectEqualsNaN64(0x7ff0000000000001L, Double.doubleToRawLongBits(xd[0]));
+ expectEqualsNaN64(0x7ff4000000000000L, Double.doubleToRawLongBits(xd[1]));
+ expectEqualsNaN64(0x7ff8000000000000L, Double.doubleToRawLongBits(xd[2]));
+ expectEqualsNaN64(0x7fffffffffffffffL, Double.doubleToRawLongBits(xd[3]));
+ expectEqualsNaN64(0x7ff0000000000001L, Double.doubleToRawLongBits(xd[4]));
+ expectEqualsNaN64(0x7ff4000000000000L, Double.doubleToRawLongBits(xd[5]));
+ expectEqualsNaN64(0x7ff8000000000000L, Double.doubleToRawLongBits(xd[6]));
+ expectEqualsNaN64(0x7fffffffffffffffL, Double.doubleToRawLongBits(xd[7]));
+ expectEquals64(
+ Double.doubleToRawLongBits(Double.POSITIVE_INFINITY),
+ Double.doubleToRawLongBits(xd[8]));
+ expectEquals64(
+ Double.doubleToRawLongBits(99.2f),
+ Double.doubleToRawLongBits(xd[9]));
+ expectEquals64(
+ Double.doubleToRawLongBits(1.0f),
+ Double.doubleToRawLongBits(xd[10]));
+ expectEquals64(0, Double.doubleToRawLongBits(xd[11]));
+ expectEquals64(0, Double.doubleToRawLongBits(xd[12]));
+ expectEquals64(
+ Double.doubleToRawLongBits(1.0f),
+ Double.doubleToRawLongBits(xd[13]));
+ expectEquals64(
+ Double.doubleToRawLongBits(99.2f),
+ Double.doubleToRawLongBits(xd[14]));
+ expectEquals64(
+ Double.doubleToRawLongBits(Double.POSITIVE_INFINITY),
+ Double.doubleToRawLongBits(xd[15]));
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals32(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ private static void expectEquals64(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ // We allow that an expected NaN result has become quiet.
+ private static void expectEqualsNaN32(int expected, int result) {
+ if (expected != result && (expected | SPQUIET) != result) {
+ throw new Error("Expected: 0x" + Integer.toHexString(expected)
+ + ", found: 0x" + Integer.toHexString(result));
+ }
+ }
+
+ // We allow that an expected NaN result has become quiet.
+ private static void expectEqualsNaN64(long expected, long result) {
+ if (expected != result && (expected | DPQUIET) != result) {
+ throw new Error("Expected: 0x" + Long.toHexString(expected)
+ + ", found: 0x" + Long.toHexString(result));
+ }
+ }
+}
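A note on the NaN tolerance in the test above: SPQUIET (1 << 22) and DPQUIET (1L << 51) are the most significant fraction bits of float and double, i.e. the IEEE-754 quiet-NaN bits, and the expectEqualsNaN helpers accept the expected bit pattern with or without that bit so that a signaling NaN that comes back quiet on some code path still passes. A minimal C++ restatement of the 32-bit check, for illustration only:

#include <cstdint>

// Quiet-NaN bit of a float: the most significant fraction bit (bit 22; bit 51 for double).
constexpr uint32_t kSpQuiet = 1u << 22;

// Accept the expected NaN bit pattern, or the same pattern with the quiet bit set,
// mirroring expectEqualsNaN32 in the Java test above.
bool EqualsAllowingQuieting(uint32_t expected_bits, uint32_t actual_bits) {
  return expected_bits == actual_bits || (expected_bits | kSpQuiet) == actual_bits;
}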
diff --git a/test/run-test b/test/run-test
index 91ffdfa..e46099d 100755
--- a/test/run-test
+++ b/test/run-test
@@ -716,36 +716,13 @@
export TEST_NAME=`basename ${test_dir}`
-# arch_supports_read_barrier ARCH
-# -------------------------------
-# Return whether the Optimizing compiler has read barrier support for ARCH.
-function arch_supports_read_barrier() {
- # Optimizing has read barrier support for ARM, ARM64, x86 and x86-64 at the
- # moment.
- [ "x$1" = xarm ] || [ "x$1" = xarm64 ] || [ "x$1" = xx86 ] || [ "x$1" = xx86_64 ]
-}
-
# Tests named '<number>-checker-*' will also have their CFGs verified with
# Checker when compiled with Optimizing on host.
if [[ "$TEST_NAME" =~ ^[0-9]+-checker- ]]; then
if [ "$runtime" = "art" -a "$image_suffix" = "" -a "$USE_JACK" = "true" ]; then
- # Optimizing has read barrier support for certain architectures
- # only. On other architectures, compiling is disabled when read
- # barriers are enabled, meaning that we do not produce a CFG file
- # as a side-effect of compilation, thus the Checker assertions
- # cannot be checked. Disable Checker for those cases.
- #
- # TODO: Enable Checker when read barrier support is added to more
- # architectures (b/12687968).
- if [ "x$ART_USE_READ_BARRIER" != xfalse ] \
- && (([ "x$host_mode" = "xyes" ] \
- && ! arch_supports_read_barrier "$host_arch_name") \
- || ([ "x$target_mode" = "xyes" ] \
- && ! arch_supports_read_barrier "$target_arch_name")); then
- run_checker="no"
# In no-prebuild mode, the compiler is only invoked if both dex2oat and
# patchoat are available. Disable Checker otherwise (b/22552692).
- elif [ "$prebuild_mode" = "yes" ] \
+ if [ "$prebuild_mode" = "yes" ] \
|| [ "$have_patchoat" = "yes" -a "$have_dex2oat" = "yes" ]; then
run_checker="yes"
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index 49dc657..6a8b0ae 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -499,7 +499,7 @@
else:
print_test_info(test_name, '')
except subprocess.TimeoutExpired as e:
- failed_tests.append((test_name, 'Timed out in %d seconds'))
+ failed_tests.append((test_name, 'Timed out in %d seconds' % timeout))
print_test_info(test_name, 'TIMEOUT', 'Timed out in %d seconds\n%s' % (
timeout, command))
except Exception as e:
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 55b2c59..07d7fb8 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -209,55 +209,5 @@
modes: [device],
names: ["libcore.java.lang.ProcessBuilderTest#testRedirectInherit",
"libcore.java.lang.ProcessBuilderTest#testRedirect_nullStreams"]
-},
-{
- description: "Linker issues with libjavacoretests",
- result: EXEC_FAILED,
- bug: 35417197,
- modes: [device],
- names: [
- "dalvik.system.JniTest#testGetSuperclass",
- "dalvik.system.JniTest#testPassingBooleans",
- "dalvik.system.JniTest#testPassingBytes",
- "dalvik.system.JniTest#testPassingChars",
- "dalvik.system.JniTest#testPassingClass",
- "dalvik.system.JniTest#testPassingDoubles",
- "dalvik.system.JniTest#testPassingFloats",
- "dalvik.system.JniTest#testPassingInts",
- "dalvik.system.JniTest#testPassingLongs",
- "dalvik.system.JniTest#testPassingObjectReferences",
- "dalvik.system.JniTest#testPassingShorts",
- "dalvik.system.JniTest#testPassingThis",
- "libcore.java.lang.OldSystemTest#test_load",
- "libcore.java.lang.ThreadTest#testContextClassLoaderIsInherited",
- "libcore.java.lang.ThreadTest#testContextClassLoaderIsNotNull",
- "libcore.java.lang.ThreadTest#testGetAllStackTracesIncludesAllGroups",
- "libcore.java.lang.ThreadTest#testGetStackTrace",
- "libcore.java.lang.ThreadTest#testJavaContextClassLoader",
- "libcore.java.lang.ThreadTest#testLeakingStartedThreads",
- "libcore.java.lang.ThreadTest#testLeakingUnstartedThreads",
- "libcore.java.lang.ThreadTest#testNativeThreadNames",
- "libcore.java.lang.ThreadTest#testParkUntilWithUnderflowValue",
- "libcore.java.lang.ThreadTest#testThreadDoubleStart",
- "libcore.java.lang.ThreadTest#testThreadInterrupted",
- "libcore.java.lang.ThreadTest#testThreadRestart",
- "libcore.java.lang.ThreadTest#testThreadSleep",
- "libcore.java.lang.ThreadTest#testThreadSleepIllegalArguments",
- "libcore.java.lang.ThreadTest#testThreadWakeup",
- "libcore.java.lang.ThreadTest#testUncaughtExceptionPreHandler_calledBeforeDefaultHandler",
- "libcore.java.lang.ThreadTest#testUncaughtExceptionPreHandler_noDefaultHandler",
- "libcore.java.util.TimeZoneTest#testDisplayNamesWithScript",
- "libcore.java.util.zip.ZipEntryTest#testCommentAndExtraInSameOrder",
- "libcore.java.util.zip.ZipEntryTest#testMaxLengthExtra",
- "libcore.util.NativeAllocationRegistryTest#testBadSize",
- "libcore.util.NativeAllocationRegistryTest#testEarlyFree",
- "libcore.util.NativeAllocationRegistryTest#testNativeAllocationAllocatorAndNoSharedRegistry",
- "libcore.util.NativeAllocationRegistryTest#testNativeAllocationAllocatorAndSharedRegistry",
- "libcore.util.NativeAllocationRegistryTest#testNativeAllocationNoAllocatorAndNoSharedRegistry",
- "libcore.util.NativeAllocationRegistryTest#testNativeAllocationNoAllocatorAndSharedRegistry",
- "libcore.util.NativeAllocationRegistryTest#testNullArguments",
- "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_y",
- "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_yy"
- ]
}
]