Merge "Revert "cleanup: Replace pointers with out-parameters and fix-up formatting""
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 05cfc42..3a1bd09 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -209,6 +209,11 @@
external/vixl/src \
external/zlib \
+# We optimize Thread::Current() with a direct TLS access. This requires access to a private
+# Bionic header.
+# Note: technically we only need this on device, but this avoids the duplication of the includes.
+ART_C_INCLUDES += bionic/libc/private
+
# Base set of cflags used by all things ART.
art_cflags := \
-fno-rtti \
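The build change above only adds an include path; the fast path itself lands in runtime/thread-inl.h further down in this change. As a rough standalone sketch of the pattern (assuming Bionic's private bionic_tls.h, which provides __get_tls() and the TLS_SLOT_ART_THREAD_SELF slot, plus a hypothetical pthread key for the portable fallback):

#ifdef __ANDROID__
#include <bionic_tls.h>  // Private Bionic header: __get_tls(), TLS_SLOT_ART_THREAD_SELF.
#endif
#include <pthread.h>

class Thread;                    // Opaque stand-in for art::Thread.
static pthread_key_t self_key;   // Hypothetical fallback key, created elsewhere.

static inline Thread* CurrentThread() {
#ifdef __ANDROID__
  // A single load from the per-thread slot array that Bionic maintains.
  return reinterpret_cast<Thread*>(__get_tls()[TLS_SLOT_ART_THREAD_SELF]);
#else
  // Portable fallback: a pthread-specific lookup, which is noticeably slower.
  return reinterpret_cast<Thread*>(pthread_getspecific(self_key));
#endif
}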
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index dda36fa..0b26077 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -719,7 +719,7 @@
}
// InternImageString allows us to intern while holding the heap bitmap lock. This is safe since
// we are guaranteed to not have GC during image writing.
- mirror::String* const interned = Runtime::Current()->GetInternTable()->InternImageString(
+ mirror::String* const interned = Runtime::Current()->GetInternTable()->InternStrongImageString(
obj->AsString());
if (obj != interned) {
if (!IsImageBinSlotAssigned(interned)) {
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 52a3a15..8841498 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -357,9 +357,10 @@
HInstruction* first_insn = block->GetFirstInstruction();
if (first_insn->IsLoadException()) {
// Catch block starts with a LoadException. Split the block after the
- // StoreLocal that must come after the load.
+ // StoreLocal and ClearException which must come after the load.
DCHECK(first_insn->GetNext()->IsStoreLocal());
- block = block->SplitBefore(first_insn->GetNext()->GetNext());
+ DCHECK(first_insn->GetNext()->GetNext()->IsClearException());
+ block = block->SplitBefore(first_insn->GetNext()->GetNext()->GetNext());
} else {
// Catch block does not load the exception. Split at the beginning to
// create an empty catch block.
@@ -2552,6 +2553,7 @@
case Instruction::MOVE_EXCEPTION: {
current_block_->AddInstruction(new (arena_) HLoadException());
UpdateLocal(instruction.VRegA_11x(), current_block_->GetLastInstruction());
+ current_block_->AddInstruction(new (arena_) HClearException());
break;
}
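Taken together, the two builder hunks mean a dex move-exception now expands to LoadException, StoreLocal and ClearException, and the catch-block split point moves past all three so they stay in the same block. A minimal C++ sketch of the intended semantics (FakeThread is an illustrative stand-in for the thread-local exception slot, not an ART type):

struct FakeThread {
  void* exception = nullptr;      // Stand-in for the Thread exception TLS slot.
};
thread_local FakeThread self_tls;

void* MoveException() {
  void* ex = self_tls.exception;  // HLoadException: read the pending exception.
  self_tls.exception = nullptr;   // HClearException: always clear, even if `ex` is never used.
  return ex;                      // HStoreLocal: the value written to the dex register.
}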
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 75b8f06..b0a4ce2 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -4290,6 +4290,10 @@
__ Bind(slow_path->GetExitLabel());
}
+static int32_t GetExceptionTlsOffset() {
+ return Thread::ExceptionOffset<kArmWordSize>().Int32Value();
+}
+
void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
@@ -4298,10 +4302,16 @@
void InstructionCodeGeneratorARM::VisitLoadException(HLoadException* load) {
Register out = load->GetLocations()->Out().AsRegister<Register>();
- int32_t offset = Thread::ExceptionOffset<kArmWordSize>().Int32Value();
- __ LoadFromOffset(kLoadWord, out, TR, offset);
+ __ LoadFromOffset(kLoadWord, out, TR, GetExceptionTlsOffset());
+}
+
+void LocationsBuilderARM::VisitClearException(HClearException* clear) {
+ new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+}
+
+void InstructionCodeGeneratorARM::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
__ LoadImmediate(IP, 0);
- __ StoreToOffset(kStoreWord, IP, TR, offset);
+ __ StoreToOffset(kStoreWord, IP, TR, GetExceptionTlsOffset());
}
void LocationsBuilderARM::VisitThrow(HThrow* instruction) {
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 11de4ee..bbde7e8 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2496,6 +2496,10 @@
}
}
+static MemOperand GetExceptionTlsAddress() {
+ return MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value());
+}
+
void LocationsBuilderARM64::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
@@ -2503,9 +2507,15 @@
}
void InstructionCodeGeneratorARM64::VisitLoadException(HLoadException* instruction) {
- MemOperand exception = MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value());
- __ Ldr(OutputRegister(instruction), exception);
- __ Str(wzr, exception);
+ __ Ldr(OutputRegister(instruction), GetExceptionTlsAddress());
+}
+
+void LocationsBuilderARM64::VisitClearException(HClearException* clear) {
+ new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+}
+
+void InstructionCodeGeneratorARM64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
+ __ Str(wzr, GetExceptionTlsAddress());
}
void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index e7d2ec6..a5bad65 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2544,6 +2544,10 @@
}
}
+static int32_t GetExceptionTlsOffset() {
+ return Thread::ExceptionOffset<kMips64WordSize>().Int32Value();
+}
+
void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
@@ -2552,8 +2556,15 @@
void InstructionCodeGeneratorMIPS64::VisitLoadException(HLoadException* load) {
GpuRegister out = load->GetLocations()->Out().AsRegister<GpuRegister>();
- __ LoadFromOffset(kLoadUnsignedWord, out, TR, Thread::ExceptionOffset<kMips64WordSize>().Int32Value());
- __ StoreToOffset(kStoreWord, ZERO, TR, Thread::ExceptionOffset<kMips64WordSize>().Int32Value());
+ __ LoadFromOffset(kLoadUnsignedWord, out, TR, GetExceptionTlsOffset());
+}
+
+void LocationsBuilderMIPS64::VisitClearException(HClearException* clear) {
+ new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
+ __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
}
void LocationsBuilderMIPS64::VisitLoadLocal(HLoadLocal* load) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 0569565..fdef06b 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4693,6 +4693,10 @@
__ Bind(slow_path->GetExitLabel());
}
+static Address GetExceptionTlsAddress() {
+ return Address::Absolute(Thread::ExceptionOffset<kX86WordSize>().Int32Value());
+}
+
void LocationsBuilderX86::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
@@ -4700,9 +4704,15 @@
}
void InstructionCodeGeneratorX86::VisitLoadException(HLoadException* load) {
- Address address = Address::Absolute(Thread::ExceptionOffset<kX86WordSize>().Int32Value());
- __ fs()->movl(load->GetLocations()->Out().AsRegister<Register>(), address);
- __ fs()->movl(address, Immediate(0));
+ __ fs()->movl(load->GetLocations()->Out().AsRegister<Register>(), GetExceptionTlsAddress());
+}
+
+void LocationsBuilderX86::VisitClearException(HClearException* clear) {
+ new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+}
+
+void InstructionCodeGeneratorX86::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
+ __ fs()->movl(GetExceptionTlsAddress(), Immediate(0));
}
void LocationsBuilderX86::VisitThrow(HThrow* instruction) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 287737b..4fe93f9 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -4527,6 +4527,10 @@
__ Bind(slow_path->GetExitLabel());
}
+static Address GetExceptionTlsAddress() {
+ return Address::Absolute(Thread::ExceptionOffset<kX86_64WordSize>().Int32Value(), true);
+}
+
void LocationsBuilderX86_64::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
@@ -4534,10 +4538,15 @@
}
void InstructionCodeGeneratorX86_64::VisitLoadException(HLoadException* load) {
- Address address = Address::Absolute(
- Thread::ExceptionOffset<kX86_64WordSize>().Int32Value(), true);
- __ gs()->movl(load->GetLocations()->Out().AsRegister<CpuRegister>(), address);
- __ gs()->movl(address, Immediate(0));
+ __ gs()->movl(load->GetLocations()->Out().AsRegister<CpuRegister>(), GetExceptionTlsAddress());
+}
+
+void LocationsBuilderX86_64::VisitClearException(HClearException* clear) {
+ new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+}
+
+void InstructionCodeGeneratorX86_64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
+ __ gs()->movl(GetExceptionTlsAddress(), Immediate(0));
}
void LocationsBuilderX86_64::VisitThrow(HThrow* instruction) {
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index 11f6362..10e4bc9 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -81,7 +81,7 @@
* offset
* ------
* v0 <- 1 0. const/4 v0, #+1
- * v1 <- -v0 1. neg-int v0, v1
+ * v1 <- -v0 1. neg-int v1, v0
* return v1 2. return v1
*/
TEST(ConstantFolding, IntConstantFoldingNegation) {
@@ -132,6 +132,69 @@
}
/**
+ * Tiny three-register program exercising long constant folding on negation.
+ *
+ * 16-bit
+ * offset
+ * ------
+ * (v0, v1) <- 4294967296 0. const-wide v0, #+4294967296
+ * (v2, v3) <- -(v0, v1) 1. neg-long v2, v0
+ * return (v2, v3) 2. return-wide v2
+ */
+TEST(ConstantFolding, LongConstantFoldingNegation) {
+ const int64_t input = INT64_C(4294967296); // 2^32
+ const uint16_t word0 = Low16Bits(Low32Bits(input)); // LSW.
+ const uint16_t word1 = High16Bits(Low32Bits(input));
+ const uint16_t word2 = Low16Bits(High32Bits(input));
+ const uint16_t word3 = High16Bits(High32Bits(input)); // MSW.
+ const uint16_t data[] = FOUR_REGISTERS_CODE_ITEM(
+ Instruction::CONST_WIDE | 0 << 8, word0, word1, word2, word3,
+ Instruction::NEG_LONG | 2 << 8 | 0 << 12,
+ Instruction::RETURN_WIDE | 2 << 8);
+
+ std::string expected_before =
+ "BasicBlock 0, succ: 1\n"
+ " 4: LongConstant [7]\n"
+ " 12: SuspendCheck\n"
+ " 13: Goto 1\n"
+ "BasicBlock 1, pred: 0, succ: 2\n"
+ " 7: Neg(4) [10]\n"
+ " 10: Return(7)\n"
+ "BasicBlock 2, pred: 1\n"
+ " 11: Exit\n";
+
+ // Expected difference after constant folding.
+ diff_t expected_cf_diff = {
+ { " 4: LongConstant [7]\n", " 4: LongConstant\n" },
+ { " 12: SuspendCheck\n", " 12: SuspendCheck\n"
+ " 14: LongConstant [10]\n" },
+ { " 7: Neg(4) [10]\n", removed },
+ { " 10: Return(7)\n", " 10: Return(14)\n" }
+ };
+ std::string expected_after_cf = Patch(expected_before, expected_cf_diff);
+
+ // Check the value of the computed constant.
+ auto check_after_cf = [](HGraph* graph) {
+ HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction()->InputAt(0);
+ ASSERT_TRUE(inst->IsLongConstant());
+ ASSERT_EQ(inst->AsLongConstant()->GetValue(), INT64_C(-4294967296));
+ };
+
+ // Expected difference after dead code elimination.
+ diff_t expected_dce_diff = {
+ { " 4: LongConstant\n", removed },
+ };
+ std::string expected_after_dce = Patch(expected_after_cf, expected_dce_diff);
+
+ TestCode(data,
+ expected_before,
+ expected_after_cf,
+ expected_after_dce,
+ check_after_cf,
+ Primitive::kPrimLong);
+}
+
+/**
* Tiny three-register program exercising int constant folding on addition.
*
* 16-bit
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 8ab0b77..62cdb4c 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -690,7 +690,7 @@
LocationSummary::kNoCall,
kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetOut(Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
return;
}
@@ -732,6 +732,9 @@
// And truncate to an integer.
__ roundss(inPlusPointFive, inPlusPointFive, Immediate(1));
+ // Load maxInt into out.
+ codegen_->Load64BitValue(out, kPrimIntMax);
+
// if inPlusPointFive >= maxInt goto done
__ comiss(inPlusPointFive, codegen_->LiteralFloatAddress(static_cast<float>(kPrimIntMax)));
__ j(kAboveEqual, &done);
@@ -776,6 +779,9 @@
// And truncate to an integer.
__ roundsd(inPlusPointFive, inPlusPointFive, Immediate(1));
+ // Load maxLong into out.
+ codegen_->Load64BitValue(out, kPrimLongMax);
+
// if inPlusPointFive >= maxLong goto done
__ comisd(inPlusPointFive, codegen_->LiteralDoubleAddress(static_cast<double>(kPrimLongMax)));
__ j(kAboveEqual, &done);
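Both hunks above pair with the locations change earlier in this file: the result now lives in a core register, and the saturation value is preloaded into it before the comparison, so the early branch to `done` already leaves the right answer behind. A hedged C-level sketch of the float case (the real code is emitted x86-64 assembly, and the zero/NaN path below the shown hunks is outside this diff):

#include <cmath>
#include <cstdint>
#include <limits>

static constexpr int32_t kPrimIntMax = std::numeric_limits<int32_t>::max();

int32_t RoundFloatSketch(float in) {
  int32_t out = kPrimIntMax;                  // The new instruction: preload the saturation value.
  float t = std::floor(in + 0.5f);            // roundss(..., Immediate(1)) rounds toward -infinity.
  if (t >= static_cast<float>(kPrimIntMax)) {
    return out;                               // j(kAboveEqual, &done): out already holds maxInt.
  }
  // Remaining path (zero/NaN handling, cvttss2si) is unchanged by this diff.
  return static_cast<int32_t>(t);
}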
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 61dadc2..b6a1980 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1012,35 +1012,25 @@
HConstant* HUnaryOperation::TryStaticEvaluation() const {
if (GetInput()->IsIntConstant()) {
- int32_t value = Evaluate(GetInput()->AsIntConstant()->GetValue());
- return GetBlock()->GetGraph()->GetIntConstant(value);
+ return Evaluate(GetInput()->AsIntConstant());
} else if (GetInput()->IsLongConstant()) {
- // TODO: Implement static evaluation of long unary operations.
- //
- // Do not exit with a fatal condition here. Instead, simply
- // return `null' to notify the caller that this instruction
- // cannot (yet) be statically evaluated.
- return nullptr;
+ return Evaluate(GetInput()->AsLongConstant());
}
return nullptr;
}
HConstant* HBinaryOperation::TryStaticEvaluation() const {
- if (GetLeft()->IsIntConstant() && GetRight()->IsIntConstant()) {
- int32_t value = Evaluate(GetLeft()->AsIntConstant()->GetValue(),
- GetRight()->AsIntConstant()->GetValue());
- return GetBlock()->GetGraph()->GetIntConstant(value);
- } else if (GetLeft()->IsLongConstant() && GetRight()->IsLongConstant()) {
- int64_t value = Evaluate(GetLeft()->AsLongConstant()->GetValue(),
- GetRight()->AsLongConstant()->GetValue());
- if (GetResultType() == Primitive::kPrimLong) {
- return GetBlock()->GetGraph()->GetLongConstant(value);
- } else if (GetResultType() == Primitive::kPrimBoolean) {
- // This can be the result of an HCondition evaluation.
- return GetBlock()->GetGraph()->GetIntConstant(static_cast<int32_t>(value));
- } else {
- DCHECK_EQ(GetResultType(), Primitive::kPrimInt);
- return GetBlock()->GetGraph()->GetIntConstant(static_cast<int32_t>(value));
+ if (GetLeft()->IsIntConstant()) {
+ if (GetRight()->IsIntConstant()) {
+ return Evaluate(GetLeft()->AsIntConstant(), GetRight()->AsIntConstant());
+ } else if (GetRight()->IsLongConstant()) {
+ return Evaluate(GetLeft()->AsIntConstant(), GetRight()->AsLongConstant());
+ }
+ } else if (GetLeft()->IsLongConstant()) {
+ if (GetRight()->IsIntConstant()) {
+ return Evaluate(GetLeft()->AsLongConstant(), GetRight()->AsIntConstant());
+ } else if (GetRight()->IsLongConstant()) {
+ return Evaluate(GetLeft()->AsLongConstant(), GetRight()->AsLongConstant());
}
}
return nullptr;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 9b8521d..814cebb 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -17,6 +17,8 @@
#ifndef ART_COMPILER_OPTIMIZING_NODES_H_
#define ART_COMPILER_OPTIMIZING_NODES_H_
+#include <type_traits>
+
#include "base/arena_containers.h"
#include "base/arena_object.h"
#include "dex/compiler_enums.h"
@@ -961,6 +963,7 @@
M(BoundsCheck, Instruction) \
M(BoundType, Instruction) \
M(CheckCast, Instruction) \
+ M(ClearException, Instruction) \
M(ClinitCheck, Instruction) \
M(Compare, BinaryOperation) \
M(Condition, BinaryOperation) \
@@ -1581,6 +1584,7 @@
HInstruction* GetPreviousDisregardingMoves() const;
HBasicBlock* GetBlock() const { return block_; }
+ ArenaAllocator* GetArena() const { return block_->GetGraph()->GetArena(); }
void SetBlock(HBasicBlock* block) { block_ = block; }
bool IsInBlock() const { return block_ != nullptr; }
bool IsInLoop() const { return block_->IsInLoop(); }
@@ -2047,6 +2051,95 @@
DISALLOW_COPY_AND_ASSIGN(HGoto);
};
+class HConstant : public HExpression<0> {
+ public:
+ explicit HConstant(Primitive::Type type) : HExpression(type, SideEffects::None()) {}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
+ virtual bool IsMinusOne() const { return false; }
+ virtual bool IsZero() const { return false; }
+ virtual bool IsOne() const { return false; }
+
+ DECLARE_INSTRUCTION(Constant);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HConstant);
+};
+
+class HNullConstant : public HConstant {
+ public:
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ return true;
+ }
+
+ size_t ComputeHashCode() const OVERRIDE { return 0; }
+
+ DECLARE_INSTRUCTION(NullConstant);
+
+ private:
+ HNullConstant() : HConstant(Primitive::kPrimNot) {}
+
+ friend class HGraph;
+ DISALLOW_COPY_AND_ASSIGN(HNullConstant);
+};
+
+// Constants of the type int. Those can be from Dex instructions, or
+// synthesized (for example with the if-eqz instruction).
+class HIntConstant : public HConstant {
+ public:
+ int32_t GetValue() const { return value_; }
+
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ DCHECK(other->IsIntConstant());
+ return other->AsIntConstant()->value_ == value_;
+ }
+
+ size_t ComputeHashCode() const OVERRIDE { return GetValue(); }
+
+ bool IsMinusOne() const OVERRIDE { return GetValue() == -1; }
+ bool IsZero() const OVERRIDE { return GetValue() == 0; }
+ bool IsOne() const OVERRIDE { return GetValue() == 1; }
+
+ DECLARE_INSTRUCTION(IntConstant);
+
+ private:
+ explicit HIntConstant(int32_t value) : HConstant(Primitive::kPrimInt), value_(value) {}
+ explicit HIntConstant(bool value) : HConstant(Primitive::kPrimInt), value_(value ? 1 : 0) {}
+
+ const int32_t value_;
+
+ friend class HGraph;
+ ART_FRIEND_TEST(GraphTest, InsertInstructionBefore);
+ ART_FRIEND_TYPED_TEST(ParallelMoveTest, ConstantLast);
+ DISALLOW_COPY_AND_ASSIGN(HIntConstant);
+};
+
+class HLongConstant : public HConstant {
+ public:
+ int64_t GetValue() const { return value_; }
+
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ DCHECK(other->IsLongConstant());
+ return other->AsLongConstant()->value_ == value_;
+ }
+
+ size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
+
+ bool IsMinusOne() const OVERRIDE { return GetValue() == -1; }
+ bool IsZero() const OVERRIDE { return GetValue() == 0; }
+ bool IsOne() const OVERRIDE { return GetValue() == 1; }
+
+ DECLARE_INSTRUCTION(LongConstant);
+
+ private:
+ explicit HLongConstant(int64_t value) : HConstant(Primitive::kPrimLong), value_(value) {}
+
+ const int64_t value_;
+
+ friend class HGraph;
+ DISALLOW_COPY_AND_ASSIGN(HLongConstant);
+};
// Conditional branch. A block ending with an HIf instruction must have
// two successors.
@@ -2196,8 +2289,8 @@
HConstant* TryStaticEvaluation() const;
// Apply this operation to `x`.
- virtual int32_t Evaluate(int32_t x) const = 0;
- virtual int64_t Evaluate(int64_t x) const = 0;
+ virtual HConstant* Evaluate(HIntConstant* x) const = 0;
+ virtual HConstant* Evaluate(HLongConstant* x) const = 0;
DECLARE_INSTRUCTION(UnaryOperation);
@@ -2264,8 +2357,18 @@
HConstant* TryStaticEvaluation() const;
// Apply this operation to `x` and `y`.
- virtual int32_t Evaluate(int32_t x, int32_t y) const = 0;
- virtual int64_t Evaluate(int64_t x, int64_t y) const = 0;
+ virtual HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const = 0;
+ virtual HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const = 0;
+ virtual HConstant* Evaluate(HIntConstant* x ATTRIBUTE_UNUSED,
+ HLongConstant* y ATTRIBUTE_UNUSED) const {
+ VLOG(compiler) << DebugName() << " is not defined for the (int, long) case.";
+ return nullptr;
+ }
+ virtual HConstant* Evaluate(HLongConstant* x ATTRIBUTE_UNUSED,
+ HIntConstant* y ATTRIBUTE_UNUSED) const {
+ VLOG(compiler) << DebugName() << " is not defined for the (long, int) case.";
+ return nullptr;
+ }
// Returns an input that can legally be used as the right input and is
// constant, or null.
@@ -2348,11 +2451,13 @@
bool IsCommutative() const OVERRIDE { return true; }
- int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
- return x == y ? 1 : 0;
+ template <typename T> bool Compute(T x, T y) const { return x == y; }
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
}
- int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
- return x == y ? 1 : 0;
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
}
DECLARE_INSTRUCTION(Equal);
@@ -2376,11 +2481,13 @@
bool IsCommutative() const OVERRIDE { return true; }
- int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
- return x != y ? 1 : 0;
+ template <typename T> bool Compute(T x, T y) const { return x != y; }
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
}
- int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
- return x != y ? 1 : 0;
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
}
DECLARE_INSTRUCTION(NotEqual);
@@ -2402,11 +2509,13 @@
HLessThan(HInstruction* first, HInstruction* second)
: HCondition(first, second) {}
- int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
- return x < y ? 1 : 0;
+ template <typename T> bool Compute(T x, T y) const { return x < y; }
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
}
- int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
- return x < y ? 1 : 0;
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
}
DECLARE_INSTRUCTION(LessThan);
@@ -2428,11 +2537,13 @@
HLessThanOrEqual(HInstruction* first, HInstruction* second)
: HCondition(first, second) {}
- int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
- return x <= y ? 1 : 0;
+ template <typename T> bool Compute(T x, T y) const { return x <= y; }
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
}
- int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
- return x <= y ? 1 : 0;
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
}
DECLARE_INSTRUCTION(LessThanOrEqual);
@@ -2454,11 +2565,13 @@
HGreaterThan(HInstruction* first, HInstruction* second)
: HCondition(first, second) {}
- int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
- return x > y ? 1 : 0;
+ template <typename T> bool Compute(T x, T y) const { return x > y; }
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
}
- int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
- return x > y ? 1 : 0;
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
}
DECLARE_INSTRUCTION(GreaterThan);
@@ -2480,11 +2593,13 @@
HGreaterThanOrEqual(HInstruction* first, HInstruction* second)
: HCondition(first, second) {}
- int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
- return x >= y ? 1 : 0;
+ template <typename T> bool Compute(T x, T y) const { return x >= y; }
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
}
- int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
- return x >= y ? 1 : 0;
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
}
DECLARE_INSTRUCTION(GreaterThanOrEqual);
@@ -2516,18 +2631,14 @@
DCHECK_EQ(type, second->GetType());
}
- int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
- return
- x == y ? 0 :
- x > y ? 1 :
- -1;
- }
+ template <typename T>
+ int32_t Compute(T x, T y) const { return x == y ? 0 : x > y ? 1 : -1; }
- int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
- return
- x == y ? 0 :
- x > y ? 1 :
- -1;
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
}
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
@@ -2599,27 +2710,12 @@
DISALLOW_COPY_AND_ASSIGN(HStoreLocal);
};
-class HConstant : public HExpression<0> {
- public:
- explicit HConstant(Primitive::Type type) : HExpression(type, SideEffects::None()) {}
-
- bool CanBeMoved() const OVERRIDE { return true; }
-
- virtual bool IsMinusOne() const { return false; }
- virtual bool IsZero() const { return false; }
- virtual bool IsOne() const { return false; }
-
- DECLARE_INSTRUCTION(Constant);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(HConstant);
-};
-
class HFloatConstant : public HConstant {
public:
float GetValue() const { return value_; }
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ DCHECK(other->IsFloatConstant());
return bit_cast<uint32_t, float>(other->AsFloatConstant()->value_) ==
bit_cast<uint32_t, float>(value_);
}
@@ -2659,6 +2755,7 @@
double GetValue() const { return value_; }
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ DCHECK(other->IsDoubleConstant());
return bit_cast<uint64_t, double>(other->AsDoubleConstant()->value_) ==
bit_cast<uint64_t, double>(value_);
}
@@ -2693,77 +2790,6 @@
DISALLOW_COPY_AND_ASSIGN(HDoubleConstant);
};
-class HNullConstant : public HConstant {
- public:
- bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
- return true;
- }
-
- size_t ComputeHashCode() const OVERRIDE { return 0; }
-
- DECLARE_INSTRUCTION(NullConstant);
-
- private:
- HNullConstant() : HConstant(Primitive::kPrimNot) {}
-
- friend class HGraph;
- DISALLOW_COPY_AND_ASSIGN(HNullConstant);
-};
-
-// Constants of the type int. Those can be from Dex instructions, or
-// synthesized (for example with the if-eqz instruction).
-class HIntConstant : public HConstant {
- public:
- int32_t GetValue() const { return value_; }
-
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- return other->AsIntConstant()->value_ == value_;
- }
-
- size_t ComputeHashCode() const OVERRIDE { return GetValue(); }
-
- bool IsMinusOne() const OVERRIDE { return GetValue() == -1; }
- bool IsZero() const OVERRIDE { return GetValue() == 0; }
- bool IsOne() const OVERRIDE { return GetValue() == 1; }
-
- DECLARE_INSTRUCTION(IntConstant);
-
- private:
- explicit HIntConstant(int32_t value) : HConstant(Primitive::kPrimInt), value_(value) {}
-
- const int32_t value_;
-
- friend class HGraph;
- ART_FRIEND_TEST(GraphTest, InsertInstructionBefore);
- ART_FRIEND_TYPED_TEST(ParallelMoveTest, ConstantLast);
- DISALLOW_COPY_AND_ASSIGN(HIntConstant);
-};
-
-class HLongConstant : public HConstant {
- public:
- int64_t GetValue() const { return value_; }
-
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- return other->AsLongConstant()->value_ == value_;
- }
-
- size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
-
- bool IsMinusOne() const OVERRIDE { return GetValue() == -1; }
- bool IsZero() const OVERRIDE { return GetValue() == 0; }
- bool IsOne() const OVERRIDE { return GetValue() == 1; }
-
- DECLARE_INSTRUCTION(LongConstant);
-
- private:
- explicit HLongConstant(int64_t value) : HConstant(Primitive::kPrimLong), value_(value) {}
-
- const int64_t value_;
-
- friend class HGraph;
- DISALLOW_COPY_AND_ASSIGN(HLongConstant);
-};
-
enum class Intrinsics {
#define OPTIMIZING_INTRINSICS(Name, IsStatic) k ## Name,
#include "intrinsics_list.h"
@@ -3082,8 +3108,14 @@
explicit HNeg(Primitive::Type result_type, HInstruction* input)
: HUnaryOperation(result_type, input) {}
- int32_t Evaluate(int32_t x) const OVERRIDE { return -x; }
- int64_t Evaluate(int64_t x) const OVERRIDE { return -x; }
+ template <typename T> T Compute(T x) const { return -x; }
+
+ HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()));
+ }
+ HConstant* Evaluate(HLongConstant* x) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()));
+ }
DECLARE_INSTRUCTION(Neg);
@@ -3140,11 +3172,13 @@
bool IsCommutative() const OVERRIDE { return true; }
- int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
- return x + y;
+ template <typename T> T Compute(T x, T y) const { return x + y; }
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
}
- int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
- return x + y;
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
}
DECLARE_INSTRUCTION(Add);
@@ -3158,11 +3192,13 @@
HSub(Primitive::Type result_type, HInstruction* left, HInstruction* right)
: HBinaryOperation(result_type, left, right) {}
- int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
- return x - y;
+ template <typename T> T Compute(T x, T y) const { return x - y; }
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
}
- int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
- return x - y;
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
}
DECLARE_INSTRUCTION(Sub);
@@ -3178,8 +3214,14 @@
bool IsCommutative() const OVERRIDE { return true; }
- int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x * y; }
- int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x * y; }
+ template <typename T> T Compute(T x, T y) const { return x * y; }
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ }
DECLARE_INSTRUCTION(Mul);
@@ -3192,17 +3234,20 @@
HDiv(Primitive::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc)
: HBinaryOperation(result_type, left, right), dex_pc_(dex_pc) {}
- int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
- // Our graph structure ensures we never have 0 for `y` during constant folding.
+ template <typename T>
+ T Compute(T x, T y) const {
+ // Our graph structure ensures we never have 0 for `y` during
+ // constant folding.
DCHECK_NE(y, 0);
// Special case -1 to avoid getting a SIGFPE on x86(_64).
return (y == -1) ? -x : x / y;
}
- int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
- DCHECK_NE(y, 0);
- // Special case -1 to avoid getting a SIGFPE on x86(_64).
- return (y == -1) ? -x : x / y;
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
}
uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
@@ -3220,16 +3265,20 @@
HRem(Primitive::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc)
: HBinaryOperation(result_type, left, right), dex_pc_(dex_pc) {}
- int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
+ template <typename T>
+ T Compute(T x, T y) const {
+ // Our graph structure ensures we never have 0 for `y` during
+ // constant folding.
DCHECK_NE(y, 0);
// Special case -1 to avoid getting a SIGFPE on x86(_64).
return (y == -1) ? 0 : x % y;
}
- int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
- DCHECK_NE(y, 0);
- // Special case -1 to avoid getting a SIGFPE on x86(_64).
- return (y == -1) ? 0 : x % y;
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
}
uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
@@ -3274,8 +3323,27 @@
HShl(Primitive::Type result_type, HInstruction* left, HInstruction* right)
: HBinaryOperation(result_type, left, right) {}
- int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x << (y & kMaxIntShiftValue); }
- int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x << (y & kMaxLongShiftValue); }
+ template <typename T, typename U, typename V>
+ T Compute(T x, U y, V max_shift_value) const {
+ static_assert(std::is_same<V, typename std::make_unsigned<T>::type>::value,
+ "V is not the unsigned integer type corresponding to T");
+ return x << (y & max_shift_value);
+ }
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue(), kMaxIntShiftValue));
+ }
+ // There is no `Evaluate(HIntConstant* x, HLongConstant* y)`, as this
+ // case is handled as `x << static_cast<int>(y)`.
+ HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue));
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue));
+ }
DECLARE_INSTRUCTION(Shl);
@@ -3288,8 +3356,27 @@
HShr(Primitive::Type result_type, HInstruction* left, HInstruction* right)
: HBinaryOperation(result_type, left, right) {}
- int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x >> (y & kMaxIntShiftValue); }
- int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x >> (y & kMaxLongShiftValue); }
+ template <typename T, typename U, typename V>
+ T Compute(T x, U y, V max_shift_value) const {
+ static_assert(std::is_same<V, typename std::make_unsigned<T>::type>::value,
+ "V is not the unsigned integer type corresponding to T");
+ return x >> (y & max_shift_value);
+ }
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue(), kMaxIntShiftValue));
+ }
+ // There is no `Evaluate(HIntConstant* x, HLongConstant* y)`, as this
+ // case is handled as `x >> static_cast<int>(y)`.
+ HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue));
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue));
+ }
DECLARE_INSTRUCTION(Shr);
@@ -3302,16 +3389,27 @@
HUShr(Primitive::Type result_type, HInstruction* left, HInstruction* right)
: HBinaryOperation(result_type, left, right) {}
- int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
- uint32_t ux = static_cast<uint32_t>(x);
- uint32_t uy = static_cast<uint32_t>(y) & kMaxIntShiftValue;
- return static_cast<int32_t>(ux >> uy);
+ template <typename T, typename U, typename V>
+ T Compute(T x, U y, V max_shift_value) const {
+ static_assert(std::is_same<V, typename std::make_unsigned<T>::type>::value,
+ "V is not the unsigned integer type corresponding to T");
+ V ux = static_cast<V>(x);
+ return static_cast<T>(ux >> (y & max_shift_value));
}
- int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
- uint64_t ux = static_cast<uint64_t>(x);
- uint64_t uy = static_cast<uint64_t>(y) & kMaxLongShiftValue;
- return static_cast<int64_t>(ux >> uy);
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue(), kMaxIntShiftValue));
+ }
+ // There is no `Evaluate(HIntConstant* x, HLongConstant* y)`, as this
+ // case is handled as `x >>> static_cast<int>(y)`.
+ HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue));
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue));
}
DECLARE_INSTRUCTION(UShr);
@@ -3327,8 +3425,21 @@
bool IsCommutative() const OVERRIDE { return true; }
- int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x & y; }
- int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x & y; }
+ template <typename T, typename U>
+ auto Compute(T x, U y) const -> decltype(x & y) { return x & y; }
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ }
+ HConstant* Evaluate(HIntConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ }
+ HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ }
DECLARE_INSTRUCTION(And);
@@ -3343,8 +3454,21 @@
bool IsCommutative() const OVERRIDE { return true; }
- int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x | y; }
- int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x | y; }
+ template <typename T, typename U>
+ auto Compute(T x, U y) const -> decltype(x | y) { return x | y; }
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ }
+ HConstant* Evaluate(HIntConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ }
+ HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ }
DECLARE_INSTRUCTION(Or);
@@ -3359,8 +3483,21 @@
bool IsCommutative() const OVERRIDE { return true; }
- int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x ^ y; }
- int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x ^ y; }
+ template <typename T, typename U>
+ auto Compute(T x, U y) const -> decltype(x ^ y) { return x ^ y; }
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ }
+ HConstant* Evaluate(HIntConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ }
+ HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ }
DECLARE_INSTRUCTION(Xor);
@@ -3405,8 +3542,14 @@
return true;
}
- int32_t Evaluate(int32_t x) const OVERRIDE { return ~x; }
- int64_t Evaluate(int64_t x) const OVERRIDE { return ~x; }
+ template <typename T> T Compute(T x) const { return ~x; }
+
+ HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()));
+ }
+ HConstant* Evaluate(HLongConstant* x) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()));
+ }
DECLARE_INSTRUCTION(Not);
@@ -3425,13 +3568,16 @@
return true;
}
- int32_t Evaluate(int32_t x) const OVERRIDE {
+ template <typename T> bool Compute(T x) const {
DCHECK(IsUint<1>(x));
return !x;
}
- int64_t Evaluate(int64_t x ATTRIBUTE_UNUSED) const OVERRIDE {
- LOG(FATAL) << DebugName() << " cannot be used with 64-bit values";
+ HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()));
+ }
+ HConstant* Evaluate(HLongConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for long values";
UNREACHABLE();
}
@@ -4142,6 +4288,18 @@
DISALLOW_COPY_AND_ASSIGN(HLoadException);
};
+// Implicit part of move-exception which clears thread-local exception storage.
+// Must not be removed because the runtime expects the TLS to get cleared.
+class HClearException : public HTemplateInstruction<0> {
+ public:
+ HClearException() : HTemplateInstruction(SideEffects::AllWrites()) {}
+
+ DECLARE_INSTRUCTION(ClearException);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HClearException);
+};
+
class HThrow : public HTemplateInstruction<1> {
public:
HThrow(HInstruction* exception, uint32_t dex_pc)
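One pattern from the nodes.h changes worth calling out: each typed Evaluate overload forwards to a small Compute template, and for the shifts a static_assert pins the mask constant to the unsigned type matching the shifted value. A standalone check of that pattern (assuming kMaxIntShiftValue and kMaxLongShiftValue are 31 and 63, i.e. Java shift-distance masking):

#include <cassert>
#include <cstdint>
#include <type_traits>

template <typename T, typename U, typename V>
T ShlCompute(T x, U y, V max_shift_value) {
  static_assert(std::is_same<V, typename std::make_unsigned<T>::type>::value,
                "V is not the unsigned integer type corresponding to T");
  return x << (y & max_shift_value);
}

int main() {
  // Java semantics: only the low bits of the shift distance count, so 1 << 33 == 1 << 1.
  assert(ShlCompute(int32_t{1}, 33, uint32_t{31}) == 2);
  assert(ShlCompute(int64_t{1}, 65, uint64_t{63}) == 2);
  return 0;
}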
diff --git a/compiler/optimizing/parallel_move_resolver.cc b/compiler/optimizing/parallel_move_resolver.cc
index 54ea6f1..f9d812f 100644
--- a/compiler/optimizing/parallel_move_resolver.cc
+++ b/compiler/optimizing/parallel_move_resolver.cc
@@ -38,6 +38,20 @@
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
+ // Do stack-to-stack moves first to take advantage of a free register on constrained machines.
+ for (size_t i = 0; i < moves_.Size(); ++i) {
+ const MoveOperands& move = *moves_.Get(i);
+ // Ignore constants and moves already eliminated.
+ if (move.IsEliminated() || move.GetSource().IsConstant()) {
+ continue;
+ }
+
+ if ((move.GetSource().IsStackSlot() || move.GetSource().IsDoubleStackSlot()) &&
+ (move.GetDestination().IsStackSlot() || move.GetDestination().IsDoubleStackSlot())) {
+ PerformMove(i);
+ }
+ }
+
for (size_t i = 0; i < moves_.Size(); ++i) {
const MoveOperands& move = *moves_.Get(i);
// Skip constants to perform them last. They don't block other moves
diff --git a/runtime/barrier.h b/runtime/barrier.h
index 02f9f58..94977fb 100644
--- a/runtime/barrier.h
+++ b/runtime/barrier.h
@@ -51,7 +51,7 @@
// to sleep, resulting in a deadlock.
// Increment the count by delta, wait on condition if count is non zero.
- void Increment(Thread* self, int delta) REQUIRES(!lock_);;
+ void Increment(Thread* self, int delta) REQUIRES(!lock_);
// Increment the count by delta, wait on condition if count is non zero, with a timeout. Returns
// true if time out occurred.
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index d0504d9..2801fb7 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -332,36 +332,40 @@
bool IsExclusiveHeld(const Thread* self) const;
// Assert the current thread has exclusive access to the ReaderWriterMutex.
- void AssertExclusiveHeld(const Thread* self) {
+ void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
if (kDebugLocking && (gAborting == 0)) {
CHECK(IsExclusiveHeld(self)) << *this;
}
}
- void AssertWriterHeld(const Thread* self) { AssertExclusiveHeld(self); }
+ void AssertWriterHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }
// Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
- void AssertNotExclusiveHeld(const Thread* self) {
+ void AssertNotExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
if (kDebugLocking && (gAborting == 0)) {
CHECK(!IsExclusiveHeld(self)) << *this;
}
}
- void AssertNotWriterHeld(const Thread* self) { AssertNotExclusiveHeld(self); }
+ void AssertNotWriterHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
+ AssertNotExclusiveHeld(self);
+ }
// Is the current thread a shared holder of the ReaderWriterMutex.
bool IsSharedHeld(const Thread* self) const;
// Assert the current thread has shared access to the ReaderWriterMutex.
- void AssertSharedHeld(const Thread* self) {
+ void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
if (kDebugLocking && (gAborting == 0)) {
// TODO: we can only assert this well when self != null.
CHECK(IsSharedHeld(self) || self == nullptr) << *this;
}
}
- void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }
+ void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
+ AssertSharedHeld(self);
+ }
// Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
// mode.
- void AssertNotHeld(const Thread* self) {
+ void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) {
if (kDebugLocking && (gAborting == 0)) {
CHECK(!IsSharedHeld(self)) << *this;
}
@@ -679,6 +683,7 @@
class Roles {
public:
+ // Uninterruptible means that the thread may not become suspended.
static Uninterruptible uninterruptible_;
};
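The ASSERT_CAPABILITY/ASSERT_SHARED_CAPABILITY additions above let Clang's -Wthread-safety analysis learn from the assert helpers instead of treating them as plain calls. A minimal standalone illustration of the idea, using the raw Clang attributes rather than ART's wrapper macros (assumes clang with -Wthread-safety):

#include <mutex>

class __attribute__((capability("mutex"))) CapMutex {
 public:
  void Lock() __attribute__((acquire_capability())) { mu_.lock(); }
  void Unlock() __attribute__((release_capability())) { mu_.unlock(); }
  // Like AssertExclusiveHeld(): tells the analysis the capability is held from here on.
  void AssertHeld() __attribute__((assert_capability(this))) {}
 private:
  std::mutex mu_;
};

CapMutex gLock;
int gGuarded __attribute__((guarded_by(gLock))) = 0;

void BumpAlreadyLocked() {
  gLock.AssertHeld();  // Without an assert (or acquire) annotation here, the next line would warn.
  ++gGuarded;
}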
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 252a47d..4182954 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -107,13 +107,14 @@
return item.IsNull();
}
};
- // hash set which hashes class descriptor, and compares descriptors nad class loaders. Results
+ // hash set which hashes class descriptor, and compares descriptors and class loaders. Results
// should be compared for a matching Class descriptor and class loader.
typedef HashSet<GcRoot<mirror::Class>, GcRootEmptyFn, ClassDescriptorHashEquals,
ClassDescriptorHashEquals, TrackingAllocator<GcRoot<mirror::Class>, kAllocatorTagClassTable>>
ClassSet;
// TODO: shard lock to have one per class loader.
+ // We have a vector to help prevent dirty pages after the zygote forks by calling FreezeSnapshot.
std::vector<ClassSet> classes_ GUARDED_BY(Locks::classlinker_classes_lock_);
};
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index c9ae9b8..8e60814 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -2093,7 +2093,7 @@
case kWaitingInMainDebuggerLoop:
case kWaitingInMainSignalCatcherLoop:
case kWaitingPerformingGc:
- case kWaitingWeakRootRead:
+ case kWaitingWeakGcRootRead:
case kWaiting:
return JDWP::TS_WAIT;
// Don't add a 'default' here so the compiler can spot incompatible enum changes.
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 157f609..b9e8925 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -101,28 +101,34 @@
// Extra parameters are required since we use this same visitor signature for checking objects.
void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::mutator_lock_) {
- // Only add the reference if it is non null and fits our criteria.
- mirror::HeapReference<Object>* const obj_ptr = obj->GetFieldObjectReferenceAddr(offset);
- mirror::Object* ref = obj_ptr->AsMirrorPtr();
- if (ref != nullptr && !from_space_->HasAddress(ref) && !immune_space_->HasAddress(ref)) {
- *contains_reference_to_other_space_ = true;
- visitor_->MarkHeapReference(obj_ptr);
- }
+ MarkReference(obj->GetFieldObjectReferenceAddr(offset));
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
SHARED_REQUIRES(Locks::mutator_lock_) {
- if (kIsDebugBuild && !root->IsNull()) {
- VisitRoot(root);
- }
+ VisitRoot(root);
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(from_space_->HasAddress(root->AsMirrorPtr()));
+ MarkReference(root);
}
private:
+ template<bool kPoisonReferences>
+ void MarkReference(mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ // Only add the reference if it is non null and fits our criteria.
+ mirror::Object* ref = obj_ptr->AsMirrorPtr();
+ if (ref != nullptr && !from_space_->HasAddress(ref) && !immune_space_->HasAddress(ref)) {
+ *contains_reference_to_other_space_ = true;
+ mirror::Object* new_object = visitor_->MarkObject(ref);
+ if (ref != new_object) {
+ obj_ptr->Assign(new_object);
+ }
+ }
+ }
+
MarkObjectVisitor* const visitor_;
// Space which we are scanning
space::ContinuousSpace* const from_space_;
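The visitor rewrite above folds the field and root cases into one templated MarkReference(), which works because both reference wrappers expose the same AsMirrorPtr()/Assign() shape and differ only in the kPoisonReferences flag. A toy model of that idea (ToyRef is purely illustrative; it is not mirror::ObjectReference):

#include <cstdint>

// Two wrapper flavours that store a reference, one of them "poisoned" (bit-flipped).
template <bool kPoison>
struct ToyRef {
  uint32_t bits = 0;
  uint32_t AsPtr() const { return kPoison ? ~bits : bits; }
  void Assign(uint32_t v) { bits = kPoison ? ~v : v; }
};

// One helper serves both flavours, mirroring the new MarkReference() template.
template <bool kPoison>
void Mark(ToyRef<kPoison>* ref) {
  uint32_t old_ref = ref->AsPtr();
  uint32_t new_ref = old_ref + 0x100;  // Pretend the GC moved the object.
  if (new_ref != old_ref) {
    ref->Assign(new_ref);
  }
}

int main() {
  ToyRef<true> poisoned;
  ToyRef<false> plain;
  poisoned.Assign(0x1000);
  plain.Assign(0x1000);
  Mark(&poisoned);
  Mark(&plain);
  return poisoned.AsPtr() == plain.AsPtr() ? 0 : 1;  // Both end up at 0x1100.
}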
diff --git a/runtime/gc/weak_root_state.h b/runtime/gc/weak_root_state.h
index b66f19d..e3cefc4 100644
--- a/runtime/gc/weak_root_state.h
+++ b/runtime/gc/weak_root_state.h
@@ -28,6 +28,8 @@
// Need to wait until we can read weak roots.
kWeakRootStateNoReadsOrWrites,
// Need to mark new weak roots to make sure they don't get swept.
+ // kWeakRootStateMarkNewRoots is currently unused but I was planning on using it to allow adding new
+ // weak roots during the CMS reference processing phase.
kWeakRootStateMarkNewRoots,
};
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index ae521b1..2be570a 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -90,7 +90,6 @@
}
mirror::String* InternTable::LookupWeak(mirror::String* s) {
- // TODO: Return only if marked.
return weak_interns_.Find(s);
}
@@ -229,7 +228,7 @@
void InternTable::WaitUntilAccessible(Thread* self) {
Locks::intern_table_lock_->ExclusiveUnlock(self);
- self->TransitionFromRunnableToSuspended(kWaitingWeakRootRead);
+ self->TransitionFromRunnableToSuspended(kWaitingWeakGcRootRead);
Locks::intern_table_lock_->ExclusiveLock(self);
while (weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) {
weak_intern_condition_.Wait(self);
@@ -250,24 +249,35 @@
CHECK_EQ(2u, self->NumberOfHeldMutexes()) << "may only safely hold the mutator lock";
}
while (true) {
+ if (holding_locks) {
+ if (!kUseReadBarrier) {
+ CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
+ } else {
+ CHECK(self->GetWeakRefAccessEnabled());
+ }
+ }
// Check the strong table for a match.
mirror::String* strong = LookupStrong(s);
if (strong != nullptr) {
return strong;
}
+ if ((!kUseReadBarrier && weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) ||
+ (kUseReadBarrier && self->GetWeakRefAccessEnabled())) {
+ break;
+ }
// weak_root_state_ is set to gc::kWeakRootStateNoReadsOrWrites in the GC pause but is only
// cleared after SweepSystemWeaks has completed. This is why we need to wait until it is
// cleared.
- if (weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) {
- break;
- }
CHECK(!holding_locks);
StackHandleScope<1> hs(self);
auto h = hs.NewHandleWrapper(&s);
WaitUntilAccessible(self);
}
- CHECK_NE(weak_root_state_, gc::kWeakRootStateNoReadsOrWrites);
- DCHECK_NE(weak_root_state_, gc::kWeakRootStateMarkNewRoots) << "Unsupported";
+ if (!kUseReadBarrier) {
+ CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
+ } else {
+ CHECK(self->GetWeakRefAccessEnabled());
+ }
// There is no match in the strong table, check the weak table.
mirror::String* weak = LookupWeak(s);
if (weak != nullptr) {
@@ -298,7 +308,7 @@
return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
}
-mirror::String* InternTable::InternImageString(mirror::String* s) {
+mirror::String* InternTable::InternStrongImageString(mirror::String* s) {
// May be holding the heap bitmap lock.
return Insert(s, true, true);
}
@@ -319,8 +329,6 @@
void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
weak_interns_.SweepWeaks(visitor);
- // Done sweeping, back to a normal state.
- ChangeWeakRootStateLocked(gc::kWeakRootStateNormal);
}
void InternTable::AddImageInternTable(gc::space::ImageSpace* image_space) {
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 0be6675..ae9f7a7 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -61,8 +61,10 @@
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// Only used by image writer. Special version that may not cause thread suspension since the GC
- // can not be running while we are doing image writing.
- mirror::String* InternImageString(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_);
+ // can not be running while we are doing image writing. May be called while holding a
+ // lock since there will not be thread suspension.
+ mirror::String* InternStrongImageString(mirror::String* s)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Interns a potentially new string in the 'strong' table. May cause thread suspension.
mirror::String* InternStrong(const char* utf8_data) SHARED_REQUIRES(Locks::mutator_lock_)
@@ -184,7 +186,9 @@
UnorderedSet post_zygote_table_;
};
- // Insert if non null, otherwise return null.
+ // Insert if non null, otherwise return null. Must be called holding the mutator lock.
+ // If holding_locks is true, then we may also hold other locks, and we require that the GC is not
+ // running, since it is not safe to wait while holding locks.
mirror::String* Insert(mirror::String* s, bool is_strong, bool holding_locks)
REQUIRES(!Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 9de9e8a..f923b84 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -884,7 +884,7 @@
// Explicit DoCall template function declarations.
#define EXPLICIT_DO_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template SHARED_REQUIRES(Locks::mutator_lock_) \
bool DoCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self, \
ShadowFrame& shadow_frame, \
const Instruction* inst, uint16_t inst_data, \
@@ -897,7 +897,7 @@
// Explicit DoLambdaCall template function declarations.
#define EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template SHARED_REQUIRES(Locks::mutator_lock_) \
bool DoLambdaCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self, \
ShadowFrame& shadow_frame, \
const Instruction* inst, \
@@ -911,7 +911,7 @@
// Explicit DoFilledNewArray template function declarations.
#define EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(_is_range_, _check, _transaction_active) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template SHARED_REQUIRES(Locks::mutator_lock_) \
bool DoFilledNewArray<_is_range_, _check, _transaction_active>(const Instruction* inst, \
const ShadowFrame& shadow_frame, \
Thread* self, JValue* result)
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index f5ac9d0..ae02fe6 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -128,6 +128,9 @@
* the debugger.
*
* Returns a newly-allocated JdwpState struct on success, or nullptr on failure.
+ *
+ * NO_THREAD_SAFETY_ANALYSIS since we can't annotate that we do not have
+ * state->thread_start_lock_ held.
*/
static JdwpState* Create(const JdwpOptions* options)
REQUIRES(!Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index b40d94a..7118f36 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -90,7 +90,7 @@
case kWaitingInMainSignalCatcherLoop: return kJavaWaiting;
case kWaitingForMethodTracingStart: return kJavaWaiting;
case kWaitingForVisitObjects: return kJavaWaiting;
- case kWaitingWeakRootRead: return kJavaWaiting;
+ case kWaitingWeakGcRootRead: return kJavaWaiting;
case kSuspended: return kJavaRunnable;
// Don't add a 'default' here so the compiler can spot incompatible enum changes.
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 1914124..1912314 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1512,7 +1512,7 @@
void Runtime::AllowNewSystemWeaks() {
monitor_list_->AllowNewMonitors();
- intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal); // TODO: Do this in the sweeping?
+ intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal); // TODO: Do this in the sweeping.
java_vm_->AllowNewWeakGlobals();
heap_->AllowNewAllocationRecords();
lambda_box_table_->AllowNewWeakBoxedLambdas();
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 39ef68a..8bf241b 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -19,6 +19,10 @@
#include "thread.h"
+#ifdef __ANDROID__
+#include <bionic_tls.h> // Access to our own TLS slot.
+#endif
+
#include <pthread.h>
#include "base/casts.h"
@@ -41,7 +45,11 @@
if (!is_started_) {
return nullptr;
} else {
+#ifdef __ANDROID__
+ void* thread = __get_tls()[TLS_SLOT_ART_THREAD_SELF];
+#else
void* thread = pthread_getspecific(Thread::pthread_key_self_);
+#endif
return reinterpret_cast<Thread*>(thread);
}
}
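
For readers unfamiliar with the two lookup paths spliced in above, here is a minimal, self-contained analogue (not ART's code): the pthread_key_t path goes through pthread_getspecific(), while the device path reads a dedicated bionic TLS slot directly via __get_tls()[TLS_SLOT_ART_THREAD_SELF]; a plain thread_local pointer is the closest portable stand-in for that single TLS load.

    #include <pthread.h>
    #include <cstdio>

    struct Thread { int id; };

    static pthread_key_t key_self;                   // Slow path: lookup goes through libc.
    static thread_local Thread* tls_self = nullptr;  // Fast-path analogue: direct TLS access.

    Thread* CurrentSlow() {
      return static_cast<Thread*>(pthread_getspecific(key_self));  // Call + key lookup.
    }

    Thread* CurrentFast() {
      return tls_self;  // Typically a single TLS-relative load once inlined.
    }

    int main() {
      pthread_key_create(&key_self, nullptr);
      Thread self{42};
      pthread_setspecific(key_self, &self);  // Both stores happen once, at "attach" time.
      tls_self = &self;
      std::printf("%d %d\n", CurrentSlow()->id, CurrentFast()->id);
      return 0;
    }
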
diff --git a/runtime/thread.cc b/runtime/thread.cc
index b3efad0..74e3f11 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -527,7 +527,11 @@
InitCardTable();
InitTid();
+#ifdef __ANDROID__
+ __get_tls()[TLS_SLOT_ART_THREAD_SELF] = this;
+#else
CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
+#endif
DCHECK_EQ(Thread::Current(), this);
tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this);
@@ -1349,7 +1353,11 @@
LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's "
"going to use a pthread_key_create destructor?): " << *self;
CHECK(is_started_);
+#ifdef __ANDROID__
+ __get_tls()[TLS_SLOT_ART_THREAD_SELF] = self;
+#else
CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
+#endif
self->tls32_.thread_exit_check_count = 1;
} else {
LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
@@ -2736,7 +2744,7 @@
size_t Thread::NumberOfHeldMutexes() const {
size_t count = 0;
for (BaseMutex* mu : tlsPtr_.held_mutexes) {
- count += static_cast<size_t>(mu != nullptr);
+ count += mu != nullptr ? 1 : 0;
}
return count;
}
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 60c9b5e..62d1e84 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -1228,7 +1228,11 @@
// Clear the TLS data, so that the underlying native thread is recognizably detached.
// (It may wish to reattach later.)
+#ifdef __ANDROID__
+ __get_tls()[TLS_SLOT_ART_THREAD_SELF] = nullptr;
+#else
CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, nullptr), "detach self");
+#endif
// Signal that a thread just detached.
MutexLock mu(nullptr, *Locks::thread_list_lock_);
diff --git a/runtime/thread_state.h b/runtime/thread_state.h
index c000e61..a11d213 100644
--- a/runtime/thread_state.h
+++ b/runtime/thread_state.h
@@ -43,7 +43,7 @@
kWaitingForMethodTracingStart, // WAITING TS_WAIT waiting for method tracing to start
kWaitingForVisitObjects, // WAITING TS_WAIT waiting for visiting objects
kWaitingForGetObjectsAllocated, // WAITING TS_WAIT waiting for getting the number of allocated objects
- kWaitingWeakRootRead, // WAITING TS_WAIT waiting to read a weak root
+ kWaitingWeakGcRootRead, // WAITING TS_WAIT waiting on the GC to read a weak root
kStarting, // NEW TS_WAIT native thread started, not yet ready to run managed code
kNative, // RUNNABLE TS_RUNNING running in a JNI native method
kSuspended, // RUNNABLE TS_RUNNING suspended by GC or debugger
diff --git a/test/415-optimizing-arith-neg/src/Main.java b/test/415-optimizing-arith-neg/src/Main.java
index cabf635..c53b639 100644
--- a/test/415-optimizing-arith-neg/src/Main.java
+++ b/test/415-optimizing-arith-neg/src/Main.java
@@ -15,7 +15,8 @@
*/
// Note that $opt$ is a marker for the optimizing compiler to test
-// it does compile the method.
+// it does compile the method, and that $noinline$ is a marker to
+// test that it does not inline it.
public class Main {
public static void assertEquals(int expected, int result) {
@@ -68,23 +69,23 @@
public static void main(String[] args) {
negInt();
- $opt$InplaceNegOneInt(1);
+ $opt$noinline$InplaceNegOneInt(1);
negLong();
- $opt$InplaceNegOneLong(1L);
+ $opt$noinline$InplaceNegOneLong(1L);
negFloat();
negDouble();
}
private static void negInt() {
- assertEquals(-1, $opt$NegInt(1));
- assertEquals(1, $opt$NegInt(-1));
- assertEquals(0, $opt$NegInt(0));
- assertEquals(51, $opt$NegInt(-51));
- assertEquals(-51, $opt$NegInt(51));
- assertEquals(2147483647, $opt$NegInt(-2147483647)); // -(2^31 - 1)
- assertEquals(-2147483647, $opt$NegInt(2147483647)); // 2^31 - 1
+ assertEquals(-1, $opt$noinline$NegInt(1));
+ assertEquals(1, $opt$noinline$NegInt(-1));
+ assertEquals(0, $opt$noinline$NegInt(0));
+ assertEquals(51, $opt$noinline$NegInt(-51));
+ assertEquals(-51, $opt$noinline$NegInt(51));
+ assertEquals(2147483647, $opt$noinline$NegInt(-2147483647)); // -(2^31 - 1)
+ assertEquals(-2147483647, $opt$noinline$NegInt(2147483647)); // 2^31 - 1
// From the Java 7 SE Edition specification:
// http://docs.oracle.com/javase/specs/jls/se7/html/jls-15.html#jls-15.15.4
//
@@ -95,101 +96,128 @@
// int or long results in that same maximum negative number.
// Overflow occurs in this case, but no exception is thrown.
// For all integer values x, -x equals (~x)+1.''
- assertEquals(-2147483648, $opt$NegInt(-2147483648)); // -(2^31)
+ assertEquals(-2147483648, $opt$noinline$NegInt(-2147483648)); // -(2^31)
}
- private static void $opt$InplaceNegOneInt(int a) {
+ private static void negLong() {
+ assertEquals(-1L, $opt$noinline$NegLong(1L));
+ assertEquals(1L, $opt$noinline$NegLong(-1L));
+ assertEquals(0L, $opt$noinline$NegLong(0L));
+ assertEquals(51L, $opt$noinline$NegLong(-51L));
+ assertEquals(-51L, $opt$noinline$NegLong(51L));
+
+ assertEquals(2147483647L, $opt$noinline$NegLong(-2147483647L)); // -(2^31 - 1)
+ assertEquals(-2147483647L, $opt$noinline$NegLong(2147483647L)); // (2^31 - 1)
+ assertEquals(2147483648L, $opt$noinline$NegLong(-2147483648L)); // -(2^31)
+ assertEquals(-2147483648L, $opt$noinline$NegLong(2147483648L)); // 2^31
+
+ assertEquals(9223372036854775807L, $opt$noinline$NegLong(-9223372036854775807L)); // -(2^63 - 1)
+ assertEquals(-9223372036854775807L, $opt$noinline$NegLong(9223372036854775807L)); // 2^63 - 1
+ // See remark regarding the negation of the maximum negative
+ // (long) value in negInt().
+ assertEquals(-9223372036854775808L, $opt$noinline$NegLong(-9223372036854775808L)); // -(2^63)
+ }
+
+ private static void negFloat() {
+ assertEquals("-0.0", $opt$noinline$NegFloat(0F));
+ assertEquals("0.0", $opt$noinline$NegFloat(-0F));
+ assertEquals(-1F, $opt$noinline$NegFloat(1F));
+ assertEquals(1F, $opt$noinline$NegFloat(-1F));
+ assertEquals(51F, $opt$noinline$NegFloat(-51F));
+ assertEquals(-51F, $opt$noinline$NegFloat(51F));
+
+ assertEquals(-0.1F, $opt$noinline$NegFloat(0.1F));
+ assertEquals(0.1F, $opt$noinline$NegFloat(-0.1F));
+ assertEquals(343597.38362F, $opt$noinline$NegFloat(-343597.38362F));
+ assertEquals(-343597.38362F, $opt$noinline$NegFloat(343597.38362F));
+
+ assertEquals(-Float.MIN_NORMAL, $opt$noinline$NegFloat(Float.MIN_NORMAL));
+ assertEquals(Float.MIN_NORMAL, $opt$noinline$NegFloat(-Float.MIN_NORMAL));
+ assertEquals(-Float.MIN_VALUE, $opt$noinline$NegFloat(Float.MIN_VALUE));
+ assertEquals(Float.MIN_VALUE, $opt$noinline$NegFloat(-Float.MIN_VALUE));
+ assertEquals(-Float.MAX_VALUE, $opt$noinline$NegFloat(Float.MAX_VALUE));
+ assertEquals(Float.MAX_VALUE, $opt$noinline$NegFloat(-Float.MAX_VALUE));
+
+ assertEquals(Float.NEGATIVE_INFINITY, $opt$noinline$NegFloat(Float.POSITIVE_INFINITY));
+ assertEquals(Float.POSITIVE_INFINITY, $opt$noinline$NegFloat(Float.NEGATIVE_INFINITY));
+ assertIsNaN($opt$noinline$NegFloat(Float.NaN));
+ }
+
+ private static void negDouble() {
+ assertEquals("-0.0", $opt$noinline$NegDouble(0D));
+ assertEquals("0.0", $opt$noinline$NegDouble(-0D));
+ assertEquals(-1D, $opt$noinline$NegDouble(1D));
+ assertEquals(1D, $opt$noinline$NegDouble(-1D));
+ assertEquals(51D, $opt$noinline$NegDouble(-51D));
+ assertEquals(-51D, $opt$noinline$NegDouble(51D));
+
+ assertEquals(-0.1D, $opt$noinline$NegDouble(0.1D));
+ assertEquals(0.1D, $opt$noinline$NegDouble(-0.1D));
+ assertEquals(343597.38362D, $opt$noinline$NegDouble(-343597.38362D));
+ assertEquals(-343597.38362D, $opt$noinline$NegDouble(343597.38362D));
+
+ assertEquals(-Double.MIN_NORMAL, $opt$noinline$NegDouble(Double.MIN_NORMAL));
+ assertEquals(Double.MIN_NORMAL, $opt$noinline$NegDouble(-Double.MIN_NORMAL));
+ assertEquals(-Double.MIN_VALUE, $opt$noinline$NegDouble(Double.MIN_VALUE));
+ assertEquals(Double.MIN_VALUE, $opt$noinline$NegDouble(-Double.MIN_VALUE));
+ assertEquals(-Double.MAX_VALUE, $opt$noinline$NegDouble(Double.MAX_VALUE));
+ assertEquals(Double.MAX_VALUE, $opt$noinline$NegDouble(-Double.MAX_VALUE));
+
+ assertEquals(Double.NEGATIVE_INFINITY, $opt$noinline$NegDouble(Double.POSITIVE_INFINITY));
+ assertEquals(Double.POSITIVE_INFINITY, $opt$noinline$NegDouble(Double.NEGATIVE_INFINITY));
+ assertIsNaN($opt$noinline$NegDouble(Double.NaN));
+ }
+
+
+ static boolean doThrow = false;
+
+ private static void $opt$noinline$InplaceNegOneInt(int a) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
a = -a;
assertEquals(-1, a);
}
- private static void negLong() {
- assertEquals(-1L, $opt$NegLong(1L));
- assertEquals(1L, $opt$NegLong(-1L));
- assertEquals(0L, $opt$NegLong(0L));
- assertEquals(51L, $opt$NegLong(-51L));
- assertEquals(-51L, $opt$NegLong(51L));
-
- assertEquals(2147483647L, $opt$NegLong(-2147483647L)); // -(2^31 - 1)
- assertEquals(-2147483647L, $opt$NegLong(2147483647L)); // (2^31 - 1)
- assertEquals(2147483648L, $opt$NegLong(-2147483648L)); // -(2^31)
- assertEquals(-2147483648L, $opt$NegLong(2147483648L)); // 2^31
-
- assertEquals(9223372036854775807L, $opt$NegLong(-9223372036854775807L)); // -(2^63 - 1)
- assertEquals(-9223372036854775807L, $opt$NegLong(9223372036854775807L)); // 2^63 - 1
- // See remark regarding the negation of the maximum negative
- // (long) value in negInt().
- assertEquals(-9223372036854775808L, $opt$NegLong(-9223372036854775808L)); // -(2^63)
- }
-
- private static void $opt$InplaceNegOneLong(long a) {
+ private static void $opt$noinline$InplaceNegOneLong(long a) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
a = -a;
assertEquals(-1L, a);
}
- private static void negFloat() {
- assertEquals("-0.0", $opt$NegFloat(0F));
- assertEquals("0.0", $opt$NegFloat(-0F));
- assertEquals(-1F, $opt$NegFloat(1F));
- assertEquals(1F, $opt$NegFloat(-1F));
- assertEquals(51F, $opt$NegFloat(-51F));
- assertEquals(-51F, $opt$NegFloat(51F));
-
- assertEquals(-0.1F, $opt$NegFloat(0.1F));
- assertEquals(0.1F, $opt$NegFloat(-0.1F));
- assertEquals(343597.38362F, $opt$NegFloat(-343597.38362F));
- assertEquals(-343597.38362F, $opt$NegFloat(343597.38362F));
-
- assertEquals(-Float.MIN_NORMAL, $opt$NegFloat(Float.MIN_NORMAL));
- assertEquals(Float.MIN_NORMAL, $opt$NegFloat(-Float.MIN_NORMAL));
- assertEquals(-Float.MIN_VALUE, $opt$NegFloat(Float.MIN_VALUE));
- assertEquals(Float.MIN_VALUE, $opt$NegFloat(-Float.MIN_VALUE));
- assertEquals(-Float.MAX_VALUE, $opt$NegFloat(Float.MAX_VALUE));
- assertEquals(Float.MAX_VALUE, $opt$NegFloat(-Float.MAX_VALUE));
-
- assertEquals(Float.NEGATIVE_INFINITY, $opt$NegFloat(Float.POSITIVE_INFINITY));
- assertEquals(Float.POSITIVE_INFINITY, $opt$NegFloat(Float.NEGATIVE_INFINITY));
- assertIsNaN($opt$NegFloat(Float.NaN));
- }
-
- private static void negDouble() {
- assertEquals("-0.0", $opt$NegDouble(0D));
- assertEquals("0.0", $opt$NegDouble(-0D));
- assertEquals(-1D, $opt$NegDouble(1D));
- assertEquals(1D, $opt$NegDouble(-1D));
- assertEquals(51D, $opt$NegDouble(-51D));
- assertEquals(-51D, $opt$NegDouble(51D));
-
- assertEquals(-0.1D, $opt$NegDouble(0.1D));
- assertEquals(0.1D, $opt$NegDouble(-0.1D));
- assertEquals(343597.38362D, $opt$NegDouble(-343597.38362D));
- assertEquals(-343597.38362D, $opt$NegDouble(343597.38362D));
-
- assertEquals(-Double.MIN_NORMAL, $opt$NegDouble(Double.MIN_NORMAL));
- assertEquals(Double.MIN_NORMAL, $opt$NegDouble(-Double.MIN_NORMAL));
- assertEquals(-Double.MIN_VALUE, $opt$NegDouble(Double.MIN_VALUE));
- assertEquals(Double.MIN_VALUE, $opt$NegDouble(-Double.MIN_VALUE));
- assertEquals(-Double.MAX_VALUE, $opt$NegDouble(Double.MAX_VALUE));
- assertEquals(Double.MAX_VALUE, $opt$NegDouble(-Double.MAX_VALUE));
-
- assertEquals(Double.NEGATIVE_INFINITY, $opt$NegDouble(Double.POSITIVE_INFINITY));
- assertEquals(Double.POSITIVE_INFINITY, $opt$NegDouble(Double.NEGATIVE_INFINITY));
- assertIsNaN($opt$NegDouble(Double.NaN));
- }
-
- static int $opt$NegInt(int a){
+ private static int $opt$noinline$NegInt(int a){
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
return -a;
}
- static long $opt$NegLong(long a){
+ private static long $opt$noinline$NegLong(long a){
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
return -a;
}
- static float $opt$NegFloat(float a){
+ private static float $opt$noinline$NegFloat(float a){
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
return -a;
}
- static double $opt$NegDouble(double a){
+ private static double $opt$noinline$NegDouble(double a){
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
return -a;
}
}
diff --git a/test/442-checker-constant-folding/src/Main.java b/test/442-checker-constant-folding/src/Main.java
index 20dac42..59e7282 100644
--- a/test/442-checker-constant-folding/src/Main.java
+++ b/test/442-checker-constant-folding/src/Main.java
@@ -70,6 +70,25 @@
return y;
}
+ /// CHECK-START: long Main.LongNegation() constant_folding (before)
+ /// CHECK-DAG: <<Const42:j\d+>> LongConstant 42
+ /// CHECK-DAG: <<Neg:j\d+>> Neg [<<Const42>>]
+ /// CHECK-DAG: Return [<<Neg>>]
+
+ /// CHECK-START: long Main.LongNegation() constant_folding (after)
+ /// CHECK-DAG: <<ConstN42:j\d+>> LongConstant -42
+ /// CHECK-DAG: Return [<<ConstN42>>]
+
+ /// CHECK-START: long Main.LongNegation() constant_folding (after)
+ /// CHECK-NOT: Neg
+
+ public static long LongNegation() {
+ long x, y;
+ x = 42L;
+ y = -x;
+ return y;
+ }
+
/**
* Exercise constant folding on addition.
@@ -344,6 +363,273 @@
/**
+ * Exercise constant folding on left shift.
+ */
+
+ /// CHECK-START: int Main.ShlIntLong() constant_folding (before)
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<Const2L:j\d+>> LongConstant 2
+ /// CHECK-DAG: <<TypeConv:i\d+>> TypeConversion [<<Const2L>>]
+ /// CHECK-DAG: <<Shl:i\d+>> Shl [<<Const1>>,<<TypeConv>>]
+ /// CHECK-DAG: Return [<<Shl>>]
+
+ /// CHECK-START: int Main.ShlIntLong() constant_folding (after)
+ /// CHECK-DAG: <<Const4:i\d+>> IntConstant 4
+ /// CHECK-DAG: Return [<<Const4>>]
+
+ /// CHECK-START: int Main.ShlIntLong() constant_folding (after)
+ /// CHECK-NOT: Shl
+
+ public static int ShlIntLong() {
+ int lhs = 1;
+ long rhs = 2;
+ return lhs << rhs;
+ }
+
+ /// CHECK-START: long Main.ShlLongInt() constant_folding (before)
+ /// CHECK-DAG: <<Const3L:j\d+>> LongConstant 3
+ /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
+ /// CHECK-DAG: <<Shl:j\d+>> Shl [<<Const3L>>,<<Const2>>]
+ /// CHECK-DAG: Return [<<Shl>>]
+
+ /// CHECK-START: long Main.ShlLongInt() constant_folding (after)
+ /// CHECK-DAG: <<Const12L:j\d+>> LongConstant 12
+ /// CHECK-DAG: Return [<<Const12L>>]
+
+ /// CHECK-START: long Main.ShlLongInt() constant_folding (after)
+ /// CHECK-NOT: Shl
+
+ public static long ShlLongInt() {
+ long lhs = 3;
+ int rhs = 2;
+ return lhs << rhs;
+ }
+
+
+ /**
+ * Exercise constant folding on right shift.
+ */
+
+ /// CHECK-START: int Main.ShrIntLong() constant_folding (before)
+ /// CHECK-DAG: <<Const7:i\d+>> IntConstant 7
+ /// CHECK-DAG: <<Const2L:j\d+>> LongConstant 2
+ /// CHECK-DAG: <<TypeConv:i\d+>> TypeConversion [<<Const2L>>]
+ /// CHECK-DAG: <<Shr:i\d+>> Shr [<<Const7>>,<<TypeConv>>]
+ /// CHECK-DAG: Return [<<Shr>>]
+
+ /// CHECK-START: int Main.ShrIntLong() constant_folding (after)
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK-DAG: Return [<<Const1>>]
+
+ /// CHECK-START: int Main.ShrIntLong() constant_folding (after)
+ /// CHECK-NOT: Shr
+
+ public static int ShrIntLong() {
+ int lhs = 7;
+ long rhs = 2;
+ return lhs >> rhs;
+ }
+
+ /// CHECK-START: long Main.ShrLongInt() constant_folding (before)
+ /// CHECK-DAG: <<Const9L:j\d+>> LongConstant 9
+ /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
+ /// CHECK-DAG: <<Shr:j\d+>> Shr [<<Const9L>>,<<Const2>>]
+ /// CHECK-DAG: Return [<<Shr>>]
+
+ /// CHECK-START: long Main.ShrLongInt() constant_folding (after)
+ /// CHECK-DAG: <<Const2L:j\d+>> LongConstant 2
+ /// CHECK-DAG: Return [<<Const2L>>]
+
+ /// CHECK-START: long Main.ShrLongInt() constant_folding (after)
+ /// CHECK-NOT: Shr
+
+ public static long ShrLongInt() {
+ long lhs = 9;
+ int rhs = 2;
+ return lhs >> rhs;
+ }
+
+
+ /**
+ * Exercise constant folding on unsigned right shift.
+ */
+
+ /// CHECK-START: int Main.UShrIntLong() constant_folding (before)
+ /// CHECK-DAG: <<ConstM7:i\d+>> IntConstant -7
+ /// CHECK-DAG: <<Const2L:j\d+>> LongConstant 2
+ /// CHECK-DAG: <<TypeConv:i\d+>> TypeConversion [<<Const2L>>]
+ /// CHECK-DAG: <<UShr:i\d+>> UShr [<<ConstM7>>,<<TypeConv>>]
+ /// CHECK-DAG: Return [<<UShr>>]
+
+ /// CHECK-START: int Main.UShrIntLong() constant_folding (after)
+ /// CHECK-DAG: <<ConstRes:i\d+>> IntConstant 1073741822
+ /// CHECK-DAG: Return [<<ConstRes>>]
+
+ /// CHECK-START: int Main.UShrIntLong() constant_folding (after)
+ /// CHECK-NOT: UShr
+
+ public static int UShrIntLong() {
+ int lhs = -7;
+ long rhs = 2;
+ return lhs >>> rhs;
+ }
+
+ /// CHECK-START: long Main.UShrLongInt() constant_folding (before)
+ /// CHECK-DAG: <<ConstM9L:j\d+>> LongConstant -9
+ /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
+ /// CHECK-DAG: <<UShr:j\d+>> UShr [<<ConstM9L>>,<<Const2>>]
+ /// CHECK-DAG: Return [<<UShr>>]
+
+ /// CHECK-START: long Main.UShrLongInt() constant_folding (after)
+ /// CHECK-DAG: <<ConstRes:j\d+>> LongConstant 4611686018427387901
+ /// CHECK-DAG: Return [<<ConstRes>>]
+
+ /// CHECK-START: long Main.UShrLongInt() constant_folding (after)
+ /// CHECK-NOT: UShr
+
+ public static long UShrLongInt() {
+ long lhs = -9;
+ int rhs = 2;
+ return lhs >>> rhs;
+ }
+
+
+ /**
+ * Exercise constant folding on logical and.
+ */
+
+ /// CHECK-START: long Main.AndIntLong() constant_folding (before)
+ /// CHECK-DAG: <<Const10:i\d+>> IntConstant 10
+ /// CHECK-DAG: <<Const3L:j\d+>> LongConstant 3
+ /// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const10>>]
+ /// CHECK-DAG: <<And:j\d+>> And [<<TypeConv>>,<<Const3L>>]
+ /// CHECK-DAG: Return [<<And>>]
+
+ /// CHECK-START: long Main.AndIntLong() constant_folding (after)
+ /// CHECK-DAG: <<Const2:j\d+>> LongConstant 2
+ /// CHECK-DAG: Return [<<Const2>>]
+
+ /// CHECK-START: long Main.AndIntLong() constant_folding (after)
+ /// CHECK-NOT: And
+
+ public static long AndIntLong() {
+ int lhs = 10;
+ long rhs = 3;
+ return lhs & rhs;
+ }
+
+ /// CHECK-START: long Main.AndLongInt() constant_folding (before)
+ /// CHECK-DAG: <<Const10L:j\d+>> LongConstant 10
+ /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
+ /// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const3>>]
+ /// CHECK-DAG: <<And:j\d+>> And [<<Const10L>>,<<TypeConv>>]
+ /// CHECK-DAG: Return [<<And>>]
+
+ /// CHECK-START: long Main.AndLongInt() constant_folding (after)
+ /// CHECK-DAG: <<Const2:j\d+>> LongConstant 2
+ /// CHECK-DAG: Return [<<Const2>>]
+
+ /// CHECK-START: long Main.AndLongInt() constant_folding (after)
+ /// CHECK-NOT: And
+
+ public static long AndLongInt() {
+ long lhs = 10;
+ int rhs = 3;
+ return lhs & rhs;
+ }
+
+
+ /**
+ * Exercise constant folding on logical or.
+ */
+
+ /// CHECK-START: long Main.OrIntLong() constant_folding (before)
+ /// CHECK-DAG: <<Const10:i\d+>> IntConstant 10
+ /// CHECK-DAG: <<Const3L:j\d+>> LongConstant 3
+ /// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const10>>]
+ /// CHECK-DAG: <<Or:j\d+>> Or [<<TypeConv>>,<<Const3L>>]
+ /// CHECK-DAG: Return [<<Or>>]
+
+ /// CHECK-START: long Main.OrIntLong() constant_folding (after)
+ /// CHECK-DAG: <<Const11:j\d+>> LongConstant 11
+ /// CHECK-DAG: Return [<<Const11>>]
+
+ /// CHECK-START: long Main.OrIntLong() constant_folding (after)
+ /// CHECK-NOT: Or
+
+ public static long OrIntLong() {
+ int lhs = 10;
+ long rhs = 3;
+ return lhs | rhs;
+ }
+
+ /// CHECK-START: long Main.OrLongInt() constant_folding (before)
+ /// CHECK-DAG: <<Const10L:j\d+>> LongConstant 10
+ /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
+ /// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const3>>]
+ /// CHECK-DAG: <<Or:j\d+>> Or [<<Const10L>>,<<TypeConv>>]
+ /// CHECK-DAG: Return [<<Or>>]
+
+ /// CHECK-START: long Main.OrLongInt() constant_folding (after)
+ /// CHECK-DAG: <<Const11:j\d+>> LongConstant 11
+ /// CHECK-DAG: Return [<<Const11>>]
+
+ /// CHECK-START: long Main.OrLongInt() constant_folding (after)
+ /// CHECK-NOT: Or
+
+ public static long OrLongInt() {
+ long lhs = 10;
+ int rhs = 3;
+ return lhs | rhs;
+ }
+
+
+ /**
+ * Exercise constant folding on logical exclusive or.
+ */
+
+ /// CHECK-START: long Main.XorIntLong() constant_folding (before)
+ /// CHECK-DAG: <<Const10:i\d+>> IntConstant 10
+ /// CHECK-DAG: <<Const3L:j\d+>> LongConstant 3
+ /// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const10>>]
+ /// CHECK-DAG: <<Xor:j\d+>> Xor [<<TypeConv>>,<<Const3L>>]
+ /// CHECK-DAG: Return [<<Xor>>]
+
+ /// CHECK-START: long Main.XorIntLong() constant_folding (after)
+ /// CHECK-DAG: <<Const9:j\d+>> LongConstant 9
+ /// CHECK-DAG: Return [<<Const9>>]
+
+ /// CHECK-START: long Main.XorIntLong() constant_folding (after)
+ /// CHECK-NOT: Xor
+
+ public static long XorIntLong() {
+ int lhs = 10;
+ long rhs = 3;
+ return lhs ^ rhs;
+ }
+
+ /// CHECK-START: long Main.XorLongInt() constant_folding (before)
+ /// CHECK-DAG: <<Const10L:j\d+>> LongConstant 10
+ /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
+ /// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const3>>]
+ /// CHECK-DAG: <<Xor:j\d+>> Xor [<<Const10L>>,<<TypeConv>>]
+ /// CHECK-DAG: Return [<<Xor>>]
+
+ /// CHECK-START: long Main.XorLongInt() constant_folding (after)
+ /// CHECK-DAG: <<Const9:j\d+>> LongConstant 9
+ /// CHECK-DAG: Return [<<Const9>>]
+
+ /// CHECK-START: long Main.XorLongInt() constant_folding (after)
+ /// CHECK-NOT: Xor
+
+ public static long XorLongInt() {
+ long lhs = 10;
+ int rhs = 3;
+ return lhs ^ rhs;
+ }
+
+
+ /**
* Exercise constant folding on constant (static) condition.
*/
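
The mixed int/long shift tests added above rely on the Java rule that only the low 5 bits (for an int shift) or 6 bits (for a long shift) of the distance are used, and that >>> shifts in an unsigned value; the sketch below (plain C++, not ART's constant folder) reproduces the expected constants by hand.

    #include <cassert>
    #include <cstdint>

    // Hand-evaluation of the new test cases; helper names are invented for illustration.
    int32_t FoldShlInt(int32_t lhs, int64_t rhs)  { return lhs << (rhs & 0x1f); }
    int64_t FoldShlLong(int64_t lhs, int32_t rhs) { return lhs << (rhs & 0x3f); }
    int32_t FoldUShrInt(int32_t lhs, int64_t rhs) {
      return static_cast<int32_t>(static_cast<uint32_t>(lhs) >> (rhs & 0x1f));
    }
    int64_t FoldUShrLong(int64_t lhs, int32_t rhs) {
      return static_cast<int64_t>(static_cast<uint64_t>(lhs) >> (rhs & 0x3f));
    }

    int main() {
      assert(FoldShlInt(1, 2)    == 4);                       // ShlIntLong
      assert(FoldShlLong(3, 2)   == 12);                      // ShlLongInt
      assert(FoldUShrInt(-7, 2)  == 1073741822);              // UShrIntLong
      assert(FoldUShrLong(-9, 2) == 4611686018427387901LL);   // UShrLongInt
      assert((10 & 3) == 2 && (10 | 3) == 11 && (10 ^ 3) == 9);  // And/Or/Xor cases
      return 0;
    }
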
@@ -539,6 +825,25 @@
return 0 << arg;
}
+ /// CHECK-START: long Main.ShlLong0WithInt(int) constant_folding (before)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Const0L:j\d+>> LongConstant 0
+ /// CHECK-DAG: <<Shl:j\d+>> Shl [<<Const0L>>,<<Arg>>]
+ /// CHECK-DAG: Return [<<Shl>>]
+
+ /// CHECK-START: long Main.ShlLong0WithInt(int) constant_folding (after)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Const0L:j\d+>> LongConstant 0
+ /// CHECK-DAG: Return [<<Const0L>>]
+
+ /// CHECK-START: long Main.ShlLong0WithInt(int) constant_folding (after)
+ /// CHECK-NOT: Shl
+
+ public static long ShlLong0WithInt(int arg) {
+ long long_zero = 0;
+ return long_zero << arg;
+ }
+
/// CHECK-START: long Main.Shr0(int) constant_folding (before)
/// CHECK-DAG: <<Arg:i\d+>> ParameterValue
/// CHECK-DAG: <<Const0:j\d+>> LongConstant 0
@@ -866,6 +1171,7 @@
public static void main(String[] args) {
assertIntEquals(-42, IntNegation());
+ assertLongEquals(-42L, LongNegation());
assertIntEquals(3, IntAddition1());
assertIntEquals(14, IntAddition2());
@@ -883,6 +1189,24 @@
assertIntEquals(2, IntRemainder());
assertLongEquals(2L, LongRemainder());
+ assertIntEquals(4, ShlIntLong());
+ assertLongEquals(12L, ShlLongInt());
+
+ assertIntEquals(1, ShrIntLong());
+ assertLongEquals(2L, ShrLongInt());
+
+ assertIntEquals(1073741822, UShrIntLong());
+ assertLongEquals(4611686018427387901L, UShrLongInt());
+
+ assertLongEquals(2, AndIntLong());
+ assertLongEquals(2, AndLongInt());
+
+ assertLongEquals(11, OrIntLong());
+ assertLongEquals(11, OrLongInt());
+
+ assertLongEquals(9, XorIntLong());
+ assertLongEquals(9, XorLongInt());
+
assertIntEquals(5, StaticCondition());
assertIntEquals(7, JumpsAndConditionals(true));
@@ -897,6 +1221,7 @@
assertIntEquals(0, Rem1(arbitrary));
assertLongEquals(0, RemN1(arbitrary));
assertIntEquals(0, Shl0(arbitrary));
+ assertLongEquals(0, ShlLong0WithInt(arbitrary));
assertLongEquals(0, Shr0(arbitrary));
assertLongEquals(0, SubSameLong(arbitrary));
assertIntEquals(0, UShr0(arbitrary));
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index d58f034..7ada189 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -153,9 +153,9 @@
},
{
description: "TimeZoneTest.testAllDisplayNames times out, needs investigation",
- result: EXEC_FAILED,
+ result: EXEC_TIMEOUT,
modes: [device],
- names: ["libcore.java.util.TimeZoneTest.testAllDisplayNames"],
+ names: ["libcore.java.util.TimeZoneTest#testAllDisplayNames"],
bug: 22786792
}
]