Merge "Revert "Rename registers in arm64.""
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 386128e..e4be21b 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -122,7 +122,7 @@
endif
# Clang on the target. Target builds use GCC by default.
-ART_TARGET_CLANG :=
+ART_TARGET_CLANG := false
ART_TARGET_CLANG_arm :=
ART_TARGET_CLANG_arm64 :=
ART_TARGET_CLANG_mips :=
@@ -168,7 +168,7 @@
ART_TARGET_CLANG_CFLAGS_x86 :=
ART_TARGET_CLANG_CFLAGS_x86_64 :=
-# these are necessary for Clang ARM64 ART builds
+# These are necessary for Clang ARM64 ART builds. TODO: remove.
ART_TARGET_CLANG_CFLAGS_arm64 += \
-Wno-implicit-exception-spec-mismatch \
-DNVALGRIND \
@@ -236,6 +236,14 @@
ART_TARGET_CFLAGS += -DART_BASE_ADDRESS_MIN_DELTA=$(LIBART_IMG_TARGET_MIN_BASE_ADDRESS_DELTA)
ART_TARGET_CFLAGS += -DART_BASE_ADDRESS_MAX_DELTA=$(LIBART_IMG_TARGET_MAX_BASE_ADDRESS_DELTA)
+# Colorize clang compiler warnings.
+ifeq ($(ART_HOST_CLANG),true)
+ ART_HOST_CFLAGS += -fcolor-diagnostics
+endif
+ifeq ($(ART_TARGET_CLANG),true)
+ ART_TARGET_CFLAGS += -fcolor-diagnostics
+endif
+
ART_TARGET_LDFLAGS :=
ifeq ($(TARGET_CPU_SMP),true)
ART_TARGET_CFLAGS += -DANDROID_SMP=1
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index 4279955..e411164 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -1169,8 +1169,9 @@
const MirFieldInfo& field_info = gvn_->GetMirGraph()->GetIFieldLoweringInfo(mir);
uint16_t res;
if (!field_info.IsResolved() || field_info.IsVolatile()) {
- // Volatile fields always get a new memory version; field id is irrelevant.
// Unresolved fields may be volatile, so handle them as such to be safe.
+ HandleInvokeOrClInitOrAcquireOp(mir); // Volatile GETs have acquire semantics.
+ // Volatile fields always get a new memory version; field id is irrelevant.
// Use result s_reg - will be unique.
res = gvn_->LookupValue(kNoValue, mir->ssa_rep->defs[0], kNoValue, kNoValue);
} else {
@@ -1269,14 +1270,16 @@
uint16_t LocalValueNumbering::HandleSGet(MIR* mir, uint16_t opcode) {
const MirSFieldLoweringInfo& field_info = gvn_->GetMirGraph()->GetSFieldLoweringInfo(mir);
- if (!field_info.IsInitialized() && (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
- // Class initialization can call arbitrary functions, we need to wipe aliasing values.
- HandleInvokeOrClInit(mir);
+ if (!field_info.IsResolved() || field_info.IsVolatile() ||
+ (!field_info.IsInitialized() && (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0)) {
+ // Volatile SGETs (and unresolved fields are potentially volatile) have acquire semantics
+    // and class initialization can call arbitrary functions, so we need to wipe aliasing values.
+ HandleInvokeOrClInitOrAcquireOp(mir);
}
uint16_t res;
if (!field_info.IsResolved() || field_info.IsVolatile()) {
- // Volatile fields always get a new memory version; field id is irrelevant.
// Unresolved fields may be volatile, so handle them as such to be safe.
+ // Volatile fields always get a new memory version; field id is irrelevant.
// Use result s_reg - will be unique.
res = gvn_->LookupValue(kNoValue, mir->ssa_rep->defs[0], kNoValue, kNoValue);
} else {
@@ -1306,7 +1309,7 @@
const MirSFieldLoweringInfo& field_info = gvn_->GetMirGraph()->GetSFieldLoweringInfo(mir);
if (!field_info.IsInitialized() && (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
// Class initialization can call arbitrary functions, we need to wipe aliasing values.
- HandleInvokeOrClInit(mir);
+ HandleInvokeOrClInitOrAcquireOp(mir);
}
uint16_t type = opcode - Instruction::SPUT;
if (!field_info.IsResolved()) {
@@ -1351,7 +1354,7 @@
}
}
-void LocalValueNumbering::HandleInvokeOrClInit(MIR* mir) {
+void LocalValueNumbering::HandleInvokeOrClInitOrAcquireOp(MIR* mir) {
// Use mir->offset as modifier; without elaborate inlining, it will be unique.
global_memory_version_ =
gvn_->LookupValue(kInvokeMemoryVersionBumpOp, 0u, 0u, mir->offset);
@@ -1404,9 +1407,7 @@
case Instruction::MONITOR_ENTER:
HandleNullCheck(mir, GetOperandValue(mir->ssa_rep->uses[0]));
- // NOTE: Keeping all aliasing values intact. Programs that rely on loads/stores of the
- // same non-volatile locations outside and inside a synchronized block being different
- // contain races that we cannot fix.
+ HandleInvokeOrClInitOrAcquireOp(mir); // Acquire operation.
break;
case Instruction::MONITOR_EXIT:
@@ -1462,14 +1463,12 @@
// Intentional fall-through.
case Instruction::INVOKE_STATIC:
case Instruction::INVOKE_STATIC_RANGE:
- if ((mir->optimization_flags & MIR_INLINED) == 0) {
- // Make ref args aliasing.
- for (size_t i = 0u, count = mir->ssa_rep->num_uses; i != count; ++i) {
- uint16_t reg = GetOperandValue(mir->ssa_rep->uses[i]);
- non_aliasing_refs_.erase(reg);
- }
- HandleInvokeOrClInit(mir);
+ // Make ref args aliasing.
+ for (size_t i = 0u, count = mir->ssa_rep->num_uses; i != count; ++i) {
+ uint16_t reg = GetOperandValue(mir->ssa_rep->uses[i]);
+ non_aliasing_refs_.erase(reg);
}
+ HandleInvokeOrClInitOrAcquireOp(mir);
break;
case Instruction::MOVE_RESULT:
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index e11c6e5..c60da32 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -308,7 +308,7 @@
uint16_t HandleSGet(MIR* mir, uint16_t opcode);
void HandleSPut(MIR* mir, uint16_t opcode);
void RemoveSFieldsForType(uint16_t type);
- void HandleInvokeOrClInit(MIR* mir);
+ void HandleInvokeOrClInitOrAcquireOp(MIR* mir);
bool SameMemoryVersion(const LocalValueNumbering& other) const;
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index e53c640..067bea2 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -338,16 +338,19 @@
DEF_IGET(Instruction::IGET, 1u, 0u, 0u), // Non-volatile.
DEF_IGET(Instruction::IGET, 2u, 10u, 1u), // Volatile.
DEF_IGET(Instruction::IGET, 3u, 2u, 1u), // Non-volatile.
+ DEF_IGET(Instruction::IGET, 4u, 0u, 0u), // Non-volatile.
};
PrepareIFields(ifields);
PrepareMIRs(mirs);
PerformLVN();
- ASSERT_EQ(value_names_.size(), 4u);
+ ASSERT_EQ(value_names_.size(), 5u);
EXPECT_NE(value_names_[0], value_names_[2]); // Volatile has always different value name.
EXPECT_NE(value_names_[1], value_names_[3]); // Used different base because of volatile.
+ EXPECT_NE(value_names_[1], value_names_[4]); // Not guaranteed to be the same after "acquire".
+
for (size_t i = 0; i != arraysize(mirs); ++i) {
- EXPECT_EQ((i == 2u) ? MIR_IGNORE_NULL_CHECK : 0,
+ EXPECT_EQ((i == 2u || i == 4u) ? MIR_IGNORE_NULL_CHECK : 0,
mirs_[i].optimization_flags) << i;
}
}
@@ -363,7 +366,7 @@
DEF_IGET(Instruction::IGET, 1u, 20u, 0u), // Resolved field #1, unique object.
DEF_IGET(Instruction::IGET, 2u, 21u, 0u), // Resolved field #1.
DEF_IGET_WIDE(Instruction::IGET_WIDE, 3u, 21u, 1u), // Resolved field #2.
- DEF_IGET(Instruction::IGET, 4u, 22u, 2u), // IGET doesn't clobber anything.
+ DEF_IGET(Instruction::IGET, 4u, 22u, 2u), // Unresolved IGET can be "acquire".
DEF_IGET(Instruction::IGET, 5u, 20u, 0u), // Resolved field #1, unique object.
DEF_IGET(Instruction::IGET, 6u, 21u, 0u), // Resolved field #1.
DEF_IGET_WIDE(Instruction::IGET_WIDE, 7u, 21u, 1u), // Resolved field #2.
@@ -381,14 +384,15 @@
PrepareMIRs(mirs);
PerformLVN();
ASSERT_EQ(value_names_.size(), 16u);
- EXPECT_EQ(value_names_[1], value_names_[5]);
- EXPECT_EQ(value_names_[2], value_names_[6]);
- EXPECT_EQ(value_names_[3], value_names_[7]);
- EXPECT_EQ(value_names_[1], value_names_[9]);
- EXPECT_NE(value_names_[2], value_names_[10]); // This aliased with unresolved IPUT.
- EXPECT_EQ(value_names_[3], value_names_[11]);
- EXPECT_EQ(value_names_[12], value_names_[15]);
- EXPECT_NE(value_names_[1], value_names_[14]); // This aliased with unresolved IPUT.
+ // Unresolved field is potentially volatile, so we need to adhere to the volatile semantics.
+ EXPECT_EQ(value_names_[1], value_names_[5]); // Unique object.
+ EXPECT_NE(value_names_[2], value_names_[6]); // Not guaranteed to be the same after "acquire".
+ EXPECT_NE(value_names_[3], value_names_[7]); // Not guaranteed to be the same after "acquire".
+ EXPECT_EQ(value_names_[1], value_names_[9]); // Unique object.
+ EXPECT_NE(value_names_[6], value_names_[10]); // This aliased with unresolved IPUT.
+ EXPECT_EQ(value_names_[7], value_names_[11]); // Still the same after "release".
+ EXPECT_EQ(value_names_[12], value_names_[15]); // Still the same after "release".
+ EXPECT_NE(value_names_[1], value_names_[14]); // This aliased with unresolved IPUT.
EXPECT_EQ(mirs_[0].optimization_flags, 0u);
EXPECT_EQ(mirs_[1].optimization_flags, MIR_IGNORE_NULL_CHECK);
EXPECT_EQ(mirs_[2].optimization_flags, 0u);
@@ -409,7 +413,7 @@
static const MIRDef mirs[] = {
DEF_SGET(Instruction::SGET, 0u, 0u), // Resolved field #1.
DEF_SGET_WIDE(Instruction::SGET_WIDE, 1u, 1u), // Resolved field #2.
- DEF_SGET(Instruction::SGET, 2u, 2u), // SGET doesn't clobber anything.
+ DEF_SGET(Instruction::SGET, 2u, 2u), // Unresolved SGET can be "acquire".
DEF_SGET(Instruction::SGET, 3u, 0u), // Resolved field #1.
DEF_SGET_WIDE(Instruction::SGET_WIDE, 4u, 1u), // Resolved field #2.
DEF_SPUT(Instruction::SPUT, 5u, 2u), // SPUT clobbers field #1 (#2 is wide).
@@ -421,10 +425,11 @@
PrepareMIRs(mirs);
PerformLVN();
ASSERT_EQ(value_names_.size(), 8u);
- EXPECT_EQ(value_names_[0], value_names_[3]);
- EXPECT_EQ(value_names_[1], value_names_[4]);
- EXPECT_NE(value_names_[0], value_names_[6]); // This aliased with unresolved IPUT.
- EXPECT_EQ(value_names_[1], value_names_[7]);
+ // Unresolved field is potentially volatile, so we need to adhere to the volatile semantics.
+ EXPECT_NE(value_names_[0], value_names_[3]); // Not guaranteed to be the same after "acquire".
+ EXPECT_NE(value_names_[1], value_names_[4]); // Not guaranteed to be the same after "acquire".
+ EXPECT_NE(value_names_[3], value_names_[6]); // This aliased with unresolved IPUT.
+ EXPECT_EQ(value_names_[4], value_names_[7]); // Still the same after "release".
for (size_t i = 0u; i != mir_count_; ++i) {
EXPECT_EQ(0, mirs_[i].optimization_flags) << i;
}
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 246ae44..51b6709 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -1163,7 +1163,7 @@
if (!MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
int flags = mir->dalvikInsn.FlagsOf();
- if ((flags & Instruction::kInvoke) != 0 && (mir->optimization_flags & MIR_INLINED) == 0) {
+ if ((flags & Instruction::kInvoke) != 0) {
attributes_ &= ~METHOD_IS_LEAF;
}
}
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index dda9e77..bcbfb5a 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -568,8 +568,6 @@
const uint16_t* code_ptr, const uint16_t* code_end) {
bool in_try_block = try_block_addr->IsBitSet(cur_offset);
bool is_throw = (insn->dalvikInsn.opcode == Instruction::THROW);
- bool build_all_edges =
- (cu_->disable_opt & (1 << kSuppressExceptionEdges)) || is_throw || in_try_block;
/* In try block */
if (in_try_block) {
@@ -605,6 +603,8 @@
}
in_try_block = (cur_block->successor_block_list_type != kNotUsed);
}
+ bool build_all_edges =
+ (cu_->disable_opt & (1 << kSuppressExceptionEdges)) || is_throw || in_try_block;
if (!in_try_block && build_all_edges) {
BasicBlock* eh_block = CreateNewBB(kExceptionHandling);
cur_block->taken = eh_block->id;
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 2523380..0f1d765 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -566,7 +566,6 @@
break;
}
if (result) {
- invoke->optimization_flags |= MIR_INLINED;
// If the invoke has not been eliminated yet, check now whether we should do it.
// This is done so that dataflow analysis does not get tripped up seeing nop invoke.
if (static_cast<int>(invoke->dalvikInsn.opcode) != kMirOpNop) {
@@ -583,7 +582,6 @@
}
}
if (move_result != nullptr) {
- move_result->optimization_flags |= MIR_INLINED;
move_result->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
}
}
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 67a75cb..c308932 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -1666,16 +1666,6 @@
}
void Mir2Lir::GenInvoke(CallInfo* info) {
- if ((info->opt_flags & MIR_INLINED) != 0) {
- // Already inlined but we may still need the null check.
- if (info->type != kStatic &&
- ((cu_->disable_opt & (1 << kNullCheckElimination)) != 0 ||
- (info->opt_flags & MIR_IGNORE_NULL_CHECK) == 0)) {
- RegLocation rl_obj = LoadValue(info->args[0], kRefReg);
- GenNullCheck(rl_obj.reg);
- }
- return;
- }
DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
->GenIntrinsic(this, info)) {
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 6942c0f..e7160ad 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -500,17 +500,11 @@
break;
case Instruction::MOVE_RESULT_WIDE:
- if ((opt_flags & MIR_INLINED) != 0) {
- break; // Nop - combined w/ previous invoke.
- }
StoreValueWide(rl_dest, GetReturnWide(LocToRegClass(rl_dest)));
break;
case Instruction::MOVE_RESULT:
case Instruction::MOVE_RESULT_OBJECT:
- if ((opt_flags & MIR_INLINED) != 0) {
- break; // Nop - combined w/ previous invoke.
- }
StoreValue(rl_dest, GetReturn(LocToRegClass(rl_dest)));
break;
@@ -867,7 +861,7 @@
case Instruction::INVOKE_STATIC_RANGE:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kStatic, true));
- if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
+ if (!kLeafOptimization) {
// If the invocation is not inlined, we can assume there is already a
// suspend check at the return site
mir_graph_->AppendGenSuspendTestList(bb);
@@ -875,59 +869,59 @@
break;
case Instruction::INVOKE_STATIC:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kStatic, false));
- if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
+ if (!kLeafOptimization) {
mir_graph_->AppendGenSuspendTestList(bb);
}
break;
case Instruction::INVOKE_DIRECT:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, false));
- if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
+ if (!kLeafOptimization) {
mir_graph_->AppendGenSuspendTestList(bb);
}
break;
case Instruction::INVOKE_DIRECT_RANGE:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, true));
- if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
+ if (!kLeafOptimization) {
mir_graph_->AppendGenSuspendTestList(bb);
}
break;
case Instruction::INVOKE_VIRTUAL:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, false));
- if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
+ if (!kLeafOptimization) {
mir_graph_->AppendGenSuspendTestList(bb);
}
break;
case Instruction::INVOKE_VIRTUAL_RANGE:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, true));
- if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
+ if (!kLeafOptimization) {
mir_graph_->AppendGenSuspendTestList(bb);
}
break;
case Instruction::INVOKE_SUPER:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kSuper, false));
- if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
+ if (!kLeafOptimization) {
mir_graph_->AppendGenSuspendTestList(bb);
}
break;
case Instruction::INVOKE_SUPER_RANGE:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kSuper, true));
- if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
+ if (!kLeafOptimization) {
mir_graph_->AppendGenSuspendTestList(bb);
}
break;
case Instruction::INVOKE_INTERFACE:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kInterface, false));
- if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
+ if (!kLeafOptimization) {
mir_graph_->AppendGenSuspendTestList(bb);
}
break;
case Instruction::INVOKE_INTERFACE_RANGE:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kInterface, true));
- if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
+ if (!kLeafOptimization) {
mir_graph_->AppendGenSuspendTestList(bb);
}
break;
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 30384ec..6898b50 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -1113,20 +1113,18 @@
}
uint32_t index = mir->dalvikInsn.vB;
- if (!(mir->optimization_flags & MIR_INLINED)) {
- DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
- DexFileMethodInliner* method_inliner =
- cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file);
- InlineMethod method;
- if (method_inliner->IsIntrinsic(index, &method)) {
- switch (method.opcode) {
- case kIntrinsicAbsDouble:
- case kIntrinsicMinMaxDouble:
- store_method_addr_ = true;
- break;
- default:
- break;
- }
+ DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
+ DexFileMethodInliner* method_inliner =
+ cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file);
+ InlineMethod method;
+ if (method_inliner->IsIntrinsic(index, &method)) {
+ switch (method.opcode) {
+ case kIntrinsicAbsDouble:
+ case kIntrinsicMinMaxDouble:
+ store_method_addr_ = true;
+ break;
+ default:
+ break;
}
}
}
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 1dcd4b3..f432e66 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1991,7 +1991,7 @@
MemberOffset offs) {
X86_64ManagedRegister dest = mdest.AsX86_64();
CHECK(dest.IsCpuRegister() && dest.IsCpuRegister());
- movq(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
+ movl(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
if (kPoisonHeapReferences) {
negl(dest.AsCpuRegister());
}
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 3d3ae16..661de68 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -553,6 +553,10 @@
allocator_type, VoidFunctor());
if (add_finalizer && LIKELY(obj != nullptr)) {
heap->AddFinalizerReference(self, &obj);
+ if (UNLIKELY(self->IsExceptionPending())) {
+        // Failed to allocate the finalizer reference; treat the whole allocation as failed.
+ obj = nullptr;
+ }
}
return obj;
}
diff --git a/test/080-oom-throw/src/Main.java b/test/080-oom-throw/src/Main.java
index c93f8bb..63c5215 100644
--- a/test/080-oom-throw/src/Main.java
+++ b/test/080-oom-throw/src/Main.java
@@ -15,13 +15,15 @@
*/
public class Main {
+ static char [][] holder;
+
static class ArrayMemEater {
static boolean sawOome;
static void blowup(char[][] holder) {
try {
for (int i = 0; i < holder.length; ++i) {
- holder[i] = new char[1024 * 1024];
+ holder[i] = new char[1022 * 1024];
}
} catch (OutOfMemoryError oome) {
ArrayMemEater.sawOome = true;
@@ -50,8 +52,30 @@
}
}
- static boolean triggerArrayOOM() {
- ArrayMemEater.blowup(new char[128 * 1024][]);
+ static class InstanceFinalizerMemEater {
+ static boolean sawOome;
+ static InstanceFinalizerMemEater hook;
+
+ InstanceFinalizerMemEater next;
+
+ static InstanceFinalizerMemEater allocate() {
+ try {
+ return new InstanceFinalizerMemEater();
+ } catch (OutOfMemoryError e) {
+ InstanceFinalizerMemEater.sawOome = true;
+ return null;
+ }
+ }
+
+ static void confuseCompilerOptimization(InstanceFinalizerMemEater instance) {
+ hook = instance;
+ }
+
+ protected void finalize() {}
+ }
+
+ static boolean triggerArrayOOM(char[][] holder) {
+ ArrayMemEater.blowup(holder);
return ArrayMemEater.sawOome;
}
@@ -67,11 +91,29 @@
return InstanceMemEater.sawOome;
}
+ static boolean triggerInstanceFinalizerOOM() {
+ InstanceFinalizerMemEater memEater = InstanceFinalizerMemEater.allocate();
+ InstanceFinalizerMemEater lastMemEater = memEater;
+ do {
+ lastMemEater.next = InstanceFinalizerMemEater.allocate();
+ lastMemEater = lastMemEater.next;
+ } while (lastMemEater != null);
+ memEater.confuseCompilerOptimization(memEater);
+ InstanceFinalizerMemEater.hook = null;
+ return InstanceFinalizerMemEater.sawOome;
+ }
+
public static void main(String[] args) {
- if (triggerArrayOOM()) {
+    // Keep the holder alive to make the instance OOM happen faster.
+ holder = new char[128 * 1024][];
+ if (triggerArrayOOM(holder)) {
System.out.println("NEW_ARRAY correctly threw OOME");
}
+ if (!triggerInstanceFinalizerOOM()) {
+      System.out.println("NEW_INSTANCE (finalize) did not throw OOME");
+ }
+
if (triggerInstanceOOM()) {
System.out.println("NEW_INSTANCE correctly threw OOME");
}
diff --git a/test/115-native-bridge/src/NativeBridgeMain.java b/test/115-native-bridge/src/NativeBridgeMain.java
index a531f92..2405627 100644
--- a/test/115-native-bridge/src/NativeBridgeMain.java
+++ b/test/115-native-bridge/src/NativeBridgeMain.java
@@ -15,6 +15,7 @@
*/
import java.lang.reflect.Method;
+import java.lang.System;
// This is named Main as it is a copy of JniTest, so that we can re-use the native implementations
// from libarttest.
@@ -29,6 +30,7 @@
testShortMethod();
testBooleanMethod();
testCharMethod();
+ testEnvironment();
}
public static native void testFindClassOnAttachedNativeThread();
@@ -147,6 +149,24 @@
}
}
}
+
+ private static void testEnvironment() {
+ String osArch = System.getProperty("os.arch");
+ if (!"os.arch".equals(osArch)) {
+ throw new AssertionError("unexpected value for os.arch: " + osArch);
+ }
+ // TODO: improve the build script to get these running as well.
+ // if (!"cpu_abi".equals(Build.CPU_ABI)) {
+ // throw new AssertionError("unexpected value for cpu_abi");
+ // }
+ // if (!"cpu_abi2".equals(Build.CPU_ABI2)) {
+ // throw new AssertionError("unexpected value for cpu_abi2");
+ // }
+ // String[] expectedSupportedAbis = {"supported1", "supported2", "supported3"};
+    // if (!Arrays.equals(expectedSupportedAbis, Build.SUPPORTED_ABIS)) {
+ // throw new AssertionError("unexpected value for supported_abis");
+ // }
+ }
}
public class NativeBridgeMain {
diff --git a/test/123-compiler-regressions-mt/expected.txt b/test/123-compiler-regressions-mt/expected.txt
new file mode 100644
index 0000000..a11e5bf
--- /dev/null
+++ b/test/123-compiler-regressions-mt/expected.txt
@@ -0,0 +1,2 @@
+b17689750TestVolatile passed.
+b17689750TestMonitor passed.
diff --git a/test/123-compiler-regressions-mt/info.txt b/test/123-compiler-regressions-mt/info.txt
new file mode 100644
index 0000000..cac7e75
--- /dev/null
+++ b/test/123-compiler-regressions-mt/info.txt
@@ -0,0 +1,6 @@
+This is a test for bad optimizations affecting multi-threaded program
+behavior.
+
+This test covers fixed AOT/JIT bugs to prevent regressions.
+
+17689750 GVN assigns the same value names across MONITOR_ENTER and volatile reads.
diff --git a/test/123-compiler-regressions-mt/src/Main.java b/test/123-compiler-regressions-mt/src/Main.java
new file mode 100644
index 0000000..11fa021
--- /dev/null
+++ b/test/123-compiler-regressions-mt/src/Main.java
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Test for JIT regressions.
+ */
+public class Main {
+ public static void main(String args[]) throws Exception {
+ b17689750TestVolatile();
+ b17689750TestMonitor();
+ }
+
+ static void b17689750TestVolatile() {
+ final B17689750TestVolatile test = new B17689750TestVolatile();
+ new Thread() {
+ public void run() {
+ test.thread1();
+ }
+ }.start();
+ try {
+ test.thread2();
+ } catch (NullPointerException expected) {
+ System.out.println("b17689750TestVolatile passed.");
+ }
+ }
+
+ static void b17689750TestMonitor() {
+ final B17689750TestMonitor test = new B17689750TestMonitor();
+ new Thread() {
+ public void run() {
+ test.thread1();
+ }
+ }.start();
+ try {
+ test.thread2();
+ } catch (NullPointerException expected) {
+ System.out.println("b17689750TestMonitor passed.");
+ }
+ }
+}
+
+class B17689750TestVolatile {
+ private volatile int state = 0;
+ private int[] values = { 42 };
+
+ void thread1() {
+ while (state != 1) { } // Busy loop.
+ values = null;
+ state = 2;
+ }
+
+ void thread2() {
+ int[] vs1 = values;
+ state = 1;
+ while (state != 2) { } // Busy loop.
+ int[] vs2 = values;
+ int v1 = vs1[0];
+ int v2 = vs2[0];
+ System.out.println("b17689750TestVolatile failed: " + v1 + ", " + v2);
+ }
+}
+
+class B17689750TestMonitor {
+ private int state = 0;
+ private Object lock = new Object();
+ private int[] values = { 42 };
+
+ void thread1() {
+ int s;
+ do {
+ synchronized (lock) {
+ s = state;
+ }
+ } while (s != 1); // Busy loop.
+
+ synchronized (lock) {
+ values = null;
+ state = 2;
+ }
+ }
+
+ void thread2() {
+ int[] vs1;
+ synchronized (lock) {
+ vs1 = values;
+ state = 1;
+ }
+
+ int s;
+ do {
+ synchronized (lock) {
+ s = state;
+ }
+ } while (s != 2); // Busy loop.
+
+ int[] vs2 = values;
+ int v1 = vs1[0];
+ int v2 = vs2[0];
+ System.out.println("b17689750TestMonitor failed: " + v1 + ", " + v2);
+ }
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 302db38..ae5b08f 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -526,7 +526,7 @@
$(call define-test-art-run-test-group,test-art-$(target)-run-test-$(test),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(test))_RULES)))))
$(foreach target, $(TARGET_TYPES), \
$(foreach address_size, $(ADDRESS_SIZES_$(call name-to-var,$(target))), $(eval \
- $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(address_size),$(ART_RUN_TEST_$(address_size)_RULES)))))
+ $(call define-test-art-run-test-group,test-art-$(target)-run-test$(address_size),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(address_size)_RULES)))))
# Clear variables now we're finished with them.
$(foreach target, $(TARGET_TYPES), $(eval ART_RUN_TEST_$(call name-to-var,$(target))_RULES :=))