Merge "Expect null referent in DequeuePendingReference()."
diff --git a/Android.mk b/Android.mk
index 8859d3a..b8ba9f2 100644
--- a/Android.mk
+++ b/Android.mk
@@ -33,7 +33,7 @@
# Don't bother with tests unless there is a test-art*, build-art*, or related target.
art_test_bother := false
-ifneq (,$(filter %tests test-art% valgrind-test-art% build-art%,$(MAKECMDGOALS)))
+ifneq (,$(filter tests test-art% valgrind-test-art% build-art% checkbuild,$(MAKECMDGOALS)))
art_test_bother := true
endif
@@ -119,6 +119,7 @@
include $(art_path)/build/Android.common_test.mk
include $(art_path)/build/Android.gtest.mk
include $(art_path)/test/Android.run-test.mk
+include $(art_path)/benchmark/Android.mk
# Sync test files to the target, depends upon all things that must be pushed to the target.
.PHONY: test-art-target-sync
diff --git a/benchmark/Android.mk b/benchmark/Android.mk
new file mode 100644
index 0000000..09aca98
--- /dev/null
+++ b/benchmark/Android.mk
@@ -0,0 +1,78 @@
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH := $(call my-dir)
+
+include art/build/Android.common_build.mk
+
+LIBARTBENCHMARK_COMMON_SRC_FILES := \
+ jni-perf/perf_jni.cc \
+ scoped-primitive-array/scoped_primitive_array.cc
+
+# $(1): target or host
+define build-libartbenchmark
+ ifneq ($(1),target)
+ ifneq ($(1),host)
+ $$(error expected target or host for argument 1, received $(1))
+ endif
+ endif
+
+ art_target_or_host := $(1)
+
+ include $(CLEAR_VARS)
+ LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
+ LOCAL_MODULE := libartbenchmark
+ ifeq ($$(art_target_or_host),target)
+ LOCAL_MODULE_TAGS := tests
+ endif
+ LOCAL_SRC_FILES := $(LIBARTBENCHMARK_COMMON_SRC_FILES)
+ LOCAL_SHARED_LIBRARIES += libart libbacktrace libnativehelper
+ LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime
+ LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
+ LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
+ ifeq ($$(art_target_or_host),target)
+ $(call set-target-local-clang-vars)
+ $(call set-target-local-cflags-vars,debug)
+ LOCAL_SHARED_LIBRARIES += libdl
+ LOCAL_MULTILIB := both
+ # LOCAL_MODULE_PATH_32 := $(ART_TARGET_OUT)/$(ART_TARGET_ARCH_32)
+ # LOCAL_MODULE_PATH_64 := $(ART_TARGET_OUT)/$(ART_TARGET_ARCH_64)
+ LOCAL_MODULE_TARGET_ARCH := $(ART_SUPPORTED_ARCH)
+ include $(BUILD_SHARED_LIBRARY)
+ else # host
+ LOCAL_CLANG := $(ART_HOST_CLANG)
+ LOCAL_CFLAGS := $(ART_HOST_CFLAGS) $(ART_HOST_DEBUG_CFLAGS)
+ LOCAL_ASFLAGS := $(ART_HOST_ASFLAGS)
+ LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -ldl -lpthread
+ LOCAL_IS_HOST_MODULE := true
+ LOCAL_MULTILIB := both
+ include $(BUILD_HOST_SHARED_LIBRARY)
+ endif
+
+ # Clear locally used variables.
+ art_target_or_host :=
+endef
+
+ifeq ($(ART_BUILD_TARGET),true)
+ $(eval $(call build-libartbenchmark,target))
+endif
+ifeq ($(ART_BUILD_HOST),true)
+ $(eval $(call build-libartbenchmark,host))
+endif
+
+# Clear locally used variables.
+LOCAL_PATH :=
+LIBARTBENCHMARK_COMMON_SRC_FILES :=
diff --git a/test/999-jni-perf/info.txt b/benchmark/jni-perf/info.txt
similarity index 100%
rename from test/999-jni-perf/info.txt
rename to benchmark/jni-perf/info.txt
diff --git a/test/999-jni-perf/perf-jni.cc b/benchmark/jni-perf/perf_jni.cc
similarity index 71%
rename from test/999-jni-perf/perf-jni.cc
rename to benchmark/jni-perf/perf_jni.cc
index 51eeb83..cd8d520 100644
--- a/test/999-jni-perf/perf-jni.cc
+++ b/benchmark/jni-perf/perf_jni.cc
@@ -24,18 +24,14 @@
namespace {
-extern "C" JNIEXPORT jint JNICALL Java_Main_perfJniEmptyCall(JNIEnv*, jobject) {
- return 0;
+extern "C" JNIEXPORT void JNICALL Java_JniPerfBenchmark_perfJniEmptyCall(JNIEnv*, jobject) {}
+
+extern "C" JNIEXPORT void JNICALL Java_JniPerfBenchmark_perfSOACall(JNIEnv* env, jobject) {
+ ScopedObjectAccess soa(env);
}
-extern "C" JNIEXPORT jint JNICALL Java_Main_perfSOACall(JNIEnv*, jobject) {
- ScopedObjectAccess soa(Thread::Current());
- return 0;
-}
-
-extern "C" JNIEXPORT jint JNICALL Java_Main_perfSOAUncheckedCall(JNIEnv*, jobject) {
+extern "C" JNIEXPORT void JNICALL Java_JniPerfBenchmark_perfSOAUncheckedCall(JNIEnv*, jobject) {
ScopedObjectAccessUnchecked soa(Thread::Current());
- return 0;
}
} // namespace
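
The renamed entry points above follow JNI's name-based resolution: an unregistered native method is looked up as Java_<fully-qualified class>_<method>, with package separators replaced by underscores (hence Java_JniPerfBenchmark_perfJniEmptyCall for the default-package benchmark class). A minimal sketch of the convention; the packaged class here is hypothetical, purely for illustration:

    #include <jni.h>

    // JNI resolves unregistered natives by name: Java_<class>_<method>,
    // with '.' and '/' in the fully-qualified class name turned into '_'.
    // Hypothetical Java side:  package com.example;
    //                          class Bench { native void run(); }
    extern "C" JNIEXPORT void JNICALL
    Java_com_example_Bench_run(JNIEnv*, jobject) {}
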
diff --git a/benchmark/jni-perf/src/JniPerfBenchmark.java b/benchmark/jni-perf/src/JniPerfBenchmark.java
new file mode 100644
index 0000000..b1b21ce
--- /dev/null
+++ b/benchmark/jni-perf/src/JniPerfBenchmark.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import com.google.caliper.SimpleBenchmark;
+
+public class JniPerfBenchmark extends SimpleBenchmark {
+ private static final String MSG = "ABCDE";
+
+ native void perfJniEmptyCall();
+ native void perfSOACall();
+ native void perfSOAUncheckedCall();
+
+ public void timeFastJNI(int N) {
+ // TODO: This might be an intrinsic.
+ for (long i = 0; i < N; i++) {
+ char c = MSG.charAt(2);
+ }
+ }
+
+ public void timeEmptyCall(int N) {
+ for (long i = 0; i < N; i++) {
+ perfJniEmptyCall();
+ }
+ }
+
+ public void timeSOACall(int N) {
+ for (long i = 0; i < N; i++) {
+ perfSOACall();
+ }
+ }
+
+ public void timeSOAUncheckedCall(int N) {
+ for (long i = 0; i < N; i++) {
+ perfSOAUncheckedCall();
+ }
+ }
+
+ {
+ System.loadLibrary("artbenchmark");
+ }
+}
diff --git a/test/998-scoped-primitive-array/info.txt b/benchmark/scoped-primitive-array/info.txt
similarity index 100%
rename from test/998-scoped-primitive-array/info.txt
rename to benchmark/scoped-primitive-array/info.txt
diff --git a/benchmark/scoped-primitive-array/scoped_primitive_array.cc b/benchmark/scoped-primitive-array/scoped_primitive_array.cc
new file mode 100644
index 0000000..1664157
--- /dev/null
+++ b/benchmark/scoped-primitive-array/scoped_primitive_array.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni.h"
+#include "ScopedPrimitiveArray.h"
+
+extern "C" JNIEXPORT jlong JNICALL Java_ScopedPrimitiveArrayBenchmark_measureByteArray(
+ JNIEnv* env, jclass, int reps, jbyteArray arr) {
+ jlong ret = 0;
+ for (jint i = 0; i < reps; ++i) {
+ ScopedByteArrayRO sc(env, arr);
+ ret += sc[0] + sc[sc.size() - 1];
+ }
+ return ret;
+}
+
+extern "C" JNIEXPORT jlong JNICALL Java_ScopedPrimitiveArrayBenchmark_measureShortArray(
+ JNIEnv* env, jclass, int reps, jshortArray arr) {
+ jlong ret = 0;
+ for (jint i = 0; i < reps; ++i) {
+ ScopedShortArrayRO sc(env, arr);
+ ret += sc[0] + sc[sc.size() - 1];
+ }
+ return ret;
+}
+
+extern "C" JNIEXPORT jlong JNICALL Java_ScopedPrimitiveArrayBenchmark_measureIntArray(
+ JNIEnv* env, jclass, int reps, jintArray arr) {
+ jlong ret = 0;
+ for (jint i = 0; i < reps; ++i) {
+ ScopedIntArrayRO sc(env, arr);
+ ret += sc[0] + sc[sc.size() - 1];
+ }
+ return ret;
+}
+
+extern "C" JNIEXPORT jlong JNICALL Java_ScopedPrimitiveArrayBenchmark_measureLongArray(
+ JNIEnv* env, jclass, int reps, jlongArray arr) {
+ jlong ret = 0;
+ for (jint i = 0; i < reps; ++i) {
+ ScopedLongArrayRO sc(env, arr);
+ ret += sc[0] + sc[sc.size() - 1];
+ }
+ return ret;
+}
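
ScopedByteArrayRO and its siblings come from libnativehelper's ScopedPrimitiveArray.h and wrap the JNI get/release element calls in RAII, which is exactly the per-iteration cost these loops measure. A simplified sketch of the read-only byte variant, assuming the usual JNI pinning API (illustrative only; the real header generates these wrappers with a macro and also handles null arrays):

    #include <cstddef>
    #include <jni.h>

    // Read-only scoped byte array, simplified from the libnativehelper idea.
    class ScopedByteArrayROSketch {
     public:
      ScopedByteArrayROSketch(JNIEnv* env, jbyteArray arr)
          : env_(env),
            array_(arr),
            elements_(env->GetByteArrayElements(arr, nullptr)),
            size_(static_cast<size_t>(env->GetArrayLength(arr))) {}
      ~ScopedByteArrayROSketch() {
        // JNI_ABORT: read-only access, so changes are never copied back.
        env_->ReleaseByteArrayElements(array_, elements_, JNI_ABORT);
      }
      jbyte operator[](size_t i) const { return elements_[i]; }
      size_t size() const { return size_; }
     private:
      JNIEnv* env_;
      jbyteArray array_;
      jbyte* elements_;
      size_t size_;
    };
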
diff --git a/benchmark/scoped-primitive-array/src/ScopedPrimitiveArrayBenchmark.java b/benchmark/scoped-primitive-array/src/ScopedPrimitiveArrayBenchmark.java
new file mode 100644
index 0000000..be276fe
--- /dev/null
+++ b/benchmark/scoped-primitive-array/src/ScopedPrimitiveArrayBenchmark.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import com.google.caliper.SimpleBenchmark;
+
+public class ScopedPrimitiveArrayBenchmark extends SimpleBenchmark {
+ // Each measure function adds the first and last elements of the array using ScopedPrimitiveArray.
+ static native long measureByteArray(int reps, byte[] arr);
+ static native long measureShortArray(int reps, short[] arr);
+ static native long measureIntArray(int reps, int[] arr);
+ static native long measureLongArray(int reps, long[] arr);
+
+ static final int smallLength = 16;
+ static final int mediumLength = 256;
+ static final int largeLength = 8096;
+ static byte[] smallBytes = new byte[smallLength];
+ static byte[] mediumBytes = new byte[mediumLength];
+ static byte[] largeBytes = new byte[largeLength];
+ static short[] smallShorts = new short[smallLength];
+ static short[] mediumShorts = new short[mediumLength];
+ static short[] largeShorts = new short[largeLength];
+ static int[] smallInts = new int[smallLength];
+ static int[] mediumInts = new int[mediumLength];
+ static int[] largeInts = new int[largeLength];
+ static long[] smallLongs = new long[smallLength];
+ static long[] mediumLongs = new long[mediumLength];
+ static long[] largeLongs = new long[largeLength];
+
+ public void timeSmallBytes(int reps) {
+ measureByteArray(reps, smallBytes);
+ }
+
+ public void timeMediumBytes(int reps) {
+ measureByteArray(reps, mediumBytes);
+ }
+
+ public void timeLargeBytes(int reps) {
+ measureByteArray(reps, largeBytes);
+ }
+
+ public void timeSmallShorts(int reps) {
+ measureShortArray(reps, smallShorts);
+ }
+
+ public void timeMediumShorts(int reps) {
+ measureShortArray(reps, mediumShorts);
+ }
+
+ public void timeLargeShorts(int reps) {
+ measureShortArray(reps, largeShorts);
+ }
+
+ public void timeSmallInts(int reps) {
+ measureIntArray(reps, smallInts);
+ }
+
+ public void timeMediumInts(int reps) {
+ measureIntArray(reps, mediumInts);
+ }
+
+ public void timeLargeInts(int reps) {
+ measureIntArray(reps, largeInts);
+ }
+
+ public void timeSmallLongs(int reps) {
+ measureLongArray(reps, smallLongs);
+ }
+
+ public void timeMediumLongs(int reps) {
+ measureLongArray(reps, mediumLongs);
+ }
+
+ public void timeLargeLongs(int reps) {
+ measureLongArray(reps, largeLongs);
+ }
+
+ {
+ System.loadLibrary("artbenchmark");
+ }
+}
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index acce68b..a443487 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -348,16 +348,6 @@
ART_HOST_CFLAGS += -DART_DEFAULT_INSTRUCTION_SET_FEATURES=default $(art_host_cflags)
ART_HOST_ASFLAGS += $(art_asflags)
-# Disable -Wpessimizing-move: triggered for art/runtime/base/variant_map.h:261
-# Adding this flag to art_clang_cflags doesn't work because -Wall gets added to
-# ART_HOST_CFLAGS (as a part of art_cflags) after
-# -Wno-pessimizing-move. Instead, add the flag here to both
-# ART_TARGET_CLANG_CFLAGS and ART_HOST_CFLAGS
-ifeq ($(ART_HOST_CLANG),true)
-ART_HOST_CFLAGS += -Wno-pessimizing-move
-endif
-ART_TARGET_CLANG_CFLAGS += -Wno-pessimizing-move
-
# The latest clang update trips over many of the files in art and never finishes
# compiling for aarch64 with -O3 (or -O2). Drop back to -O1 while we investigate
# to stop punishing the build server.
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 9895953..0a3f083 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -478,6 +478,8 @@
graph_->SetEntryBlock(entry_block_);
graph_->SetExitBlock(exit_block_);
+ graph_->SetHasTryCatch(code_item.tries_size_ != 0);
+
InitializeLocals(code_item.registers_size_);
graph_->SetMaximumNumberOfOutVRegs(code_item.outs_size_);
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 0bb90b2..3bbff6a 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -248,6 +248,12 @@
GenerateSlowPaths();
+ // Emit catch stack maps at the end of the stack map stream as expected by the
+ // runtime exception handler.
+ if (!is_baseline && graph_->HasTryCatch()) {
+ RecordCatchBlockInfo();
+ }
+
// Finalize instructions in the assembler.
Finalize(allocator);
}
@@ -805,6 +811,73 @@
stack_map_stream_.EndStackMapEntry();
}
+void CodeGenerator::RecordCatchBlockInfo() {
+ ArenaAllocator* arena = graph_->GetArena();
+
+ for (size_t i = 0, e = block_order_->Size(); i < e; ++i) {
+ HBasicBlock* block = block_order_->Get(i);
+ if (!block->IsCatchBlock()) {
+ continue;
+ }
+
+ uint32_t dex_pc = block->GetDexPc();
+ uint32_t num_vregs = graph_->GetNumberOfVRegs();
+ uint32_t inlining_depth = 0; // Inlining of catch blocks is not supported at the moment.
+ uint32_t native_pc = GetAddressOf(block);
+ uint32_t register_mask = 0; // Not used.
+
+ // The stack mask is not used, so we leave it empty.
+ ArenaBitVector* stack_mask = new (arena) ArenaBitVector(arena, 0, /* expandable */ true);
+
+ stack_map_stream_.BeginStackMapEntry(dex_pc,
+ native_pc,
+ register_mask,
+ stack_mask,
+ num_vregs,
+ inlining_depth);
+
+ HInstruction* current_phi = block->GetFirstPhi();
+ for (size_t vreg = 0; vreg < num_vregs; ++vreg) {
+ while (current_phi != nullptr && current_phi->AsPhi()->GetRegNumber() < vreg) {
+ HInstruction* next_phi = current_phi->GetNext();
+ DCHECK(next_phi == nullptr ||
+ current_phi->AsPhi()->GetRegNumber() <= next_phi->AsPhi()->GetRegNumber())
+ << "Phis need to be sorted by vreg number to keep this a linear-time loop.";
+ current_phi = next_phi;
+ }
+
+ if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) {
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
+ } else {
+ Location location = current_phi->GetLiveInterval()->ToLocation();
+ switch (location.GetKind()) {
+ case Location::kStackSlot: {
+ stack_map_stream_.AddDexRegisterEntry(
+ DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
+ break;
+ }
+ case Location::kDoubleStackSlot: {
+ stack_map_stream_.AddDexRegisterEntry(
+ DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
+ stack_map_stream_.AddDexRegisterEntry(
+ DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
+ ++vreg;
+ DCHECK_LT(vreg, num_vregs);
+ break;
+ }
+ default: {
+ // All catch phis must be allocated to a stack slot.
+ LOG(FATAL) << "Unexpected kind " << location.GetKind();
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+
+ stack_map_stream_.EndStackMapEntry();
+ }
+}
+
void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path) {
if (environment == nullptr) return;
@@ -975,6 +1048,13 @@
}
}
+bool CodeGenerator::IsImplicitNullCheckAllowed(HNullCheck* null_check) const {
+ return compiler_options_.GetImplicitNullChecks() &&
+ // Null checks which might throw into a catch block need to save live
+ // registers and therefore cannot be done implicitly.
+ !null_check->CanThrowIntoCatchBlock();
+}
+
bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
HInstruction* first_next_not_move = null_check->GetNextDisregardingMoves();
@@ -990,10 +1070,6 @@
return;
}
- if (!compiler_options_.GetImplicitNullChecks()) {
- return;
- }
-
if (!instr->CanDoImplicitNullCheckOn(instr->InputAt(0))) {
return;
}
@@ -1005,9 +1081,11 @@
// and needs to record the pc.
if (first_prev_not_move != nullptr && first_prev_not_move->IsNullCheck()) {
HNullCheck* null_check = first_prev_not_move->AsNullCheck();
- // TODO: The parallel moves modify the environment. Their changes need to be reverted
- // otherwise the stack maps at the throw point will not be correct.
- RecordPcInfo(null_check, null_check->GetDexPc());
+ if (IsImplicitNullCheckAllowed(null_check)) {
+ // TODO: The parallel moves modify the environment. Their changes need to be
+ // reverted otherwise the stack maps at the throw point will not be correct.
+ RecordPcInfo(null_check, null_check->GetDexPc());
+ }
}
}
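
RecordCatchBlockInfo above depends on catch phis arriving sorted by vreg number, so a single pass can pair each vreg with its phi or emit an empty (kNone) entry. A stand-alone sketch of that linear merge, with toy types standing in for HPhi and the stack map entries (names are illustrative, not the ART API):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Phi { uint32_t reg_number; };          // Stand-in for HPhi.
    enum class Kind { kNone, kInStack };          // Stand-in for dex register kinds.

    // Pairs vregs 0..num_vregs-1 with phis sorted by reg_number, in O(n).
    std::vector<Kind> PairVRegsWithPhis(const std::vector<Phi>& sorted_phis,
                                        uint32_t num_vregs) {
      std::vector<Kind> entries;
      size_t phi_index = 0;
      for (uint32_t vreg = 0; vreg < num_vregs; ++vreg) {
        while (phi_index < sorted_phis.size() &&
               sorted_phis[phi_index].reg_number < vreg) {
          ++phi_index;  // Skip phis (and equivalents) of lower vregs.
        }
        bool has_phi = phi_index < sorted_phis.size() &&
                       sorted_phis[phi_index].reg_number == vreg;
        entries.push_back(has_phi ? Kind::kInStack : Kind::kNone);
      }
      return entries;
    }
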
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index b3c4d72..a93d07a 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -237,6 +237,17 @@
bool CanMoveNullCheckToUser(HNullCheck* null_check);
void MaybeRecordImplicitNullCheck(HInstruction* instruction);
+ // Records a stack map which the runtime might use to set catch phi values
+ // during exception delivery.
+ // TODO: Replace with a catch-entering instruction that records the environment.
+ void RecordCatchBlockInfo();
+
+ // Returns true if implicit null checks are allowed in the compiler options
+ // and if the null check is not inside a try block. We currently cannot do
+ // implicit null checks in that case because we need the NullCheckSlowPath to
+ // save live registers, which may be needed by the runtime to set catch phis.
+ bool IsImplicitNullCheckAllowed(HNullCheck* null_check) const;
+
void AddSlowPath(SlowPathCode* slow_path) {
slow_paths_.Add(slow_path);
}
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index a4c58b0..b3e38f0 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -66,6 +66,10 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
arm_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
}
@@ -86,6 +90,10 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
arm_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
}
@@ -150,6 +158,10 @@
LocationSummary* locations = instruction_->GetLocations();
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
@@ -2741,8 +2753,10 @@
}
void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -3495,8 +3509,10 @@
}
void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -3524,7 +3540,7 @@
}
void InstructionCodeGeneratorARM::VisitNullCheck(HNullCheck* instruction) {
- if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
+ if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
GenerateImplicitNullCheck(instruction);
} else {
GenerateExplicitNullCheck(instruction);
@@ -3863,8 +3879,10 @@
}
void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
if (instruction->HasUses()) {
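
The slow-path change above (shown here for ARM and repeated for arm64, mips64, x86, and x86-64 below) follows one pattern: when the throwing instruction sits inside a try, spill the live registers before calling the runtime, because execution may resume at a catch handler in the same frame instead of unwinding out of it. A schematic sketch of that control flow, with callbacks standing in for the backend-specific SaveLiveRegisters and InvokeRuntime:

    #include <functional>

    // Schematic slow-path entry; the callbacks are placeholders for the
    // backend-specific SaveLiveRegisters and InvokeRuntime used above.
    void EmitThrowSlowPath(bool can_throw_into_catch_block,
                           const std::function<void()>& save_live_registers,
                           const std::function<void()>& invoke_runtime) {
      if (can_throw_into_catch_block) {
        // A catch handler in this method reads catch-phi values from the
        // stack, so live registers must be spilled before the runtime call.
        save_live_registers();
      }
      invoke_runtime();  // e.g. pThrowNullPointer / pThrowDivZero.
    }
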
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 6b1457b..5094f67 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -198,6 +198,10 @@
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
@@ -226,6 +230,10 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
arm64_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
@@ -338,6 +346,10 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
arm64_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
@@ -1580,8 +1592,10 @@
}
void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
if (instruction->HasUses()) {
@@ -1977,8 +1991,10 @@
}
void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -2875,8 +2891,10 @@
}
void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -2905,7 +2923,7 @@
}
void InstructionCodeGeneratorARM64::VisitNullCheck(HNullCheck* instruction) {
- if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
+ if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
GenerateImplicitNullCheck(instruction);
} else {
GenerateExplicitNullCheck(instruction);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 10942ef..8d60026 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -118,6 +118,10 @@
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
@@ -151,6 +155,10 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
instruction_,
instruction_->GetDexPc(),
@@ -269,6 +277,10 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
instruction_,
instruction_->GetDexPc(),
@@ -1566,8 +1578,10 @@
}
void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
if (instruction->HasUses()) {
@@ -1862,8 +1876,10 @@
}
void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -2824,8 +2840,10 @@
}
void LocationsBuilderMIPS64::VisitNullCheck(HNullCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -2852,7 +2870,7 @@
}
void InstructionCodeGeneratorMIPS64::VisitNullCheck(HNullCheck* instruction) {
- if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
+ if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
GenerateImplicitNullCheck(instruction);
} else {
GenerateExplicitNullCheck(instruction);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a5ad226..dc5c86e 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -56,6 +56,10 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
instruction_,
instruction_->GetDexPc(),
@@ -78,6 +82,10 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
instruction_,
instruction_->GetDexPc(),
@@ -125,6 +133,10 @@
__ Bind(GetEntryLabel());
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
InvokeRuntimeCallingConvention calling_convention;
x86_codegen->EmitParallelMoves(
locations->InAt(0),
@@ -3039,8 +3051,10 @@
}
void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
switch (instruction->GetType()) {
case Primitive::kPrimByte:
case Primitive::kPrimChar:
@@ -3984,9 +3998,11 @@
}
void LocationsBuilderX86::VisitNullCheck(HNullCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- Location loc = codegen_->GetCompilerOptions().GetImplicitNullChecks()
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ Location loc = codegen_->IsImplicitNullCheckAllowed(instruction)
? Location::RequiresRegister()
: Location::Any();
locations->SetInAt(0, loc);
@@ -4019,7 +4035,7 @@
__ cmpl(Address(ESP, obj.GetStackIndex()), Immediate(0));
} else {
DCHECK(obj.IsConstant()) << obj;
- DCHECK_EQ(obj.GetConstant()->AsIntConstant()->GetValue(), 0);
+ DCHECK(obj.GetConstant()->IsNullConstant());
__ jmp(slow_path->GetEntryLabel());
return;
}
@@ -4027,7 +4043,7 @@
}
void InstructionCodeGeneratorX86::VisitNullCheck(HNullCheck* instruction) {
- if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
+ if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
GenerateImplicitNullCheck(instruction);
} else {
GenerateExplicitNullCheck(instruction);
@@ -4432,8 +4448,10 @@
}
void LocationsBuilderX86::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
if (instruction->HasUses()) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 0f3eb74..0cf1089 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -57,6 +57,10 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
instruction_,
instruction_->GetDexPc(),
@@ -79,6 +83,10 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
instruction_,
instruction_->GetDexPc(),
@@ -177,6 +185,10 @@
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
@@ -3194,8 +3206,10 @@
}
void LocationsBuilderX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::Any());
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -3748,9 +3762,11 @@
}
void LocationsBuilderX86_64::VisitNullCheck(HNullCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- Location loc = codegen_->GetCompilerOptions().GetImplicitNullChecks()
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ Location loc = codegen_->IsImplicitNullCheckAllowed(instruction)
? Location::RequiresRegister()
: Location::Any();
locations->SetInAt(0, loc);
@@ -3783,7 +3799,7 @@
__ cmpl(Address(CpuRegister(RSP), obj.GetStackIndex()), Immediate(0));
} else {
DCHECK(obj.IsConstant()) << obj;
- DCHECK_EQ(obj.GetConstant()->AsIntConstant()->GetValue(), 0);
+ DCHECK(obj.GetConstant()->IsNullConstant());
__ jmp(slow_path->GetEntryLabel());
return;
}
@@ -3791,7 +3807,7 @@
}
void InstructionCodeGeneratorX86_64::VisitNullCheck(HNullCheck* instruction) {
- if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
+ if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
GenerateImplicitNullCheck(instruction);
} else {
GenerateExplicitNullCheck(instruction);
@@ -4175,8 +4191,10 @@
}
void LocationsBuilderX86_64::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
if (instruction->HasUses()) {
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 3e35835..074ed71 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -382,17 +382,6 @@
}
}
- // Check Phi uniqueness (no two Phis with the same type refer to the same register).
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- HPhi* phi = it.Current()->AsPhi();
- if (phi->GetNextEquivalentPhiWithSameType() != nullptr) {
- std::stringstream type_str;
- type_str << phi->GetType();
- AddError(StringPrintf("Equivalent phi (%d) found for VReg %d with type: %s",
- phi->GetId(), phi->GetRegNumber(), type_str.str().c_str()));
- }
- }
-
// Ensure try membership information is consistent.
if (block->IsCatchBlock()) {
if (block->IsTryBlock()) {
@@ -577,6 +566,35 @@
}
}
+static bool IsSameSizeConstant(HInstruction* insn1, HInstruction* insn2) {
+ return insn1->IsConstant()
+ && insn2->IsConstant()
+ && Primitive::Is64BitType(insn1->GetType()) == Primitive::Is64BitType(insn2->GetType());
+}
+
+static bool IsConstantEquivalent(HInstruction* insn1, HInstruction* insn2, BitVector* visited) {
+ if (insn1->IsPhi() &&
+ insn1->AsPhi()->IsVRegEquivalentOf(insn2) &&
+ insn1->InputCount() == insn2->InputCount()) {
+ // Testing only one of the two inputs for recursion is sufficient.
+ if (visited->IsBitSet(insn1->GetId())) {
+ return true;
+ }
+ visited->SetBit(insn1->GetId());
+
+ for (size_t i = 0, e = insn1->InputCount(); i < e; ++i) {
+ if (!IsConstantEquivalent(insn1->InputAt(i), insn2->InputAt(i), visited)) {
+ return false;
+ }
+ }
+ return true;
+ } else if (IsSameSizeConstant(insn1, insn2)) {
+ return insn1->AsConstant()->GetValueAsUint64() == insn2->AsConstant()->GetValueAsUint64();
+ } else {
+ return false;
+ }
+}
+
void SSAChecker::VisitPhi(HPhi* phi) {
VisitInstruction(phi);
@@ -636,6 +654,45 @@
}
}
}
+
+ // Ensure that catch phis are sorted by their vreg number, as required by
+ // the register allocator and code generator. This does not apply to normal
+ // phis, which can be constructed artificially.
+ if (phi->IsCatchPhi()) {
+ HInstruction* next_phi = phi->GetNext();
+ if (next_phi != nullptr && phi->GetRegNumber() > next_phi->AsPhi()->GetRegNumber()) {
+ AddError(StringPrintf("Catch phis %d and %d in block %d are not sorted by their "
+ "vreg numbers.",
+ phi->GetId(),
+ next_phi->GetId(),
+ phi->GetBlock()->GetBlockId()));
+ }
+ }
+
+ // Test phi equivalents. There should not be two phis of the same type for
+ // the same vreg, and equivalents should only be created for constants
+ // which were untyped in DEX.
+ for (HInstructionIterator phi_it(phi->GetBlock()->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
+ HPhi* other_phi = phi_it.Current()->AsPhi();
+ if (phi != other_phi && phi->GetRegNumber() == other_phi->GetRegNumber()) {
+ if (phi->GetType() == other_phi->GetType()) {
+ std::stringstream type_str;
+ type_str << phi->GetType();
+ AddError(StringPrintf("Equivalent phi (%d) found for VReg %d with type: %s.",
+ phi->GetId(),
+ phi->GetRegNumber(),
+ type_str.str().c_str()));
+ } else {
+ ArenaBitVector visited(GetGraph()->GetArena(), 0, /* expandable */ true);
+ if (!IsConstantEquivalent(phi, other_phi, &visited)) {
+ AddError(StringPrintf("Two phis (%d and %d) found for VReg %d but they "
+ "are not equivalents of constants.",
+ phi->GetId(),
+ other_phi->GetId(),
+ phi->GetRegNumber()));
+ }
+ }
+ }
+ }
}
void SSAChecker::HandleBooleanInput(HInstruction* instruction, size_t input_index) {
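
IsConstantEquivalent above must tolerate cycles: a loop phi can transitively appear among its own inputs, which is why visited phi ids are marked before recursing. A stand-alone sketch of the same cycle-safe structural comparison over a toy node type (illustrative, not the HInstruction API):

    #include <cstddef>
    #include <cstdint>
    #include <unordered_set>
    #include <vector>

    // Toy node: a constant (no inputs) or a phi over inputs.
    struct Node {
      int id;
      bool is_phi;
      uint64_t value;                  // Meaningful only when !is_phi.
      std::vector<const Node*> inputs;
    };

    bool Equivalent(const Node* a, const Node* b,
                    std::unordered_set<int>* visited) {
      if (a->is_phi && b->is_phi && a->inputs.size() == b->inputs.size()) {
        // Marking one side is enough to break cycles through back edges.
        if (!visited->insert(a->id).second) return true;
        for (size_t i = 0; i < a->inputs.size(); ++i) {
          if (!Equivalent(a->inputs[i], b->inputs[i], visited)) return false;
        }
        return true;
      }
      return !a->is_phi && !b->is_phi && a->value == b->value;
    }
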
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 2966076..efd4fcf 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -506,7 +506,7 @@
ReferenceTypeInfo::TypeHandle return_handle =
handles_->NewHandle(resolved_method->GetReturnType(true /* resolve */, pointer_size));
return_replacement->SetReferenceTypeInfo(ReferenceTypeInfo::Create(
- return_handle, return_handle->IsFinal() /* is_exact */));
+ return_handle, return_handle->CannotBeAssignedFromOtherTypes() /* is_exact */));
}
}
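
The switch from IsFinal() to CannotBeAssignedFromOtherTypes() here (and in reference_type_propagation.cc below) widens the set of types treated as exact: a final class admits no subtypes, but neither does an array whose component type has that property, even though array classes are never marked final. A sketch of the distinction under a minimal toy type model (not the mirror::Class implementation):

    // Toy model: an array type is "exact-only" iff its component is.
    struct Type {
      bool is_final;          // Primitives modeled as is_final = true.
      const Type* component;  // Non-null for array types.
    };

    bool CannotBeAssignedFromOtherTypes(const Type& t) {
      if (t.component != nullptr) {
        // e.g. String[] when String is final, or any primitive array.
        return CannotBeAssignedFromOtherTypes(*t.component);
      }
      return t.is_final;
    }
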
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
index bc4a663..ec4a9ec 100644
--- a/compiler/optimizing/licm_test.cc
+++ b/compiler/optimizing/licm_test.cc
@@ -63,8 +63,8 @@
// Provide boiler-plate instructions.
parameter_ = new (&allocator_) HParameterValue(0, Primitive::kPrimNot);
entry_->AddInstruction(parameter_);
- constant_ = new (&allocator_) HConstant(Primitive::kPrimInt);
- loop_preheader_->AddInstruction(constant_);
+ constant_ = graph_->GetIntConstant(42);
+ loop_preheader_->AddInstruction(new (&allocator_) HGoto());
loop_header_->AddInstruction(new (&allocator_) HIf(parameter_));
loop_body_->AddInstruction(new (&allocator_) HGoto());
exit_->AddInstruction(new (&allocator_) HExit());
@@ -99,23 +99,6 @@
// The actual LICM tests.
//
-TEST_F(LICMTest, ConstantHoisting) {
- BuildLoop();
-
- // Populate the loop with instructions: set array to constant.
- HInstruction* constant = new (&allocator_) HConstant(Primitive::kPrimDouble);
- loop_body_->InsertInstructionBefore(constant, loop_body_->GetLastInstruction());
- HInstruction* set_array = new (&allocator_) HArraySet(
- parameter_, constant_, constant, Primitive::kPrimDouble, 0);
- loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction());
-
- EXPECT_EQ(constant->GetBlock(), loop_body_);
- EXPECT_EQ(set_array->GetBlock(), loop_body_);
- PerformLICM();
- EXPECT_EQ(constant->GetBlock(), loop_preheader_);
- EXPECT_EQ(set_array->GetBlock(), loop_body_);
-}
-
TEST_F(LICMTest, FieldHoisting) {
BuildLoop();
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index b3cf0b3..cc12a10 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -20,6 +20,7 @@
#include "ssa_builder.h"
#include "base/bit_vector-inl.h"
#include "base/bit_utils.h"
+#include "mirror/class-inl.h"
#include "utils/growable_array.h"
#include "scoped_thread_state_change.h"
@@ -345,16 +346,6 @@
}
}
-bool HGraph::HasTryCatch() const {
- for (size_t i = 0, e = blocks_.Size(); i < e; ++i) {
- HBasicBlock* block = blocks_.Get(i);
- if (block != nullptr && (block->IsTryBlock() || block->IsCatchBlock())) {
- return true;
- }
- }
- return false;
-}
-
void HGraph::SimplifyCFG() {
// Simplify the CFG for future analysis, and code generation:
// (1): Split critical edges.
@@ -1773,7 +1764,7 @@
DCHECK(upper_bound_rti.IsSupertypeOf(rti))
<< " upper_bound_rti: " << upper_bound_rti
<< " rti: " << rti;
- DCHECK(!upper_bound_rti.GetTypeHandle()->IsFinal() || rti.IsExact());
+ DCHECK(!upper_bound_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes() || rti.IsExact());
}
}
reference_type_info_ = rti;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 5ec3f22..d52a4f7 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -157,6 +157,7 @@
number_of_in_vregs_(0),
temporaries_vreg_slots_(0),
has_bounds_checks_(false),
+ has_try_catch_(false),
debuggable_(debuggable),
current_instruction_id_(start_instruction_id),
dex_file_(dex_file),
@@ -282,7 +283,6 @@
}
uint16_t GetNumberOfVRegs() const {
- DCHECK(!in_ssa_form_);
return number_of_vregs_;
}
@@ -360,8 +360,8 @@
return instruction_set_;
}
- // TODO: Remove once the full compilation pipeline is enabled for try/catch.
- bool HasTryCatch() const;
+ bool HasTryCatch() const { return has_try_catch_; }
+ void SetHasTryCatch(bool value) { has_try_catch_ = value; }
private:
void VisitBlockForDominatorTree(HBasicBlock* block,
@@ -433,6 +433,10 @@
// Has bounds checks. We can totally skip BCE if it's false.
bool has_bounds_checks_;
+ // Flag whether there are any try/catch blocks in the graph. We will skip
+ // try/catch-related passes if false.
+ bool has_try_catch_;
+
// Indicates whether the graph should be compiled in a way that
// ensures full debuggability. If false, we can apply more
// aggressive optimizations that may limit the level of debugging.
@@ -2187,6 +2191,8 @@
virtual bool IsZero() const { return false; }
virtual bool IsOne() const { return false; }
+ virtual uint64_t GetValueAsUint64() const = 0;
+
DECLARE_INSTRUCTION(Constant);
private:
@@ -2199,6 +2205,8 @@
return true;
}
+ uint64_t GetValueAsUint64() const OVERRIDE { return 0; }
+
size_t ComputeHashCode() const OVERRIDE { return 0; }
DECLARE_INSTRUCTION(NullConstant);
@@ -2216,6 +2224,8 @@
public:
int32_t GetValue() const { return value_; }
+ uint64_t GetValueAsUint64() const OVERRIDE { return static_cast<uint64_t>(value_); }
+
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
DCHECK(other->IsIntConstant());
return other->AsIntConstant()->value_ == value_;
@@ -2247,6 +2257,8 @@
public:
int64_t GetValue() const { return value_; }
+ uint64_t GetValueAsUint64() const OVERRIDE { return value_; }
+
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
DCHECK(other->IsLongConstant());
return other->AsLongConstant()->value_ == value_;
@@ -2866,10 +2878,13 @@
public:
float GetValue() const { return value_; }
+ uint64_t GetValueAsUint64() const OVERRIDE {
+ return static_cast<uint64_t>(bit_cast<uint32_t, float>(value_));
+ }
+
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
DCHECK(other->IsFloatConstant());
- return bit_cast<uint32_t, float>(other->AsFloatConstant()->value_) ==
- bit_cast<uint32_t, float>(value_);
+ return other->AsFloatConstant()->GetValueAsUint64() == GetValueAsUint64();
}
size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
@@ -2907,10 +2922,11 @@
public:
double GetValue() const { return value_; }
+ uint64_t GetValueAsUint64() const OVERRIDE { return bit_cast<uint64_t, double>(value_); }
+
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
DCHECK(other->IsDoubleConstant());
- return bit_cast<uint64_t, double>(other->AsDoubleConstant()->value_) ==
- bit_cast<uint64_t, double>(value_);
+ return other->AsDoubleConstant()->GetValueAsUint64() == GetValueAsUint64();
}
size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
@@ -4003,6 +4019,13 @@
bool IsDead() const { return !is_live_; }
bool IsLive() const { return is_live_; }
+ bool IsVRegEquivalentOf(HInstruction* other) const {
+ return other != nullptr
+ && other->IsPhi()
+ && other->AsPhi()->GetBlock() == GetBlock()
+ && other->AsPhi()->GetRegNumber() == GetRegNumber();
+ }
+
// Returns the next equivalent phi (starting from the current one) or null if there is none.
// An equivalent phi is a phi having the same dex register and type.
// It assumes that phis with the same dex register are adjacent.
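
The GetValueAsUint64 overrides above compare floating-point constants by bit pattern rather than by value, which is what constant deduplication needs: NaN compares equal to itself, and -0.0 stays distinct from +0.0. A small demonstration, using a memcpy-based stand-in for art's bit_cast:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <limits>

    static uint64_t FloatBits(float v) {
      uint32_t bits;
      std::memcpy(&bits, &v, sizeof(bits));
      return static_cast<uint64_t>(bits);
    }

    int main() {
      // Bit-pattern equality differs from operator== exactly where it
      // matters for deduplicating constants:
      assert(FloatBits(0.0f) != FloatBits(-0.0f));  // yet 0.0f == -0.0f
      float nan = std::numeric_limits<float>::quiet_NaN();
      assert(FloatBits(nan) == FloatBits(nan));     // yet nan != nan
      return 0;
    }
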
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index f549ba8..4421460 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -485,34 +485,43 @@
RunOptimizations(optimizations1, arraysize(optimizations1), pass_observer);
+ // TODO: Update passes incompatible with try/catch so we have the same
+ // pipeline for all methods.
if (graph->HasTryCatch()) {
- // TODO: Update the optimizations below to work correctly under try/catch
- // semantics. The optimizations above suffice for running codegen
- // in the meanwhile.
- return;
+ HOptimization* optimizations2[] = {
+ side_effects,
+ gvn,
+ dce2,
+ // The codegen has a few assumptions that only the instruction simplifier
+ // can satisfy. For example, the code generator does not expect to see a
+ // HTypeConversion from a type to the same type.
+ simplify4,
+ };
+
+ RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);
+ } else {
+ MaybeRunInliner(graph, driver, stats, dex_compilation_unit, pass_observer, handles);
+
+ HOptimization* optimizations2[] = {
+ // BooleanSimplifier depends on the InstructionSimplifier removing
+ // redundant suspend checks to recognize empty blocks.
+ boolean_simplify,
+ fold2, // TODO: if we don't inline we can also skip fold2.
+ side_effects,
+ gvn,
+ licm,
+ bce,
+ simplify3,
+ dce2,
+ // The codegen has a few assumptions that only the instruction simplifier
+ // can satisfy. For example, the code generator does not expect to see a
+ // HTypeConversion from a type to the same type.
+ simplify4,
+ };
+
+ RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);
}
- MaybeRunInliner(graph, driver, stats, dex_compilation_unit, pass_observer, handles);
-
- HOptimization* optimizations2[] = {
- // BooleanSimplifier depends on the InstructionSimplifier removing redundant
- // suspend checks to recognize empty blocks.
- boolean_simplify,
- fold2, // TODO: if we don't inline we can also skip fold2.
- side_effects,
- gvn,
- licm,
- bce,
- simplify3,
- dce2,
- // The codegen has a few assumptions that only the instruction simplifier can
- // satisfy. For example, the code generator does not expect to see a
- // HTypeConversion from a type to the same type.
- simplify4,
- };
-
- RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);
-
RunArchOptimizations(driver->GetInstructionSet(), graph, stats, pass_observer);
}
@@ -560,17 +569,18 @@
CompilerDriver* compiler_driver,
const DexCompilationUnit& dex_compilation_unit,
PassObserver* pass_observer) const {
+ if (graph->HasTryCatch() && graph->IsDebuggable()) {
+ // TODO: b/24054676, stop creating catch phis eagerly to avoid special cases like phis without
+ // inputs.
+ return nullptr;
+ }
+
ScopedObjectAccess soa(Thread::Current());
StackHandleScopeCollection handles(soa.Self());
soa.Self()->TransitionFromRunnableToSuspended(kNative);
RunOptimizations(graph, compiler_driver, compilation_stats_.get(),
dex_compilation_unit, pass_observer, &handles);
- if (graph->HasTryCatch()) {
- soa.Self()->TransitionFromSuspendedToRunnable();
- return nullptr;
- }
-
AllocateRegisters(graph, codegen, pass_observer);
ArenaAllocator* arena = graph->GetArena();
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 0384e46..a88c543 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -167,7 +167,7 @@
ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
HBoundType* bound_type = new (arena) HBoundType(obj, class_rti, upper_can_be_null);
// Narrow the type as much as possible.
- if (class_rti.GetTypeHandle()->IsFinal()) {
+ if (class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes()) {
bound_type->SetReferenceTypeInfo(
ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ true));
} else if (obj_rti.IsValid() && class_rti.IsSupertypeOf(obj_rti)) {
@@ -380,7 +380,7 @@
} else if (klass != nullptr) {
ScopedObjectAccess soa(Thread::Current());
ReferenceTypeInfo::TypeHandle handle = handles_->NewHandle(klass);
- is_exact = is_exact || klass->IsFinal();
+ is_exact = is_exact || klass->CannotBeAssignedFromOtherTypes();
instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(handle, is_exact));
} else {
instr->SetReferenceTypeInfo(
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 6f1f6af..a4f1f45 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -56,6 +56,7 @@
long_spill_slots_(allocator, kDefaultNumberOfSpillSlots),
float_spill_slots_(allocator, kDefaultNumberOfSpillSlots),
double_spill_slots_(allocator, kDefaultNumberOfSpillSlots),
+ catch_phi_spill_slots_(0),
safepoints_(allocator, 0),
processing_core_registers_(false),
number_of_registers_(-1),
@@ -124,9 +125,7 @@
}
}
-void RegisterAllocator::BlockRegister(Location location,
- size_t start,
- size_t end) {
+void RegisterAllocator::BlockRegister(Location location, size_t start, size_t end) {
int reg = location.reg();
DCHECK(location.IsRegister() || location.IsFpuRegister());
LiveInterval* interval = location.IsRegister()
@@ -147,6 +146,19 @@
interval->AddRange(start, end);
}
+void RegisterAllocator::BlockRegisters(size_t start, size_t end, bool caller_save_only) {
+ for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) {
+ if (!caller_save_only || !codegen_->IsCoreCalleeSaveRegister(i)) {
+ BlockRegister(Location::RegisterLocation(i), start, end);
+ }
+ }
+ for (size_t i = 0; i < codegen_->GetNumberOfFloatingPointRegisters(); ++i) {
+ if (!caller_save_only || !codegen_->IsFloatingPointCalleeSaveRegister(i)) {
+ BlockRegister(Location::FpuRegisterLocation(i), start, end);
+ }
+ }
+}
+
void RegisterAllocator::AllocateRegistersInternal() {
// Iterate post-order, to ensure the list is sorted, and the last added interval
// is the one with the lowest start position.
@@ -159,6 +171,13 @@
for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
ProcessInstruction(inst_it.Current());
}
+
+ if (block->IsCatchBlock()) {
+ // By blocking all registers at the top of each catch block, we force
+ // intervals used after catch to spill.
+ size_t position = block->GetLifetimeStart();
+ BlockRegisters(position, position + 1);
+ }
}
number_of_registers_ = codegen_->GetNumberOfCoreRegisters();
@@ -275,21 +294,7 @@
}
if (locations->WillCall()) {
- // Block all registers.
- for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) {
- if (!codegen_->IsCoreCalleeSaveRegister(i)) {
- BlockRegister(Location::RegisterLocation(i),
- position,
- position + 1);
- }
- }
- for (size_t i = 0; i < codegen_->GetNumberOfFloatingPointRegisters(); ++i) {
- if (!codegen_->IsFloatingPointCalleeSaveRegister(i)) {
- BlockRegister(Location::FpuRegisterLocation(i),
- position,
- position + 1);
- }
- }
+ BlockRegisters(position, position + 1, /* caller_save_only */ true);
}
for (size_t i = 0; i < instruction->InputCount(); ++i) {
@@ -378,6 +383,10 @@
DCHECK(output.IsUnallocated() || output.IsConstant());
}
+ if (instruction->IsPhi() && instruction->AsPhi()->IsCatchPhi()) {
+ AllocateSpillSlotForCatchPhi(instruction->AsPhi());
+ }
+
// If needed, add interval to the list of unhandled intervals.
if (current->HasSpillSlot() || instruction->IsConstant()) {
// Split just before first register use.
@@ -1282,6 +1291,8 @@
}
HInstruction* defined_by = parent->GetDefinedBy();
+ DCHECK(!defined_by->IsPhi() || !defined_by->AsPhi()->IsCatchPhi());
+
if (defined_by->IsParameterValue()) {
// Parameters have their own stack slot.
parent->SetSpillSlot(codegen_->GetStackSlotOfParameter(defined_by->AsParameterValue()));
@@ -1298,12 +1309,6 @@
return;
}
- LiveInterval* last_sibling = interval;
- while (last_sibling->GetNextSibling() != nullptr) {
- last_sibling = last_sibling->GetNextSibling();
- }
- size_t end = last_sibling->GetEnd();
-
GrowableArray<size_t>* spill_slots = nullptr;
switch (interval->GetType()) {
case Primitive::kPrimDouble:
@@ -1336,6 +1341,7 @@
}
}
+ size_t end = interval->GetLastSibling()->GetEnd();
if (parent->NeedsTwoSpillSlots()) {
if (slot == spill_slots->Size()) {
// We need a new spill slot.
@@ -1371,6 +1377,28 @@
|| destination.IsDoubleStackSlot();
}
+void RegisterAllocator::AllocateSpillSlotForCatchPhi(HPhi* phi) {
+ LiveInterval* interval = phi->GetLiveInterval();
+
+ HInstruction* previous_phi = phi->GetPrevious();
+ DCHECK(previous_phi == nullptr ||
+ previous_phi->AsPhi()->GetRegNumber() <= phi->GetRegNumber())
+ << "Phis expected to be sorted by vreg number, so that equivalent phis are adjacent.";
+
+ if (phi->IsVRegEquivalentOf(previous_phi)) {
+ // This is an equivalent of the previous phi. We need to assign the same
+ // catch phi slot.
+ DCHECK(previous_phi->GetLiveInterval()->HasSpillSlot());
+ interval->SetSpillSlot(previous_phi->GetLiveInterval()->GetSpillSlot());
+ } else {
+ // Allocate a new spill slot for this catch phi.
+ // TODO: Reuse spill slots when intervals of phis from different catch
+ // blocks do not overlap.
+ interval->SetSpillSlot(catch_phi_spill_slots_);
+ catch_phi_spill_slots_ += interval->NeedsTwoSpillSlots() ? 2 : 1;
+ }
+}
+
void RegisterAllocator::AddMove(HParallelMove* move,
Location source,
Location destination,
@@ -1497,7 +1525,7 @@
DCHECK(IsValidDestination(destination)) << destination;
if (source.Equals(destination)) return;
- DCHECK_EQ(block->GetSuccessors().size(), 1u);
+ DCHECK_EQ(block->NumberOfNormalSuccessors(), 1u);
HInstruction* last = block->GetLastInstruction();
// We insert moves at exit for phi predecessors and connecting blocks.
// A block ending with an if cannot branch to a block with phis because
@@ -1724,7 +1752,7 @@
// If `from` has only one successor, we can put the moves at the exit of it. Otherwise
// we need to put the moves at the entry of `to`.
- if (from->GetSuccessors().size() == 1) {
+ if (from->NumberOfNormalSuccessors() == 1) {
InsertParallelMoveAtExitOf(from,
interval->GetParent()->GetDefinedBy(),
source->ToLocation(),
@@ -1768,17 +1796,25 @@
} else if (instruction->IsCurrentMethod()) {
// The current method is always at offset 0.
DCHECK(!current->HasSpillSlot() || (current->GetSpillSlot() == 0));
+ } else if (instruction->IsPhi() && instruction->AsPhi()->IsCatchPhi()) {
+ DCHECK(current->HasSpillSlot());
+ size_t slot = current->GetSpillSlot()
+ + GetNumberOfSpillSlots()
+ + reserved_out_slots_
+ - catch_phi_spill_slots_;
+ current->SetSpillSlot(slot * kVRegSize);
} else if (current->HasSpillSlot()) {
// Adjust the stack slot, now that we know the number of them for each type.
// The way this implementation lays out the stack is the following:
- // [parameter slots ]
- // [double spill slots ]
- // [long spill slots ]
- // [float spill slots ]
- // [int/ref values ]
- // [maximum out values ] (number of arguments for calls)
- // [art method ].
- uint32_t slot = current->GetSpillSlot();
+ // [parameter slots ]
+ // [catch phi spill slots ]
+ // [double spill slots ]
+ // [long spill slots ]
+ // [float spill slots ]
+ // [int/ref values ]
+ // [maximum out values ] (number of arguments for calls)
+ // [art method ].
+ size_t slot = current->GetSpillSlot();
switch (current->GetType()) {
case Primitive::kPrimDouble:
slot += long_spill_slots_.Size();
@@ -1828,12 +1864,22 @@
// Resolve non-linear control flow across branches. Order does not matter.
for (HLinearOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
- BitVector* live = liveness_.GetLiveInSet(*block);
- for (uint32_t idx : live->Indexes()) {
- HInstruction* current = liveness_.GetInstructionFromSsaIndex(idx);
- LiveInterval* interval = current->GetLiveInterval();
- for (HBasicBlock* predecessor : block->GetPredecessors()) {
- ConnectSplitSiblings(interval, predecessor, block);
+ if (block->IsCatchBlock()) {
+ // Instructions live at the top of catch blocks were forced to spill.
+ if (kIsDebugBuild) {
+ BitVector* live = liveness_.GetLiveInSet(*block);
+ for (uint32_t idx : live->Indexes()) {
+ LiveInterval* interval = liveness_.GetInstructionFromSsaIndex(idx)->GetLiveInterval();
+ DCHECK(!interval->GetSiblingAt(block->GetLifetimeStart())->HasRegister());
+ }
+ }
+ } else {
+ BitVector* live = liveness_.GetLiveInSet(*block);
+ for (uint32_t idx : live->Indexes()) {
+ LiveInterval* interval = liveness_.GetInstructionFromSsaIndex(idx)->GetLiveInterval();
+ for (HBasicBlock* predecessor : block->GetPredecessors()) {
+ ConnectSplitSiblings(interval, predecessor, block);
+ }
}
}
}
@@ -1841,16 +1887,20 @@
// Resolve phi inputs. Order does not matter.
for (HLinearOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
HBasicBlock* current = it.Current();
- for (HInstructionIterator inst_it(current->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
- HInstruction* phi = inst_it.Current();
- for (size_t i = 0, e = current->GetPredecessors().size(); i < e; ++i) {
- HBasicBlock* predecessor = current->GetPredecessor(i);
- DCHECK_EQ(predecessor->GetSuccessors().size(), 1u);
- HInstruction* input = phi->InputAt(i);
- Location source = input->GetLiveInterval()->GetLocationAt(
- predecessor->GetLifetimeEnd() - 1);
- Location destination = phi->GetLiveInterval()->ToLocation();
- InsertParallelMoveAtExitOf(predecessor, phi, source, destination);
+ if (current->IsCatchBlock()) {
+ // Catch phi values are set at runtime by the exception delivery mechanism.
+ } else {
+ for (HInstructionIterator inst_it(current->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ HInstruction* phi = inst_it.Current();
+ for (size_t i = 0, e = current->GetPredecessors().size(); i < e; ++i) {
+ HBasicBlock* predecessor = current->GetPredecessor(i);
+ DCHECK_EQ(predecessor->NumberOfNormalSuccessors(), 1u);
+ HInstruction* input = phi->InputAt(i);
+ Location source = input->GetLiveInterval()->GetLocationAt(
+ predecessor->GetLifetimeEnd() - 1);
+ Location destination = phi->GetLiveInterval()->ToLocation();
+ InsertParallelMoveAtExitOf(predecessor, phi, source, destination);
+ }
}
}
}
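The register_allocator.cc changes above hinge on one piece of arithmetic: a catch phi's slot index is assigned before linear scan (counting up in catch_phi_spill_slots_), and the slot-adjustment branch later re-bases it past the typed spill slots and the reserved out area, subtracting the catch-phi region back out because GetNumberOfSpillSlots() now includes it. A minimal standalone sketch of that computation, with illustrative slot counts (not ART code):

```cpp
#include <cstddef>
#include <cstdio>

constexpr std::size_t kVRegSize = 4;  // bytes per virtual register slot

// Mirrors `slot = GetSpillSlot() + GetNumberOfSpillSlots() + reserved_out_slots_
// - catch_phi_spill_slots_` from the hunk above. Catch phi slots sit above all
// typed spill slots, directly below the parameter area.
std::size_t CatchPhiSlotToByteOffset(std::size_t catch_phi_slot,
                                     std::size_t number_of_spill_slots,  // includes catch phi slots
                                     std::size_t reserved_out_slots,     // out args + art method
                                     std::size_t catch_phi_spill_slots) {
  std::size_t slot = catch_phi_slot
                     + number_of_spill_slots
                     + reserved_out_slots
                     - catch_phi_spill_slots;
  return slot * kVRegSize;
}

int main() {
  // With 2 catch phi slots inside a total of 6 spill slots and 3 reserved
  // out slots, catch phi indices 0 and 1 land at byte offsets 28 and 32.
  std::printf("%zu\n", CatchPhiSlotToByteOffset(0, 6, 3, 2));  // 28
  std::printf("%zu\n", CatchPhiSlotToByteOffset(1, 6, 3, 2));  // 32
}
```

This matches the updated stack layout comment in the hunk: catch phi slots end up directly below the parameter slots, above every typed spill-slot category.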
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index c29fe75..e030464 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -29,6 +29,7 @@
class HGraph;
class HInstruction;
class HParallelMove;
+class HPhi;
class LiveInterval;
class Location;
class SsaLivenessAnalysis;
@@ -72,7 +73,8 @@
return int_spill_slots_.Size()
+ long_spill_slots_.Size()
+ float_spill_slots_.Size()
- + double_spill_slots_.Size();
+ + double_spill_slots_.Size()
+ + catch_phi_spill_slots_;
}
static constexpr const char* kRegisterAllocatorPassName = "register";
@@ -99,10 +101,17 @@
// Update the interval for the register in `location` to cover [start, end).
void BlockRegister(Location location, size_t start, size_t end);
+ void BlockRegisters(size_t start, size_t end, bool caller_save_only = false);
- // Allocate a spill slot for the given interval.
+ // Allocate a spill slot for the given interval. Should be called in linear
+ // order of interval starting positions.
void AllocateSpillSlotFor(LiveInterval* interval);
+ // Allocate a spill slot for the given catch phi. Will allocate the same slot
+ // for phis which share the same vreg. Must be called in reverse linear order
+ // of lifetime positions and ascending vreg numbers for correctness.
+ void AllocateSpillSlotForCatchPhi(HPhi* phi);
+
// Connect adjacent siblings within blocks.
void ConnectSiblings(LiveInterval* interval);
@@ -202,6 +211,11 @@
GrowableArray<size_t> float_spill_slots_;
GrowableArray<size_t> double_spill_slots_;
+ // Spill slots allocated to catch phis. This category is special-cased because
+ // (1) slots are allocated prior to linear scan and in reverse linear order,
+ // (2) equivalent phis need to share slots despite having different types.
+ size_t catch_phi_spill_slots_;
+
// Instructions that need a safepoint.
GrowableArray<HInstruction*> safepoints_;
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 0c1831b..63635f3 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -186,14 +186,25 @@
// as live_in.
for (HBasicBlock* successor : block->GetSuccessors()) {
live_in->Union(GetLiveInSet(*successor));
- size_t phi_input_index = successor->GetPredecessorIndexOf(block);
- for (HInstructionIterator inst_it(successor->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
- HInstruction* phi = inst_it.Current();
- HInstruction* input = phi->InputAt(phi_input_index);
- input->GetLiveInterval()->AddPhiUse(phi, phi_input_index, block);
- // A phi input whose last user is the phi dies at the end of the predecessor block,
- // and not at the phi's lifetime position.
- live_in->SetBit(input->GetSsaIndex());
+ if (successor->IsCatchBlock()) {
+ // Inputs of catch phis will be kept alive through their environment
+ // uses, allowing the runtime to copy their values to the corresponding
+ // catch phi spill slots when an exception is thrown.
+ // The only instructions which may not be recorded in the environments
+ // are constants created by the SSA builder as typed equivalents of
+ // untyped constants from the bytecode, or phis with only such constants
+ // as inputs (verified by SSAChecker). Their raw binary value must
+ // therefore be the same and we only need to keep one of them alive.
+ } else {
+ size_t phi_input_index = successor->GetPredecessorIndexOf(block);
+ for (HInstructionIterator phi_it(successor->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
+ HInstruction* phi = phi_it.Current();
+ HInstruction* input = phi->InputAt(phi_input_index);
+ input->GetLiveInterval()->AddPhiUse(phi, phi_input_index, block);
+ // A phi input whose last user is the phi dies at the end of the predecessor block,
+ // and not at the phi's lifetime position.
+ live_in->SetBit(input->GetSsaIndex());
+ }
}
}
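Condensing the ssa_liveness_analysis.cc hunk above into a self-contained model (Block, Phi, and the liveness set are invented stand-ins, not ART types): catch successors still contribute their live-in sets, but the per-phi input bookkeeping is skipped, because those inputs stay live through environment uses instead of block-boundary moves.

```cpp
#include <cstddef>
#include <set>
#include <vector>

struct Phi { std::vector<int> input_ssa_ids; };  // one input per predecessor

struct Block {
  bool is_catch_block = false;
  std::set<int> live_in;             // SSA ids live at block entry
  std::vector<Phi> phis;
  std::vector<Block*> successors;
  // Stub for illustration; the real lookup finds this block's index among
  // the successor's predecessors.
  std::size_t PredecessorIndexOf(const Block* /*pred*/) const { return 0; }
};

void UnionSuccessorsIntoLiveIn(Block* block) {
  for (Block* successor : block->successors) {
    block->live_in.insert(successor->live_in.begin(), successor->live_in.end());
    if (successor->is_catch_block) {
      continue;  // catch phi inputs stay live via environments, not via moves
    }
    std::size_t input_index = successor->PredecessorIndexOf(block);
    for (const Phi& phi : successor->phis) {
      // A phi input dies at the end of the predecessor, so it is live-in here.
      block->live_in.insert(phi.input_ssa_ids[input_index]);
    }
  }
}
```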
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index a7044de..ef396cb 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -1209,6 +1209,9 @@
// A value that's not live in compiled code may still be needed in interpreter,
// due to code motion, etc.
if (env_holder->IsDeoptimize()) return true;
+ // A value live at a throwing instruction in a try block may be copied by
+ // the exception handler to its location at the top of the catch block.
+ if (env_holder->CanThrowIntoCatchBlock()) return true;
if (instruction->GetBlock()->GetGraph()->IsDebuggable()) return true;
return instruction->GetType() == Primitive::kPrimNot;
}
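A compact restatement of the keep-alive predicate after this hunk; the struct and function are illustrative stand-ins for the real ART helper and its arguments:

```cpp
// Simplified model of the environment keep-alive test; a value must stay
// live if the environment holder can deoptimize, can throw into a catch
// block (the new condition), the graph is debuggable, or it is a reference.
struct EnvHolder {
  bool is_deoptimize;
  bool can_throw_into_catch_block;  // condition added by this change
  bool graph_is_debuggable;
};

bool MustKeepLiveInEnvironment(const EnvHolder& env_holder, bool value_is_reference) {
  if (env_holder.is_deoptimize) return true;
  if (env_holder.can_throw_into_catch_block) return true;  // handler may read it
  if (env_holder.graph_is_debuggable) return true;
  return value_is_reference;
}
```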
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 1f1530f..1f0bac5 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -286,7 +286,7 @@
stack_map.SetDexRegisterMapOffset(
stack_map_encoding_,
code_info.GetStackMapAt(entry.same_dex_register_map_as_, stack_map_encoding_)
- .GetDexRegisterMapOffset(stack_map_encoding_));
+ .GetDexRegisterMapOffset(stack_map_encoding_));
} else {
// New dex registers maps should be added to the stack map.
MemoryRegion register_region = dex_register_locations_region.Subregion(
diff --git a/compiler/utils/test_dex_file_builder.h b/compiler/utils/test_dex_file_builder.h
index b1d7b4c..b6a228c 100644
--- a/compiler/utils/test_dex_file_builder.h
+++ b/compiler/utils/test_dex_file_builder.h
@@ -216,7 +216,7 @@
std::unique_ptr<const DexFile> dex_file(DexFile::Open(
&dex_file_data_[0], dex_file_data_.size(), dex_location, 0u, nullptr, &error_msg));
CHECK(dex_file != nullptr) << error_msg;
- return std::move(dex_file);
+ return dex_file;
}
uint32_t GetStringIdx(const std::string& type) {
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index e6710ed..f10799c 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -326,9 +326,9 @@
[referrer] "r"(referrer)
: "at", "v0", "v1", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", "k1",
"fp", "ra",
- "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13",
- "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26",
- "f27", "f28", "f29", "f30", "f31",
+ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
+ "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22",
+ "$f23", "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31",
"memory"); // clobber.
#elif defined(__mips__) && defined(__LP64__)
__asm__ __volatile__ (
@@ -680,9 +680,9 @@
[referrer] "r"(referrer), [hidden] "r"(hidden)
: "at", "v0", "v1", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", "k1",
"fp", "ra",
- "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13",
- "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26",
- "f27", "f28", "f29", "f30", "f31",
+ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
+ "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22",
+ "$f23", "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31",
"memory"); // clobber.
#elif defined(__mips__) && defined(__LP64__)
__asm__ __volatile__ (
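The two stub_test.cc hunks above only change register spellings in the inline-asm clobber lists: MIPS floating-point registers are written with the canonical $-prefix ($f0..$f31), presumably because the bare f0 spelling the old lists used is not recognized as an FPU register name by the compiler front end. A tiny standalone illustration of the fixed convention (assumes a MIPS target; not ART code):

```cpp
#if defined(__mips__)
// The asm writes $f0, so it must appear in the clobber list, spelled with
// the $ prefix exactly as in the patched lists above.
void ClobberSpellingDemo() {
  __asm__ __volatile__(
      "mtc1 $zero, $f0\n\t"   // move GPR $zero into FPU register $f0
      :                       // no outputs
      :                       // no inputs
      : "$f0", "memory");
}
#endif
```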
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index a84c20a..d6b2b7e 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -528,13 +528,12 @@
}
template<typename RootVisitorType>
-void ArtMethod::VisitRoots(RootVisitorType& visitor) {
+void ArtMethod::VisitRoots(RootVisitorType& visitor, size_t pointer_size) {
ArtMethod* interface_method = nullptr;
mirror::Class* klass = declaring_class_.Read();
if (UNLIKELY(klass != nullptr && klass->IsProxyClass())) {
// For normal methods, dex cache shortcuts will be visited through the declaring class.
// However, for proxies we need to keep the interface method alive, so we visit its roots.
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
interface_method = mirror::DexCache::GetElementPtrSize(
GetDexCacheResolvedMethods(pointer_size),
GetDexMethodIndex(),
@@ -542,11 +541,11 @@
DCHECK(interface_method != nullptr);
DCHECK_EQ(interface_method,
Runtime::Current()->GetClassLinker()->FindMethodForProxy(klass, this));
- interface_method->VisitRoots(visitor);
+ interface_method->VisitRoots(visitor, pointer_size);
}
visitor.VisitRootIfNonNull(declaring_class_.AddressWithoutBarrier());
- ProfilingInfo* profiling_info = GetProfilingInfo();
+ ProfilingInfo* profiling_info = GetProfilingInfo(pointer_size);
if (hotness_count_ != 0 && !IsNative() && profiling_info != nullptr) {
profiling_info->VisitRoots(visitor);
}
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 5dbea52..92648b9 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -223,28 +223,48 @@
return DexFile::kDexNoIndex;
}
-uintptr_t ArtMethod::ToNativeQuickPc(const uint32_t dex_pc, bool abort_on_failure) {
+uintptr_t ArtMethod::ToNativeQuickPc(const uint32_t dex_pc,
+ bool is_for_catch_handler,
+ bool abort_on_failure) {
const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
- MappingTable table(entry_point != nullptr ?
- GetMappingTable(EntryPointToCodePointer(entry_point), sizeof(void*)) : nullptr);
- if (table.TotalSize() == 0) {
- DCHECK_EQ(dex_pc, 0U);
- return 0; // Special no mapping/pc == 0 case
- }
- // Assume the caller wants a dex-to-pc mapping so check here first.
- typedef MappingTable::DexToPcIterator It;
- for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
- if (cur.DexPc() == dex_pc) {
- return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
+ if (IsOptimized(sizeof(void*))) {
+ // Optimized code does not have a mapping table. Search for the dex-to-pc
+ // mapping in stack maps.
+ CodeInfo code_info = GetOptimizedCodeInfo();
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+
+ // All stack maps are stored in the same CodeInfo region, safepoint stack
+ // maps first, then catch stack maps. We use `is_for_catch_handler` to select
+ // the order of iteration.
+ StackMap stack_map =
+ LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc, encoding)
+ : code_info.GetStackMapForDexPc(dex_pc, encoding);
+ if (stack_map.IsValid()) {
+ return reinterpret_cast<uintptr_t>(entry_point) + stack_map.GetNativePcOffset(encoding);
+ }
+ } else {
+ MappingTable table(entry_point != nullptr ?
+ GetMappingTable(EntryPointToCodePointer(entry_point), sizeof(void*)) : nullptr);
+ if (table.TotalSize() == 0) {
+ DCHECK_EQ(dex_pc, 0U);
+ return 0; // Special no mapping/pc == 0 case
+ }
+ // Assume the caller wants a dex-to-pc mapping so check here first.
+ typedef MappingTable::DexToPcIterator It;
+ for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
+ if (cur.DexPc() == dex_pc) {
+ return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
+ }
+ }
+ // Now check pc-to-dex mappings.
+ typedef MappingTable::PcToDexIterator It2;
+ for (It2 cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
+ if (cur.DexPc() == dex_pc) {
+ return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
+ }
}
}
- // Now check pc-to-dex mappings.
- typedef MappingTable::PcToDexIterator It2;
- for (It2 cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
- if (cur.DexPc() == dex_pc) {
- return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
- }
- }
+
if (abort_on_failure) {
LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
<< " in " << PrettyMethod(this);
@@ -581,12 +601,13 @@
}
ProfilingInfo* ArtMethod::CreateProfilingInfo() {
+ DCHECK(!Runtime::Current()->IsAotCompiler());
ProfilingInfo* info = ProfilingInfo::Create(this);
MemberOffset offset = ArtMethod::EntryPointFromJniOffset(sizeof(void*));
uintptr_t pointer = reinterpret_cast<uintptr_t>(this) + offset.Uint32Value();
if (!reinterpret_cast<Atomic<ProfilingInfo*>*>(pointer)->
CompareExchangeStrongSequentiallyConsistent(nullptr, info)) {
- return GetProfilingInfo();
+ return GetProfilingInfo(sizeof(void*));
} else {
return info;
}
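CreateProfilingInfo in the art_method.cc section above uses a publish-once pattern: build the object, attempt to install it with a sequentially consistent compare-and-swap (into the reused JNI entry-point field), and on failure return the instance another thread already installed. A generic, self-contained sketch of the pattern with plain std::atomic (ART uses its own Atomic wrapper):

```cpp
#include <atomic>

// The first thread to install its object wins; losers get the winner's
// pointer back. Default memory order is seq_cst, matching ART's
// CompareExchangeStrongSequentiallyConsistent.
template <typename T>
T* PublishOnce(std::atomic<T*>& slot, T* candidate) {
  T* expected = nullptr;
  if (slot.compare_exchange_strong(expected, candidate)) {
    return candidate;  // we won the race and installed our object
  }
  // Another thread installed first; 'expected' now holds its pointer and
  // 'candidate' should be released or ignored by the caller.
  return expected;
}
```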
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 3f2161f..f78c827 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -392,8 +392,8 @@
ProfilingInfo* CreateProfilingInfo() SHARED_REQUIRES(Locks::mutator_lock_);
- ProfilingInfo* GetProfilingInfo() {
- return reinterpret_cast<ProfilingInfo*>(GetEntryPointFromJni());
+ ProfilingInfo* GetProfilingInfo(size_t pointer_size) {
+ return reinterpret_cast<ProfilingInfo*>(GetEntryPointFromJniPtrSize(pointer_size));
}
void* GetEntryPointFromJni() {
@@ -442,7 +442,9 @@
SHARED_REQUIRES(Locks::mutator_lock_);
// Converts a dex PC to a native PC.
- uintptr_t ToNativeQuickPc(const uint32_t dex_pc, bool abort_on_failure = true)
+ uintptr_t ToNativeQuickPc(const uint32_t dex_pc,
+ bool is_for_catch_handler,
+ bool abort_on_failure = true)
SHARED_REQUIRES(Locks::mutator_lock_);
MethodReference ToMethodReference() SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -458,7 +460,7 @@
// NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
template<typename RootVisitorType>
- void VisitRoots(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS;
+ void VisitRoots(RootVisitorType& visitor, size_t pointer_size) NO_THREAD_SAFETY_ANALYSIS;
const DexFile* GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index d5691af..ad69676 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -2443,6 +2443,8 @@
private:
const JDWP::FrameId frame_id_;
JDWP::JdwpError error_;
+
+ DISALLOW_COPY_AND_ASSIGN(FindFrameVisitor);
};
JDWP::JdwpError Dbg::GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply) {
@@ -2822,7 +2824,6 @@
CatchLocationFinder(Thread* self, const Handle<mirror::Throwable>& exception, Context* context)
SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- self_(self),
exception_(exception),
handle_scope_(self),
this_at_throw_(handle_scope_.NewHandle<mirror::Object>(nullptr)),
@@ -2852,7 +2853,7 @@
}
if (dex_pc != DexFile::kDexNoIndex) {
- StackHandleScope<1> hs(self_);
+ StackHandleScope<1> hs(GetThread());
uint32_t found_dex_pc;
Handle<mirror::Class> exception_class(hs.NewHandle(exception_->GetClass()));
bool unused_clear_exception;
@@ -2887,7 +2888,6 @@
}
private:
- Thread* const self_;
const Handle<mirror::Throwable>& exception_;
StackHandleScope<1> handle_scope_;
MutableHandle<mirror::Object> this_at_throw_;
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 33d756e..9f84bd2 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -186,13 +186,15 @@
fake_stack.push_back(0);
}
- fake_stack.push_back(method_g_->ToNativeQuickPc(dex_pc)); // return pc
+ fake_stack.push_back(
+ method_g_->ToNativeQuickPc(dex_pc, /* is_for_catch_handler */ false)); // return pc
// Create/push fake 16byte stack frame for method g
fake_stack.push_back(reinterpret_cast<uintptr_t>(method_g_));
fake_stack.push_back(0);
fake_stack.push_back(0);
- fake_stack.push_back(method_f_->ToNativeQuickPc(dex_pc)); // return pc
+ fake_stack.push_back(
+ method_g_->ToNativeQuickPc(dex_pc, /* is_for_catch_handler */ false)); // return pc
// Create/push fake 16byte stack frame for method f
fake_stack.push_back(reinterpret_cast<uintptr_t>(method_f_));
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 4cbcdc5..399591b 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1472,7 +1472,7 @@
ArtMethod* method = gc_root_source->GetArtMethod();
LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
RootPrinter root_printer;
- method->VisitRoots(root_printer);
+ method->VisitRoots(root_printer, sizeof(void*));
}
ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index f485682..d437dd5 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -98,7 +98,7 @@
uint32_t dex_pc,
ArtMethod* callee ATTRIBUTE_UNUSED) {
DCHECK(this_object != nullptr);
- ProfilingInfo* info = caller->GetProfilingInfo();
+ ProfilingInfo* info = caller->GetProfilingInfo(sizeof(void*));
if (info != nullptr) {
info->AddInvokeInfo(thread, dex_pc, this_object->GetClass());
}
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 10b381d..93f2aea 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -842,10 +842,10 @@
}
}
for (ArtMethod& method : GetDirectMethods(pointer_size)) {
- method.VisitRoots(visitor);
+ method.VisitRoots(visitor, pointer_size);
}
for (ArtMethod& method : GetVirtualMethods(pointer_size)) {
- method.VisitRoots(visitor);
+ method.VisitRoots(visitor, pointer_size);
}
}
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
index 7e8c551..6a77a9e 100644
--- a/runtime/profiler.cc
+++ b/runtime/profiler.cc
@@ -57,7 +57,8 @@
class BoundedStackVisitor : public StackVisitor {
public:
BoundedStackVisitor(std::vector<std::pair<ArtMethod*, uint32_t>>* stack,
- Thread* thread, uint32_t max_depth)
+ Thread* thread,
+ uint32_t max_depth)
SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
stack_(stack),
@@ -80,9 +81,11 @@
}
private:
- std::vector<std::pair<ArtMethod*, uint32_t>>* stack_;
+ std::vector<std::pair<ArtMethod*, uint32_t>>* const stack_;
const uint32_t max_depth_;
uint32_t depth_;
+
+ DISALLOW_COPY_AND_ASSIGN(BoundedStackVisitor);
};
// This is called from either a thread list traversal or from a checkpoint. Regardless
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 60defba..d797d2a 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -47,7 +47,6 @@
QuickExceptionHandler* exception_handler)
SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- self_(self),
exception_(exception),
exception_handler_(exception_handler) {
}
@@ -90,14 +89,15 @@
}
if (dex_pc != DexFile::kDexNoIndex) {
bool clear_exception = false;
- StackHandleScope<1> hs(self_);
+ StackHandleScope<1> hs(GetThread());
Handle<mirror::Class> to_find(hs.NewHandle((*exception_)->GetClass()));
uint32_t found_dex_pc = method->FindCatchBlock(to_find, dex_pc, &clear_exception);
exception_handler_->SetClearException(clear_exception);
if (found_dex_pc != DexFile::kDexNoIndex) {
exception_handler_->SetHandlerMethod(method);
exception_handler_->SetHandlerDexPc(found_dex_pc);
- exception_handler_->SetHandlerQuickFramePc(method->ToNativeQuickPc(found_dex_pc));
+ exception_handler_->SetHandlerQuickFramePc(
+ method->ToNativeQuickPc(found_dex_pc, /* is_for_catch_handler */ true));
exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
return false; // End stack walk.
}
@@ -105,7 +105,6 @@
return true; // Continue stack walk.
}
- Thread* const self_;
// The exception we're looking for the catch block of.
Handle<mirror::Throwable>* exception_;
// The quick exception handler we're visiting for.
@@ -146,6 +145,107 @@
// Put exception back in root set with clear throw location.
self_->SetException(exception_ref.Get());
}
+ // If the handler is in optimized code, we need to set the catch environment.
+ if (*handler_quick_frame_ != nullptr &&
+ handler_method_ != nullptr &&
+ handler_method_->IsOptimized(sizeof(void*))) {
+ SetCatchEnvironmentForOptimizedHandler(&visitor);
+ }
+}
+
+static VRegKind ToVRegKind(DexRegisterLocation::Kind kind) {
+ // Slightly hacky since we cannot map DexRegisterLocationKind and VRegKind
+ // one to one. However, StackVisitor::GetVRegFromOptimizedCode only needs to
+ // distinguish between core/FPU registers and low/high bits on 64-bit.
+ switch (kind) {
+ case DexRegisterLocation::Kind::kConstant:
+ case DexRegisterLocation::Kind::kInStack:
+ // VRegKind is ignored.
+ return VRegKind::kUndefined;
+
+ case DexRegisterLocation::Kind::kInRegister:
+ // Selects core register. For 64-bit registers, selects low 32 bits.
+ return VRegKind::kLongLoVReg;
+
+ case DexRegisterLocation::Kind::kInRegisterHigh:
+ // Selects core register. For 64-bit registers, selects high 32 bits.
+ return VRegKind::kLongHiVReg;
+
+ case DexRegisterLocation::Kind::kInFpuRegister:
+ // Selects FPU register. For 64-bit registers, selects low 32 bits.
+ return VRegKind::kDoubleLoVReg;
+
+ case DexRegisterLocation::Kind::kInFpuRegisterHigh:
+ // Selects FPU register. For 64-bit registers, selects high 32 bits.
+ return VRegKind::kDoubleHiVReg;
+
+ default:
+ LOG(FATAL) << "Unexpected vreg location "
+ << DexRegisterLocation::PrettyDescriptor(kind);
+ UNREACHABLE();
+ }
+}
+
+void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor* stack_visitor) {
+ DCHECK(!is_deoptimization_);
+ DCHECK(*handler_quick_frame_ != nullptr) << "Method should not be called on upcall exceptions";
+ DCHECK(handler_method_ != nullptr && handler_method_->IsOptimized(sizeof(void*)));
+
+ if (kDebugExceptionDelivery) {
+ self_->DumpStack(LOG(INFO) << "Setting catch phis: ");
+ }
+
+ const size_t number_of_vregs = handler_method_->GetCodeItem()->registers_size_;
+ CodeInfo code_info = handler_method_->GetOptimizedCodeInfo();
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+
+ // Find stack map of the throwing instruction.
+ StackMap throw_stack_map =
+ code_info.GetStackMapForNativePcOffset(stack_visitor->GetNativePcOffset(), encoding);
+ DCHECK(throw_stack_map.IsValid());
+ DexRegisterMap throw_vreg_map =
+ code_info.GetDexRegisterMapOf(throw_stack_map, encoding, number_of_vregs);
+
+ // Find stack map of the catch block.
+ StackMap catch_stack_map = code_info.GetCatchStackMapForDexPc(GetHandlerDexPc(), encoding);
+ DCHECK(catch_stack_map.IsValid());
+ DexRegisterMap catch_vreg_map =
+ code_info.GetDexRegisterMapOf(catch_stack_map, encoding, number_of_vregs);
+
+ // Copy values between them.
+ for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
+ DexRegisterLocation::Kind catch_location =
+ catch_vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
+ if (catch_location == DexRegisterLocation::Kind::kNone) {
+ continue;
+ }
+ DCHECK(catch_location == DexRegisterLocation::Kind::kInStack);
+
+ // Get vreg value from its current location.
+ uint32_t vreg_value;
+ VRegKind vreg_kind = ToVRegKind(throw_vreg_map.GetLocationKind(vreg,
+ number_of_vregs,
+ code_info,
+ encoding));
+ bool get_vreg_success = stack_visitor->GetVReg(stack_visitor->GetMethod(),
+ vreg,
+ vreg_kind,
+ &vreg_value);
+ CHECK(get_vreg_success) << "VReg " << vreg << " was optimized out ("
+ << "method=" << PrettyMethod(stack_visitor->GetMethod()) << ", "
+ << "dex_pc=" << stack_visitor->GetDexPc() << ", "
+ << "native_pc_offset=" << stack_visitor->GetNativePcOffset() << ")";
+
+ // Copy value to the catch phi's stack slot.
+ int32_t slot_offset = catch_vreg_map.GetStackOffsetInBytes(vreg,
+ number_of_vregs,
+ code_info,
+ encoding);
+ ArtMethod** frame_top = stack_visitor->GetCurrentQuickFrame();
+ uint8_t* slot_address = reinterpret_cast<uint8_t*>(frame_top) + slot_offset;
+ uint32_t* slot_ptr = reinterpret_cast<uint32_t*>(slot_address);
+ *slot_ptr = vreg_value;
+ }
}
// Prepares deoptimization.
@@ -154,7 +254,6 @@
DeoptimizeStackVisitor(Thread* self, Context* context, QuickExceptionHandler* exception_handler)
SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- self_(self),
exception_handler_(exception_handler),
prev_shadow_frame_(nullptr),
stacked_shadow_frame_pushed_(false) {
@@ -171,7 +270,8 @@
// In case there is no deoptimized shadow frame for this upcall, we still
// need to push a nullptr to the stack since there is always a matching pop after
// the long jump.
- self_->PushStackedShadowFrame(nullptr, StackedShadowFrameType::kDeoptimizationShadowFrame);
+ GetThread()->PushStackedShadowFrame(nullptr,
+ StackedShadowFrameType::kDeoptimizationShadowFrame);
stacked_shadow_frame_pushed_ = true;
}
return false; // End stack walk.
@@ -200,18 +300,19 @@
CHECK(code_item != nullptr) << "No code item for " << PrettyMethod(m);
uint16_t num_regs = code_item->registers_size_;
uint32_t dex_pc = GetDexPc();
- StackHandleScope<2> hs(self_); // Dex cache, class loader and method.
+ StackHandleScope<2> hs(GetThread()); // Dex cache, class loader and method.
mirror::Class* declaring_class = m->GetDeclaringClass();
Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
- verifier::MethodVerifier verifier(self_, h_dex_cache->GetDexFile(), h_dex_cache, h_class_loader,
- &m->GetClassDef(), code_item, m->GetDexMethodIndex(),
- m, m->GetAccessFlags(), true, true, true, true);
+ verifier::MethodVerifier verifier(GetThread(), h_dex_cache->GetDexFile(), h_dex_cache,
+ h_class_loader, &m->GetClassDef(), code_item,
+ m->GetDexMethodIndex(), m, m->GetAccessFlags(), true, true,
+ true, true);
bool verifier_success = verifier.Verify();
CHECK(verifier_success) << PrettyMethod(m);
ShadowFrame* new_frame = ShadowFrame::CreateDeoptimizedFrame(num_regs, nullptr, m, dex_pc);
{
- ScopedStackedShadowFramePusher pusher(self_, new_frame,
+ ScopedStackedShadowFramePusher pusher(GetThread(), new_frame,
StackedShadowFrameType::kShadowFrameUnderConstruction);
const std::vector<int32_t> kinds(verifier.DescribeVRegs(dex_pc));
@@ -318,13 +419,13 @@
// Will be popped after the long jump after DeoptimizeStack(),
// right before interpreter::EnterInterpreterFromDeoptimize().
stacked_shadow_frame_pushed_ = true;
- self_->PushStackedShadowFrame(new_frame, StackedShadowFrameType::kDeoptimizationShadowFrame);
+ GetThread()->PushStackedShadowFrame(new_frame,
+ StackedShadowFrameType::kDeoptimizationShadowFrame);
}
prev_shadow_frame_ = new_frame;
return true;
}
- Thread* const self_;
QuickExceptionHandler* const exception_handler_;
ShadowFrame* prev_shadow_frame_;
bool stacked_shadow_frame_pushed_;
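Stepping back from the individual hunks: SetCatchEnvironmentForOptimizedHandler pairs two dex-register maps, one looked up by the throw site's native pc and one by the handler's dex pc, then copies every vreg the handler expects into its catch phi spill slot. A simplified, self-contained sketch of the copy step (all types invented; value resolution via GetVReg is folded into throw_values):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Invented stand-ins (not ART types) for the handler-side dex register map.
enum class LocKind { kNone, kInStack };
struct VRegLoc {
  LocKind kind = LocKind::kNone;
  int32_t stack_offset = 0;  // byte offset from the frame top when kInStack
};

// Mirrors the copy loop in the patch: each vreg the catch block expects has
// already been read from its throw-site location (register, stack slot, or
// constant) into throw_values; here it lands in the catch phi spill slot.
void TransferCatchEnvironment(const std::vector<VRegLoc>& catch_map,
                              const std::vector<uint32_t>& throw_values,
                              uint8_t* frame_top) {
  for (std::size_t vreg = 0; vreg < catch_map.size(); ++vreg) {
    if (catch_map[vreg].kind == LocKind::kNone) {
      continue;  // not live in the handler
    }
    // The register allocator guarantees catch phis live on the stack.
    uint32_t* slot =
        reinterpret_cast<uint32_t*>(frame_top + catch_map[vreg].stack_offset);
    *slot = throw_values[vreg];
  }
}
```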
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index 4db95a8..2e05c7e 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -49,11 +49,14 @@
// Deoptimize the stack to the upcall. For every compiled frame, we create a "copy"
// shadow frame that will be executed with the interpreter.
void DeoptimizeStack() SHARED_REQUIRES(Locks::mutator_lock_);
-
// Update the instrumentation stack by removing all methods that will be unwound
// by the exception being thrown.
void UpdateInstrumentationStack() SHARED_REQUIRES(Locks::mutator_lock_);
+ // Set up environment before delivering an exception to optimized code.
+ void SetCatchEnvironmentForOptimizedHandler(StackVisitor* stack_visitor)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
// Long jump either to a catch handler or to the upcall.
NO_RETURN void DoLongJump() SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index bbadb1e..6b144cf 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1399,19 +1399,20 @@
// Visiting the roots of these ArtMethods is not currently required since all the GcRoots are
// null.
BufferedRootVisitor<16> buffered_visitor(visitor, RootInfo(kRootVMInternal));
+ const size_t pointer_size = GetClassLinker()->GetImagePointerSize();
if (HasResolutionMethod()) {
- resolution_method_->VisitRoots(buffered_visitor);
+ resolution_method_->VisitRoots(buffered_visitor, pointer_size);
}
if (HasImtConflictMethod()) {
- imt_conflict_method_->VisitRoots(buffered_visitor);
+ imt_conflict_method_->VisitRoots(buffered_visitor, pointer_size);
}
if (imt_unimplemented_method_ != nullptr) {
- imt_unimplemented_method_->VisitRoots(buffered_visitor);
+ imt_unimplemented_method_->VisitRoots(buffered_visitor, pointer_size);
}
for (size_t i = 0; i < kLastCalleeSaveType; ++i) {
auto* m = reinterpret_cast<ArtMethod*>(callee_save_methods_[i]);
if (m != nullptr) {
- m->VisitRoots(buffered_visitor);
+ m->VisitRoots(buffered_visitor, pointer_size);
}
}
}
diff --git a/runtime/stack.cc b/runtime/stack.cc
index a765a3f..d956f0e 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -325,6 +325,10 @@
bool StackVisitor::GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const {
const bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
+
+ // X86 float registers are 64-bit and the logic below does not apply.
+ DCHECK(!is_float || kRuntimeISA != InstructionSet::kX86);
+
if (!IsAccessibleRegister(reg, is_float)) {
return false;
}
diff --git a/runtime/stack.h b/runtime/stack.h
index 2562738..5bbf003 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -441,6 +441,10 @@
void WalkStack(bool include_transitions = false)
SHARED_REQUIRES(Locks::mutator_lock_);
+ Thread* GetThread() const {
+ return thread_;
+ }
+
ArtMethod* GetMethod() const SHARED_REQUIRES(Locks::mutator_lock_);
bool IsShadowFrame() const {
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 07b79b5..a15a081 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -1115,7 +1115,7 @@
region_.StoreUnaligned<NumberOfStackMapsType>(kNumberOfStackMapsOffset, number_of_stack_maps);
}
- // Get the size all the stack maps of this CodeInfo object, in bytes.
+ // Get the size of all the stack maps of this CodeInfo object, in bytes.
size_t GetStackMapsSize(const StackMapEncoding& encoding) const {
return encoding.ComputeStackMapSize() * GetNumberOfStackMaps();
}
@@ -1174,9 +1174,23 @@
return StackMap();
}
+ // Searches the stack map list backwards because catch stack maps are stored
+ // at the end.
+ StackMap GetCatchStackMapForDexPc(uint32_t dex_pc, const StackMapEncoding& encoding) const {
+ for (size_t i = GetNumberOfStackMaps(); i > 0; --i) {
+ StackMap stack_map = GetStackMapAt(i - 1, encoding);
+ if (stack_map.GetDexPc(encoding) == dex_pc) {
+ return stack_map;
+ }
+ }
+ return StackMap();
+ }
+
StackMap GetStackMapForNativePcOffset(uint32_t native_pc_offset,
const StackMapEncoding& encoding) const {
- // TODO: stack maps are sorted by native pc, we can do a binary search.
+ // TODO: Safepoint stack maps are sorted by native_pc_offset but catch stack
+ // maps are not. If we knew that the method does not have try/catch,
+ // we could do binary search.
for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
StackMap stack_map = GetStackMapAt(i, encoding);
if (stack_map.GetNativePcOffset(encoding) == native_pc_offset) {
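The correctness of GetCatchStackMapForDexPc rests on the ordering contract stated above: safepoint maps occupy the front of the table (sorted by native pc) and catch maps are appended at the back, so catch lookups scan backwards while safepoint lookups scan forwards. A self-contained sketch under those assumptions (ToyStackMap stands in for StackMap):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// 'valid' plays the role of StackMap::IsValid().
struct ToyStackMap { uint32_t dex_pc; uint32_t native_pc_offset; bool valid; };

ToyStackMap FindStackMapForDexPc(const std::vector<ToyStackMap>& maps,
                                 uint32_t dex_pc,
                                 bool is_for_catch_handler) {
  if (is_for_catch_handler) {
    // Catch maps sit at the end, so search back-to-front.
    for (std::size_t i = maps.size(); i > 0; --i) {
      if (maps[i - 1].dex_pc == dex_pc) return maps[i - 1];
    }
  } else {
    // Safepoint maps sit at the front, so search front-to-back.
    for (const ToyStackMap& map : maps) {
      if (map.dex_pc == dex_pc) return map;
    }
  }
  return ToyStackMap{0u, 0u, false};  // invalid
}
```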
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 86ac140..6e10368 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1185,7 +1185,6 @@
SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread_in, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
os(os_in),
- thread(thread_in),
can_allocate(can_allocate_in),
last_method(nullptr),
last_line_number(0),
@@ -1233,7 +1232,7 @@
}
os << "\n";
if (frame_count == 0) {
- Monitor::DescribeWait(os, thread);
+ Monitor::DescribeWait(os, GetThread());
}
if (can_allocate) {
// Visit locks, but do not abort on errors. This would trigger a nested abort.
@@ -1269,7 +1268,6 @@
}
std::ostream& os;
- const Thread* thread;
const bool can_allocate;
ArtMethod* last_method;
int last_line_number;
@@ -1825,6 +1823,8 @@
uint32_t depth_;
uint32_t skip_depth_;
bool skipping_;
+
+ DISALLOW_COPY_AND_ASSIGN(CountStackDepthVisitor);
};
template<bool kTransactionActive>
@@ -1891,7 +1891,9 @@
// An array of the methods on the stack, the last entries are the dex PCs.
mirror::PointerArray* trace_;
// For cross compilation.
- size_t pointer_size_;
+ const size_t pointer_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(BuildInternalStackTraceVisitor);
};
template<bool kTransactionActive>
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 5a44947..e2743ce 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -73,6 +73,8 @@
private:
std::vector<ArtMethod*>* const method_trace_;
+
+ DISALLOW_COPY_AND_ASSIGN(BuildStackTraceVisitor);
};
static const char kTraceTokenChar = '*';
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 767e1de..55a77ac 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -22,7 +22,9 @@
#define CHECK_REGS_CONTAIN_REFS(dex_pc, abort_if_not_found, ...) do { \
int t[] = {__VA_ARGS__}; \
int t_size = sizeof(t) / sizeof(*t); \
- uintptr_t native_quick_pc = m->ToNativeQuickPc(dex_pc, abort_if_not_found); \
+ uintptr_t native_quick_pc = m->ToNativeQuickPc(dex_pc, \
+ /* is_for_catch_handler */ false, \
+ abort_if_not_found); \
if (native_quick_pc != UINTPTR_MAX) { \
CheckReferences(t, t_size, m->NativeQuickPcOffset(native_quick_pc)); \
} \
diff --git a/test/137-cfi/cfi.cc b/test/137-cfi/cfi.cc
index 59722ad..78f8842 100644
--- a/test/137-cfi/cfi.cc
+++ b/test/137-cfi/cfi.cc
@@ -235,6 +235,7 @@
return result ? JNI_TRUE : JNI_FALSE;
#else
+ UNUSED(pid_int);
return JNI_FALSE;
#endif
}
diff --git a/test/510-checker-try-catch/smali/RegisterAllocator.smali b/test/510-checker-try-catch/smali/RegisterAllocator.smali
new file mode 100644
index 0000000..fd3c84c
--- /dev/null
+++ b/test/510-checker-try-catch/smali/RegisterAllocator.smali
@@ -0,0 +1,94 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LRegisterAllocator;
+
+.super Ljava/lang/Object;
+
+# Test that catch phis are allocated to a stack slot, and that equivalent catch
+# phis are allocated to the same stack slot.
+
+## CHECK-START: int RegisterAllocator.testEquivalentCatchPhiSlot_Single(int, int, int) register (after)
+## CHECK-DAG: Phi reg:0 is_catch_phi:true locations:{{\[.*\]}}-><<SlotA1:\d+>>(sp)
+## CHECK-DAG: Phi reg:0 is_catch_phi:true locations:{{\[.*\]}}-><<SlotA2:\d+>>(sp)
+## CHECK-DAG: Phi reg:1 is_catch_phi:true locations:{{\[.*\]}}-><<SlotB:\d+>>(sp)
+## CHECK-EVAL: <<SlotA1>> == <<SlotA2>>
+## CHECK-EVAL: <<SlotB>> != <<SlotA1>>
+
+.method public static testEquivalentCatchPhiSlot_Single(III)I
+ .registers 8
+
+ :try_start
+ const/high16 v0, 0x40000000 # float 2
+ move v1, p0
+ div-int/2addr p0, p1
+
+ const/high16 v0, 0x41000000 # float 8
+ move v1, p1
+ div-int/2addr p0, p2
+ goto :return
+ :try_end
+ .catchall {:try_start .. :try_end} :catch_all
+
+ :catch_all
+ # 2x CatchPhi for v0, 1x for v1
+ if-eqz v1, :use_as_float
+
+ :use_as_int
+ goto :return
+
+ :use_as_float
+ float-to-int v0, v0
+
+ :return
+ return v0
+.end method
+
+# Test that wide catch phis are allocated to two stack slots.
+
+## CHECK-START: long RegisterAllocator.testEquivalentCatchPhiSlot_Wide(int, int, int) register (after)
+## CHECK-DAG: Phi reg:0 is_catch_phi:true locations:{{\[.*\]}}->2x<<SlotB1:\d+>>(sp)
+## CHECK-DAG: Phi reg:0 is_catch_phi:true locations:{{\[.*\]}}->2x<<SlotB2:\d+>>(sp)
+## CHECK-DAG: Phi reg:2 is_catch_phi:true locations:{{\[.*\]}}-><<SlotA:\d+>>(sp)
+## CHECK-EVAL: <<SlotB1>> == <<SlotB2>>
+## CHECK-EVAL: abs(<<SlotA>> - <<SlotB1>>) >= 8
+
+.method public static testEquivalentCatchPhiSlot_Wide(III)J
+ .registers 8
+
+ :try_start
+ const-wide/high16 v0, 0x4000000000000000L # double 2
+ move v2, p0
+ div-int/2addr p0, p1
+
+ const-wide/high16 v0, 0x4100000000000000L # double 8
+ move v2, p1
+ div-int/2addr p0, p2
+ goto :return
+ :try_end
+ .catchall {:try_start .. :try_end} :catch_all
+
+ :catch_all
+ # 2x CatchPhi for v0, 1x for v2
+ if-eqz v2, :use_as_double
+
+ :use_as_long
+ goto :return
+
+ :use_as_double
+ double-to-long v0, v0
+
+ :return
+ return-wide v0
+.end method
diff --git a/test/510-checker-try-catch/smali/Runtime.smali b/test/510-checker-try-catch/smali/Runtime.smali
new file mode 100644
index 0000000..19b43a3
--- /dev/null
+++ b/test/510-checker-try-catch/smali/Runtime.smali
@@ -0,0 +1,555 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LRuntime;
+.super Ljava/lang/Object;
+
+# The following tests all share the same structure, signature and return values:
+# - foo(false, false): normal path, returns 42
+# - foo(true, false): exceptional path #1, returns 3
+# - foo(false, true): exceptional path #2, returns 8
+# - foo(true, true): undefined
+
+
+# Test register allocation of 32-bit core intervals crossing catch block positions.
+
+## CHECK-START: int Runtime.testUseAfterCatch_int(boolean, boolean) register (after)
+## CHECK-NOT: Phi is_catch_phi:true
+
+.method public static testUseAfterCatch_int(ZZ)I
+ .registers 6
+
+ sget-object v0, LRuntime;->intArray:[I
+ const/4 v1, 0
+ aget v1, v0, v1
+ const/4 v2, 1
+ aget v2, v0, v2
+ const/4 v3, 2
+ aget v3, v0, v3
+
+ :try_start
+ invoke-static {p0}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+ invoke-static {p1}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+ :try_end
+ .catchall {:try_start .. :try_end} :catch_all
+
+ return v3 # Normal path return.
+
+ :catch_all
+ if-eqz p0, :second_throw
+ return v1 # Exceptional path #1 return.
+
+ :second_throw
+ return v2 # Exceptional path #2 return.
+.end method
+
+
+# Test register allocation of 64-bit core intervals crossing catch block positions.
+
+# The sum of the low and high 32 bits treated as integers is returned to prove
+# that both vregs were allocated correctly.
+
+## CHECK-START: int Runtime.testUseAfterCatch_long(boolean, boolean) register (after)
+## CHECK-NOT: Phi is_catch_phi:true
+
+.method public static testUseAfterCatch_long(ZZ)I
+ .registers 10
+
+ sget-object v0, LRuntime;->longArray:[J
+ const/4 v1, 0
+ aget-wide v1, v0, v1
+ const/4 v3, 1
+ aget-wide v3, v0, v3
+ const/4 v5, 2
+ aget-wide v5, v0, v5
+
+ :try_start
+ invoke-static {p0}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+ invoke-static {p1}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+ :try_end
+ .catchall {:try_start .. :try_end} :catch_all
+
+ const v0, 32
+ ushr-long v7, v5, v0
+ long-to-int v5, v5
+ long-to-int v7, v7
+ add-int/2addr v5, v7
+ return v5 # Normal path return.
+
+ :catch_all
+ const v0, 32
+ if-eqz p0, :second_throw
+
+ ushr-long v7, v1, v0
+ long-to-int v1, v1
+ long-to-int v7, v7
+ add-int/2addr v1, v7
+ return v1 # Exceptional path #1 return.
+
+ :second_throw
+ ushr-long v7, v3, v0
+ long-to-int v3, v3
+ long-to-int v7, v7
+ add-int/2addr v3, v7
+ return v3 # Exceptional path #2 return.
+.end method
+
+
+# Test register allocation of 32-bit floating-point intervals crossing catch block positions.
+
+## CHECK-START: int Runtime.testUseAfterCatch_float(boolean, boolean) register (after)
+## CHECK-NOT: Phi is_catch_phi:true
+
+.method public static testUseAfterCatch_float(ZZ)I
+ .registers 6
+
+ sget-object v0, LRuntime;->floatArray:[F
+ const/4 v1, 0
+ aget v1, v0, v1
+ const/4 v2, 1
+ aget v2, v0, v2
+ const/4 v3, 2
+ aget v3, v0, v3
+
+ :try_start
+ invoke-static {p0}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+ invoke-static {p1}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+ :try_end
+ .catchall {:try_start .. :try_end} :catch_all
+
+ float-to-int v3, v3
+ return v3 # Normal path return.
+
+ :catch_all
+ if-eqz p0, :second_throw
+ float-to-int v1, v1
+ return v1 # Exceptional path #1 return.
+
+ :second_throw
+ float-to-int v2, v2
+ return v2 # Exceptional path #2 return.
+.end method
+
+
+# Test register allocation of 64-bit floating-point intervals crossing catch block positions.
+
+## CHECK-START: int Runtime.testUseAfterCatch_double(boolean, boolean) register (after)
+## CHECK-NOT: Phi is_catch_phi:true
+
+.method public static testUseAfterCatch_double(ZZ)I
+ .registers 10
+
+ sget-object v0, LRuntime;->doubleArray:[D
+ const/4 v1, 0
+ aget-wide v1, v0, v1
+ const/4 v3, 1
+ aget-wide v3, v0, v3
+ const/4 v5, 2
+ aget-wide v5, v0, v5
+
+ :try_start
+ invoke-static {p0}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+ invoke-static {p1}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+ :try_end
+ .catchall {:try_start .. :try_end} :catch_all
+
+ double-to-int v5, v5
+ return v5 # Normal path return.
+
+ :catch_all
+ if-eqz p0, :second_throw
+ double-to-int v1, v1
+ return v1 # Exceptional path #1 return.
+
+ :second_throw
+ double-to-int v3, v3
+ return v3 # Exceptional path #2 return.
+.end method
+
+
+# Test catch-phi runtime support for constant values.
+
+# Register v0 holds different constants at two throwing instructions. Runtime is
+# expected to load them from the stack map and copy them to the catch phi's location.
+
+## CHECK-START: int Runtime.testCatchPhi_const(boolean, boolean) register (after)
+## CHECK-DAG: <<Const3:i\d+>> IntConstant 3
+## CHECK-DAG: <<Const8:i\d+>> IntConstant 8
+## CHECK-DAG: Phi [<<Const3>>,<<Const8>>] is_catch_phi:true
+
+.method public static testCatchPhi_const(ZZ)I
+ .registers 3
+
+ :try_start
+ const v0, 3
+ invoke-static {p0}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+
+ const v0, 8
+ invoke-static {p1}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+ :try_end
+ .catchall {:try_start .. :try_end} :catch_all
+
+ const v0, 42
+ return v0 # Normal path return.
+
+ :catch_all
+ return v0 # Exceptional path #1/#2 return.
+.end method
+
+
+# Test catch-phi runtime support for 32-bit values stored in core registers.
+
+# Register v0 holds different integer values at two throwing instructions.
+# Runtime is expected to find their location in the stack map and copy the value
+# to the location of the catch phi.
+
+## CHECK-START: int Runtime.testCatchPhi_int(boolean, boolean) register (after)
+## CHECK-DAG: <<Val1:i\d+>> ArrayGet
+## CHECK-DAG: <<Val2:i\d+>> ArrayGet
+## CHECK-DAG: Phi [<<Val1>>,<<Val2>>] is_catch_phi:true
+
+.method public static testCatchPhi_int(ZZ)I
+ .registers 6
+
+ sget-object v0, LRuntime;->intArray:[I
+ const/4 v1, 0
+ aget v1, v0, v1
+ const/4 v2, 1
+ aget v2, v0, v2
+ const/4 v3, 2
+ aget v3, v0, v3
+
+ :try_start
+ move v0, v1 # Set catch phi value
+ invoke-static {p0}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+
+ move v0, v2 # Set catch phi value
+ invoke-static {p1}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+ :try_end
+ .catchall {:try_start .. :try_end} :catch_all
+
+ return v3 # Normal path return.
+
+ :catch_all
+ return v0 # Exceptional path #1/#2 return.
+.end method
+
+
+# Test catch-phi runtime support for 64-bit values stored in core registers.
+
+# Register pair (v0, v1) holds different long values at two throwing instructions.
+# Runtime is expected to find their location in the stack map and copy the value
+# to the location of the catch phi. The sum of the low and high 32 bits treated
+# as integers is returned to prove that both vregs were copied.
+
+# Note: values will be spilled on x86 because of too few callee-save core registers.
+
+## CHECK-START: int Runtime.testCatchPhi_long(boolean, boolean) register (after)
+## CHECK-DAG: <<Val1:j\d+>> ArrayGet
+## CHECK-DAG: <<Val2:j\d+>> ArrayGet
+## CHECK-DAG: Phi [<<Val1>>,<<Val2>>] is_catch_phi:true
+
+.method public static testCatchPhi_long(ZZ)I
+ .registers 10
+
+ sget-object v0, LRuntime;->longArray:[J
+ const/4 v2, 0
+ aget-wide v2, v0, v2
+ const/4 v4, 1
+ aget-wide v4, v0, v4
+ const/4 v6, 2
+ aget-wide v6, v0, v6
+
+ :try_start
+ move-wide v0, v2 # Set catch phi value
+ invoke-static {p0}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+
+ move-wide v0, v4 # Set catch phi value
+ invoke-static {p1}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+ :try_end
+ .catchall {:try_start .. :try_end} :catch_all
+
+ const v2, 32
+ ushr-long v2, v6, v2
+ long-to-int v2, v2
+ long-to-int v6, v6
+ add-int/2addr v6, v2
+ return v6 # Normal path return.
+
+ :catch_all
+ const v2, 32
+ ushr-long v2, v0, v2
+ long-to-int v2, v2
+ long-to-int v0, v0
+ add-int/2addr v0, v2
+ return v0 # Exceptional path #1/#2 return.
+.end method
+
+
+# Test catch-phi runtime support for 32-bit values stored in FPU registers.
+
+# Register v0 holds different float values at two throwing instructions. Runtime
+# is expected to find their location in the stack map and copy the value to the
+# location of the catch phi. The value is converted to int and returned.
+
+# Note: values will be spilled on x86 as there are no callee-save FPU registers.
+
+## CHECK-START: int Runtime.testCatchPhi_float(boolean, boolean) register (after)
+## CHECK-DAG: <<Val1:f\d+>> ArrayGet
+## CHECK-DAG: <<Val2:f\d+>> ArrayGet
+## CHECK-DAG: Phi [<<Val1>>,<<Val2>>] is_catch_phi:true
+
+.method public static testCatchPhi_float(ZZ)I
+ .registers 6
+
+ sget-object v0, LRuntime;->floatArray:[F
+ const/4 v1, 0
+ aget v1, v0, v1
+ const/4 v2, 1
+ aget v2, v0, v2
+ const/4 v3, 2
+ aget v3, v0, v3
+
+ :try_start
+ move v0, v1 # Set catch phi value
+ invoke-static {p0}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+
+ move v0, v2 # Set catch phi value
+ invoke-static {p1}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+ :try_end
+ .catchall {:try_start .. :try_end} :catch_all
+
+ float-to-int v3, v3
+ return v3 # Normal path return.
+
+ :catch_all
+ float-to-int v0, v0
+ return v0 # Exceptional path #1/#2 return.
+.end method
+
+
+# Test catch-phi runtime support for 64-bit values stored in FPU registers.
+
+# Register pair (v0, v1) holds different double values at two throwing instructions.
+# Runtime is expected to find their location in the stack map and copy the value
+# to the location of the catch phi. The value is converted to int and returned.
+# Values were chosen so that all 64 bits are used.
+
+# Note: values will be spilled on x86 as there are no callee-save FPU registers.
+
+## CHECK-START: int Runtime.testCatchPhi_double(boolean, boolean) register (after)
+## CHECK-DAG: <<Val1:d\d+>> ArrayGet
+## CHECK-DAG: <<Val2:d\d+>> ArrayGet
+## CHECK-DAG: Phi [<<Val1>>,<<Val2>>] is_catch_phi:true
+
+.method public static testCatchPhi_double(ZZ)I
+ .registers 10
+
+ sget-object v0, LRuntime;->doubleArray:[D
+ const/4 v2, 0
+ aget-wide v2, v0, v2
+ const/4 v4, 1
+ aget-wide v4, v0, v4
+ const/4 v6, 2
+ aget-wide v6, v0, v6
+
+ :try_start
+ move-wide v0, v2 # Set catch phi value
+ invoke-static {p0}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+
+ move-wide v0, v4 # Set catch phi value
+ invoke-static {p1}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+ :try_end
+ .catchall {:try_start .. :try_end} :catch_all
+
+ double-to-int v6, v6
+ return v6
+
+ :catch_all
+ double-to-int v0, v0
+ return v0
+.end method
+
+# Test catch-phi runtime support for 32-bit values stored on the stack.
+
+# Register v0 holds different integer values at two throwing instructions.
+# These values were forced to spill by an always-throwing try/catch after their
+# definition. Runtime is expected to find their location in the stack map and
+# copy the value to the location of the catch phi. The value is then returned.
+
+## CHECK-START: int Runtime.testCatchPhi_singleSlot(boolean, boolean) register (after)
+## CHECK: <<Val1:i\d+>> ArrayGet
+## CHECK-NEXT: ParallelMove moves:[{{.*->}}{{\d+}}(sp)]
+## CHECK: <<Val2:i\d+>> ArrayGet
+## CHECK-NEXT: ParallelMove moves:[{{.*->}}{{\d+}}(sp)]
+## CHECK: Phi [<<Val1>>,<<Val2>>] is_catch_phi:true
+
+.method public static testCatchPhi_singleSlot(ZZ)I
+ .registers 6
+
+ sget-object v0, LRuntime;->intArray:[I
+ const/4 v1, 0
+ aget v1, v0, v1
+ const/4 v2, 1
+ aget v2, v0, v2
+ const/4 v3, 2
+ aget v3, v0, v3
+
+ # Insert a try/catch to force v1,v2,v3 to spill.
+ :try_start_spill
+ const/4 v0, 1
+ invoke-static {v0}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+ :try_end_spill
+ .catchall {:try_start_spill .. :try_end_spill} :catch_all_spill
+ return v0 # Unreachable
+ :catch_all_spill # Catch and continue
+
+ :try_start
+ move v0, v1 # Set catch phi value
+ invoke-static {p0}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+
+ move v0, v2 # Set catch phi value
+ invoke-static {p1}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+ :try_end
+ .catchall {:try_start .. :try_end} :catch_all
+
+ return v3 # Normal path return.
+
+ :catch_all
+ return v0 # Exceptional path #1/#2 return.
+.end method
+
+# Test catch-phi runtime support for 64-bit values stored on the stack.
+
+# Register pair (v0, v1) holds different double values at two throwing instructions.
+# These values were forced to spill by an always-throwing try/catch after their
+# definition. Runtime is expected to find their location in the stack map and
+# copy the value to the location of the catch phi. The value is converted to int
+# and returned. Values were chosen so that all 64 bits are used.
+
+## CHECK-START: int Runtime.testCatchPhi_doubleSlot(boolean, boolean) register (after)
+## CHECK: <<Val1:d\d+>> ArrayGet
+## CHECK-NEXT: ParallelMove moves:[{{.*->}}2x{{\d+}}(sp)]
+## CHECK: <<Val2:d\d+>> ArrayGet
+## CHECK-NEXT: ParallelMove moves:[{{.*->}}2x{{\d+}}(sp)]
+## CHECK: Phi [<<Val1>>,<<Val2>>] is_catch_phi:true
+
+.method public static testCatchPhi_doubleSlot(ZZ)I
+ .registers 10
+
+ sget-object v0, LRuntime;->doubleArray:[D
+ const/4 v2, 0
+ aget-wide v2, v0, v2
+ const/4 v4, 1
+ aget-wide v4, v0, v4
+ const/4 v6, 2
+ aget-wide v6, v0, v6
+
+ # Insert a try/catch to force (v2, v3), (v4, v5), (v6, v7) to spill.
+ :try_start_spill
+ const/4 v0, 1
+ invoke-static {v0}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+ :try_end_spill
+ .catchall {:try_start_spill .. :try_end_spill} :catch_all_spill
+ return v0 # Unreachable
+ :catch_all_spill # Catch and continue
+
+ :try_start
+ move-wide v0, v2 # Set catch phi value
+ invoke-static {p0}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+
+ move-wide v0, v4 # Set catch phi value
+ invoke-static {p1}, LRuntime;->$noinline$ThrowIfTrue(Z)V
+ :try_end
+ .catchall {:try_start .. :try_end} :catch_all
+
+ double-to-int v6, v6
+ return v6 # Normal path return.
+
+ :catch_all
+ double-to-int v0, v0
+ return v0 # Exceptional path #1/#2 return.
+.end method
+
+
+
+# Helper methods and initialization.
+
+.method public static $noinline$ThrowIfTrue(Z)V
+ .registers 2
+ if-nez p0, :throw
+ return-void
+
+ :throw
+ new-instance v0, Ljava/lang/Exception;
+ invoke-direct {v0}, Ljava/lang/Exception;-><init>()V
+ throw v0
+.end method
+
+.method public static constructor <clinit>()V
+ .registers 2
+
+ const/4 v1, 4
+
+ new-array v0, v1, [I
+ fill-array-data v0, :array_int
+ sput-object v0, LRuntime;->intArray:[I
+
+ new-array v0, v1, [J
+ fill-array-data v0, :array_long
+ sput-object v0, LRuntime;->longArray:[J
+
+ new-array v0, v1, [F
+ fill-array-data v0, :array_float
+ sput-object v0, LRuntime;->floatArray:[F
+
+ new-array v0, v1, [D
+ fill-array-data v0, :array_double
+ sput-object v0, LRuntime;->doubleArray:[D
+
+ return-void
+
+:array_int
+.array-data 4
+ 0x03 # int 3
+ 0x08 # int 8
+ 0x2a # int 42
+.end array-data
+
+:array_long
+.array-data 8
+ 0x0000000100000002L # long (1 << 32) + 2
+ 0x0000000500000003L # long (5 << 32) + 3
+ 0x0000001e0000000cL # long (30 << 32) + 12
+.end array-data
+
+:array_float
+.array-data 4
+ 0x40400000 # float 3
+ 0x41000000 # float 8
+ 0x42280000 # float 42
+.end array-data
+
+:array_double
+.array-data 8
+ 0x400b333333333333L # double 3.4
+ 0x4020cccccccccccdL # double 8.4
+ 0x4045333333333333L # double 42.4
+.end array-data
+.end method
+
+.field public static intArray:[I
+.field public static longArray:[J
+.field public static floatArray:[F
+.field public static doubleArray:[D
diff --git a/test/510-checker-try-catch/src/Main.java b/test/510-checker-try-catch/src/Main.java
index ae78ba0..25cdc0e 100644
--- a/test/510-checker-try-catch/src/Main.java
+++ b/test/510-checker-try-catch/src/Main.java
@@ -14,10 +14,55 @@
* limitations under the License.
*/
+import java.lang.reflect.Method;
+
public class Main {
// Workaround for b/18051191.
class InnerClass {}
- public static void main(String[] args) {}
+ public enum TestPath {
+ ExceptionalFlow1(true, false, 3),
+ ExceptionalFlow2(false, true, 8),
+ NormalFlow(false, false, 42);
+
+ TestPath(boolean arg1, boolean arg2, int expected) {
+ this.arg1 = arg1;
+ this.arg2 = arg2;
+ this.expected = expected;
+ }
+
+ public boolean arg1;
+ public boolean arg2;
+ public int expected;
+ }
+
+ public static void testMethod(String method) throws Exception {
+ Class<?> c = Class.forName("Runtime");
+ Method m = c.getMethod(method, new Class[] { boolean.class, boolean.class });
+
+ for (TestPath path : TestPath.values()) {
+ Object[] arguments = new Object[] { path.arg1, path.arg2 };
+ int actual = (Integer) m.invoke(null, arguments);
+
+ if (actual != path.expected) {
+ throw new Error("Method: \"" + method + "\", path: " + path + ", " +
+ "expected: " + path.expected + ", actual: " + actual);
+ }
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ testMethod("testUseAfterCatch_int");
+ testMethod("testUseAfterCatch_long");
+ testMethod("testUseAfterCatch_float");
+ testMethod("testUseAfterCatch_double");
+ testMethod("testCatchPhi_const");
+ testMethod("testCatchPhi_int");
+ testMethod("testCatchPhi_long");
+ testMethod("testCatchPhi_float");
+ testMethod("testCatchPhi_double");
+ testMethod("testCatchPhi_singleSlot");
+ testMethod("testCatchPhi_doubleSlot");
+ }
}
diff --git a/test/530-checker-regression-reftype-final/expected.txt b/test/530-checker-regression-reftype-final/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/530-checker-regression-reftype-final/expected.txt
diff --git a/test/530-checker-regression-reftype-final/info.txt b/test/530-checker-regression-reftype-final/info.txt
new file mode 100644
index 0000000..07789d6
--- /dev/null
+++ b/test/530-checker-regression-reftype-final/info.txt
@@ -0,0 +1 @@
+Regression test for the optimizing compiler that used to assume that array types are always exact.
diff --git a/test/530-checker-regression-reftype-final/smali/TestCase.smali b/test/530-checker-regression-reftype-final/smali/TestCase.smali
new file mode 100644
index 0000000..8fd7bb7
--- /dev/null
+++ b/test/530-checker-regression-reftype-final/smali/TestCase.smali
@@ -0,0 +1,59 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTestCase;
+.super Ljava/lang/Object;
+
+# The inliner used to assign an exact type to the artificial multiple-return
+# phi whenever the class type was final, which does not hold for arrays: array
+# classes are final, yet e.g. an Object[] value may actually be a MyClassB[].
+
+# The type information is only used by recursive calls to the inliner and is
+# overwritten by the next pass of reference type propagation. Since we do not
+# inline any methods from array classes, this bug cannot be triggered; we
+# therefore verify the behaviour using Checker.
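+
+# For orientation, $inline$getArray below corresponds roughly to this Java
+# (a sketch; the test itself is the smali):
+#
+#   static Object[] $inline$getArray(int i) {
+#     return (i != 0) ? new Main.MyClassA[2] : new Main.MyClassB[3];
+#   }
+#
+# The multiple-return phi merges MyClassA[] and MyClassB[], so it must keep the
+# non-exact type java.lang.Object[], as the Checker assertions below verify.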
+
+## CHECK-START: void TestCase.testInliner() reference_type_propagation_after_inlining (before)
+## CHECK-DAG: CheckCast [<<Phi:l\d+>>,{{l\d+}}]
+## CHECK-DAG: <<Phi>> Phi klass:java.lang.Object[] exact:false
+
+.method public static testInliner()V
+ .registers 3
+
+ invoke-static {}, Ljava/lang/System;->nanoTime()J
+ move-result-wide v0
+ long-to-int v0, v0
+
+ invoke-static {v0}, LTestCase;->$inline$getArray(I)[Ljava/lang/Object;
+ move-result-object v0
+
+ check-cast v0, [LMain$MyClassA;
+ return-void
+
+.end method
+
+.method public static $inline$getArray(I)[Ljava/lang/Object;
+ .registers 2
+ if-eqz p0, :else
+
+ :then
+ const/4 v0, 2
+ new-array v0, v0, [LMain$MyClassA;
+ return-object v0
+
+ :else
+ const/4 v0, 3
+ new-array v0, v0, [LMain$MyClassB;
+ return-object v0
+
+.end method
diff --git a/test/530-checker-regression-reftype-final/src/Main.java b/test/530-checker-regression-reftype-final/src/Main.java
new file mode 100644
index 0000000..f86b515
--- /dev/null
+++ b/test/530-checker-regression-reftype-final/src/Main.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+
+ class MyClassA {}
+ class MyClassB extends MyClassA {}
+
+ public static void main(String[] args) throws Exception {
+ testReferenceTypePropagation();
+ invokeTestInliner();
+ }
+
+ // Reference type propagation (RTP) used to assume that if a class is final,
+ // then the type must be exact. This does not hold for arrays, which are
+ // always final, i.e. not extendable, but may be assigned from arrays of the
+ // component type's subclasses.
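+ //
+ // Concretely (an illustrative example): with `Object[] a = new MyClassB[2];`,
+ // the old RTP would type `a` as exact Object[], and the simplifier would then
+ // fold `a instanceof MyClassA[]` to `false` even though it is true at runtime.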
+
+ public static void testReferenceTypePropagation() throws Exception {
+ boolean expectTrue;
+
+ // Bug #1: RTP would set the type of `array` to exact Object[]. The
+ // instruction simplifier would then simplify the instanceof to `false`.
+ Object[] array = $noinline$getArray();
+ expectTrue = array instanceof MyClassA[];
+ if (!expectTrue) {
+ throw new Exception("Incorrect type check.");
+ }
+
+ // Bug #2: This is the true branch of the instanceof above. The bound type
+ // for `array` would again be set to exact MyClassA[], incorrectly
+ // simplifying the second instanceof to `false`.
+ expectTrue = array instanceof MyClassB[];
+ if (!expectTrue) {
+ throw new Exception("Incorrect type bound.");
+ }
+ }
+
+ public static void invokeTestInliner() throws Exception {
+ Class<?> c = Class.forName("TestCase");
+ Method m = c.getMethod("testInliner");
+ m.invoke(null);
+ }
+
+ public static Object[] $noinline$getArray() {
+ if (doThrow) throw new Error();
+ return new MyClassB[2];
+ }
+
+ static boolean doThrow = false;
+}
diff --git a/test/998-scoped-primitive-array/check b/test/998-scoped-primitive-array/check
deleted file mode 100755
index 842bdc6..0000000
--- a/test/998-scoped-primitive-array/check
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Check that the string "error" isn't present
-if grep error "$2"; then
- exit 1
-else
- exit 0
-fi
diff --git a/test/998-scoped-primitive-array/expected.txt b/test/998-scoped-primitive-array/expected.txt
deleted file mode 100644
index a965a70..0000000
--- a/test/998-scoped-primitive-array/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-Done
diff --git a/test/998-scoped-primitive-array/scoped_primitive_array.cc b/test/998-scoped-primitive-array/scoped_primitive_array.cc
deleted file mode 100644
index c224a06..0000000
--- a/test/998-scoped-primitive-array/scoped_primitive_array.cc
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "jni.h"
-#include "ScopedPrimitiveArray.h"
-
-extern "C" JNIEXPORT jlong JNICALL Java_Main_measureByteArray(JNIEnv* env,
- jclass,
- jlong reps,
- jbyteArray arr) {
- jlong ret = 0;
- for (jlong i = 0; i < reps; ++i) {
- ScopedByteArrayRO sc(env, arr);
- ret += sc[0] + sc[sc.size() - 1];
- }
- return ret;
-}
-
-extern "C" JNIEXPORT jlong JNICALL Java_Main_measureShortArray(JNIEnv* env,
- jclass,
- jlong reps,
- jshortArray arr) {
- jlong ret = 0;
- for (jlong i = 0; i < reps; ++i) {
- ScopedShortArrayRO sc(env, arr);
- ret += sc[0] + sc[sc.size() - 1];
- }
- return ret;
-}
-
-extern "C" JNIEXPORT jlong JNICALL Java_Main_measureIntArray(JNIEnv* env,
- jclass,
- jlong reps,
- jintArray arr) {
- jlong ret = 0;
- for (jlong i = 0; i < reps; ++i) {
- ScopedIntArrayRO sc(env, arr);
- ret += sc[0] + sc[sc.size() - 1];
- }
- return ret;
-}
-
-extern "C" JNIEXPORT jlong JNICALL Java_Main_measureLongArray(JNIEnv* env,
- jclass,
- jlong reps,
- jlongArray arr) {
- jlong ret = 0;
- for (jlong i = 0; i < reps; ++i) {
- ScopedLongArrayRO sc(env, arr);
- ret += sc[0] + sc[sc.size() - 1];
- }
- return ret;
-}
diff --git a/test/998-scoped-primitive-array/src/Main.java b/test/998-scoped-primitive-array/src/Main.java
deleted file mode 100644
index 630e0dc..0000000
--- a/test/998-scoped-primitive-array/src/Main.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Main {
- public Main() {}
-
- // Measure adds the first and last element of the array by using ScopedPrimitiveArray.
- static native long measureByteArray(long reps, byte[] arr);
- static native long measureShortArray(long reps, short[] arr);
- static native long measureIntArray(long reps, int[] arr);
- static native long measureLongArray(long reps, long[] arr);
-
- static void checkEq(long expected, long value) {
- if (expected != value) {
- System.out.println("error: Expected " + expected + " but got " + value);
- }
- }
-
- static void runPerfTest(long reps) {
- for (int length = 1; length <= 8192; length *= 8) {
- byte[] bytes = new byte[length];
- bytes[0] = 1;
- bytes[length - 1] = 2;
- short[] shorts = new short[length];
- shorts[0] = 1;
- shorts[length - 1] = 2;
- int[] ints = new int[length];
- ints[0] = 1;
- ints[length - 1] = 2;
- long[] longs = new long[length];
- longs[0] = 1;
- longs[length - 1] = 2;
- long value = 0;
- long elapsed = 0;
- long start = 0;
-
- start = System.nanoTime();
- value = measureByteArray(reps, bytes);
- elapsed = System.nanoTime() - start;
- System.out.println("Byte length=" + length + " ns/op=" + (double) elapsed / reps);
- checkEq(value, reps * (long) (bytes[0] + bytes[length - 1]));
-
- start = System.nanoTime();
- value = measureShortArray(reps, shorts);
- elapsed = System.nanoTime() - start;
- System.out.println("Short length=" + length + " ns/op=" + (double) elapsed / reps);
- checkEq(value, reps * (long) (shorts[0] + shorts[length - 1]));
-
- start = System.nanoTime();
- value = measureIntArray(reps, ints);
- elapsed = System.nanoTime() - start;
- System.out.println("Int length=" + length + " ns/op=" + (double) elapsed / reps);
- checkEq(value, reps * (ints[0] + ints[length - 1]));
-
- start = System.nanoTime();
- value = measureLongArray(reps, longs);
- elapsed = System.nanoTime() - start;
- System.out.println("Long length=" + length + " ns/op=" + (double) elapsed / reps);
- checkEq(value, reps * (longs[0] + longs[length - 1]));
- }
- }
-
- public static void main(String[] args) {
- System.loadLibrary(args[0]);
- long iterations = 2000000;
- if (args.length > 1) {
- iterations = Long.parseLong(args[1], 10);
- }
- runPerfTest(iterations);
- System.out.println("Done");
- }
-}
diff --git a/test/999-jni-perf/check b/test/999-jni-perf/check
deleted file mode 100755
index ffbb8cf..0000000
--- a/test/999-jni-perf/check
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Only compare the last line.
-tail -n 1 "$2" | diff --strip-trailing-cr -q "$1" - >/dev/null
\ No newline at end of file
diff --git a/test/999-jni-perf/expected.txt b/test/999-jni-perf/expected.txt
deleted file mode 100644
index a965a70..0000000
--- a/test/999-jni-perf/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-Done
diff --git a/test/999-jni-perf/src/Main.java b/test/999-jni-perf/src/Main.java
deleted file mode 100644
index 032e700..0000000
--- a/test/999-jni-perf/src/Main.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Main {
- public Main() {
- }
-
- private static final String MSG = "ABCDE";
-
- native int perfJniEmptyCall();
- native int perfSOACall();
- native int perfSOAUncheckedCall();
-
- int runPerfTest(long N) {
- long start = System.nanoTime();
- for (long i = 0; i < N; i++) {
- char c = MSG.charAt(2);
- }
- long elapse = System.nanoTime() - start;
- System.out.println("Fast JNI (charAt): " + (double)elapse / N);
-
- start = System.nanoTime();
- for (long i = 0; i < N; i++) {
- perfJniEmptyCall();
- }
- elapse = System.nanoTime() - start;
- System.out.println("Empty call: " + (double)elapse / N);
-
- start = System.nanoTime();
- for (long i = 0; i < N; i++) {
- perfSOACall();
- }
- elapse = System.nanoTime() - start;
- System.out.println("SOA call: " + (double)elapse / N);
-
- start = System.nanoTime();
- for (long i = 0; i < N; i++) {
- perfSOAUncheckedCall();
- }
- elapse = System.nanoTime() - start;
- System.out.println("SOA unchecked call: " + (double)elapse / N);
-
- return 0;
- }
-
- public static void main(String[] args) {
- System.loadLibrary(args[0]);
- long iterations = 1000000;
- if (args.length > 1) {
- iterations = Long.parseLong(args[1], 10);
- }
- Main m = new Main();
- m.runPerfTest(iterations);
- System.out.println("Done");
- }
-}
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index af945fb..7f05a04 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -38,9 +38,7 @@
457-regs/regs_jni.cc \
461-get-reference-vreg/get_reference_vreg_jni.cc \
466-get-live-vreg/get_live_vreg_jni.cc \
- 497-inlining-and-class-loader/clear_dex_cache.cc \
- 998-scoped-primitive-array/scoped_primitive_array.cc \
- 999-jni-perf/perf-jni.cc
+ 497-inlining-and-class-loader/clear_dex_cache.cc
ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttestd.so