Merge "Rename target.linux[_x86[_64]] to target.linux_glibc[_x86[_64]]"
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index f4f8d49..3247e54 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -84,12 +84,6 @@
HOST_CORE_DEX_FILES := $(foreach jar,$(HOST_CORE_JARS), $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
-# Classpath for Jack compilation: we only need core-libart.
-HOST_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj-hostdex,t,COMMON)/classes.jack $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack
-HOST_JACK_CLASSPATH := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj-hostdex,t,COMMON)/classes.jack):$(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack)
-TARGET_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj, ,COMMON)/classes.jack $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack
-TARGET_JACK_CLASSPATH := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj, ,COMMON)/classes.jack):$(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack)
-
ART_HOST_DEX_DEPENDENCIES := $(foreach jar,$(HOST_CORE_JARS),$(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
ART_TARGET_DEX_DEPENDENCIES := $(foreach jar,$(TARGET_CORE_JARS),$(TARGET_OUT_JAVA_LIBRARIES)/$(jar).jar)
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 1ae79ac..37e6d42 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -202,9 +202,9 @@
# $(5): a make variable used to collate target dependencies, e.g ART_TEST_TARGET_OAT_HelloWorld_DEX
# $(6): a make variable used to collate host dependencies, e.g ART_TEST_HOST_OAT_HelloWorld_DEX
#
-# If the input test directory contains a file called main.list and main.jpp,
+# If the input test directory contains a file called main.list,
# then a multi-dex file is created passing main.list as the --main-dex-list
-# argument to dx and main.jpp for Jack.
+# argument to dx.
define build-art-test-dex
ifeq ($(ART_BUILD_TARGET),true)
include $(CLEAR_VARS)
@@ -219,7 +219,6 @@
LOCAL_DEX_PREOPT_IMAGE_LOCATION := $(TARGET_CORE_IMG_OUT)
ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex
- LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp
endif
include $(BUILD_JAVA_LIBRARY)
$(5) := $$(LOCAL_INSTALLED_MODULE)
@@ -235,7 +234,6 @@
LOCAL_DEX_PREOPT_IMAGE := $(HOST_CORE_IMG_LOCATION)
ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex
- LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp
endif
include $(BUILD_HOST_DALVIK_JAVA_LIBRARY)
$(6) := $$(LOCAL_INSTALLED_MODULE)
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 1a2494a..cf1cbd5 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -360,18 +360,36 @@
}
// Shift operations implicitly mask the shift amount according to the type width. Get rid of
- // unnecessary explicit masking operations on the shift amount.
+ // unnecessary And/Or/Xor/Add/Sub/TypeConversion operations on the shift amount that do not
+ // affect the relevant bits.
// Replace code looking like
- // AND masked_shift, shift, <superset of implicit mask>
- // SHL dst, value, masked_shift
+ // AND adjusted_shift, shift, <superset of implicit mask>
+ // [OR/XOR/ADD/SUB adjusted_shift, shift, <value not overlapping with implicit mask>]
+ // [<conversion-from-integral-non-64-bit-type> adjusted_shift, shift]
+ // SHL dst, value, adjusted_shift
// with
// SHL dst, value, shift
- if (shift_amount->IsAnd()) {
- HAnd* and_insn = shift_amount->AsAnd();
- HConstant* mask = and_insn->GetConstantRight();
- if ((mask != nullptr) && ((Int64FromConstant(mask) & implicit_mask) == implicit_mask)) {
- instruction->ReplaceInput(and_insn->GetLeastConstantLeft(), 1);
+ if (shift_amount->IsAnd() ||
+ shift_amount->IsOr() ||
+ shift_amount->IsXor() ||
+ shift_amount->IsAdd() ||
+ shift_amount->IsSub()) {
+ int64_t required_result = shift_amount->IsAnd() ? implicit_mask : 0;
+ HBinaryOperation* bin_op = shift_amount->AsBinaryOperation();
+ HConstant* mask = bin_op->GetConstantRight();
+ if (mask != nullptr && (Int64FromConstant(mask) & implicit_mask) == required_result) {
+ instruction->ReplaceInput(bin_op->GetLeastConstantLeft(), 1);
RecordSimplification();
+ return;
+ }
+ } else if (shift_amount->IsTypeConversion()) {
+ DCHECK_NE(shift_amount->GetType(), DataType::Type::kBool); // We never convert to bool.
+ DataType::Type source_type = shift_amount->InputAt(0)->GetType();
+ // Non-integral and 64-bit source types require an explicit type conversion.
+ if (DataType::IsIntegralType(source_type) && !DataType::Is64BitType(source_type)) {
+ instruction->ReplaceInput(shift_amount->AsTypeConversion()->GetInput(), 1);
+ RecordSimplification();
+ return;
}
}
}
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index efd7cb4..7439893 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -30,6 +30,57 @@
namespace arm {
+class InstructionSimplifierArmVisitor : public HGraphVisitor {
+ public:
+ InstructionSimplifierArmVisitor(HGraph* graph, OptimizingCompilerStats* stats)
+ : HGraphVisitor(graph), stats_(stats) {}
+
+ private:
+ void RecordSimplification() {
+ if (stats_ != nullptr) {
+ stats_->RecordStat(kInstructionSimplificationsArch);
+ }
+ }
+
+ bool TryMergeIntoUsersShifterOperand(HInstruction* instruction);
+ bool TryMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op, bool do_merge);
+ bool CanMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
+ return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ false);
+ }
+ bool MergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
+ DCHECK(CanMergeIntoShifterOperand(use, bitfield_op));
+ return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ true);
+ }
+
+ /**
+ * This simplifier uses a special-purpose BB visitor.
+ * (1) No need to visit Phi nodes.
+ * (2) Since statements can be removed in a "forward" fashion,
+ * the visitor should test if each statement is still there.
+ */
+ void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ // TODO: fragile iteration, provide more robust iterators?
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
+ if (instruction->IsInBlock()) {
+ instruction->Accept(this);
+ }
+ }
+ }
+
+ void VisitAnd(HAnd* instruction) OVERRIDE;
+ void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
+ void VisitArraySet(HArraySet* instruction) OVERRIDE;
+ void VisitMul(HMul* instruction) OVERRIDE;
+ void VisitOr(HOr* instruction) OVERRIDE;
+ void VisitShl(HShl* instruction) OVERRIDE;
+ void VisitShr(HShr* instruction) OVERRIDE;
+ void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
+ void VisitUShr(HUShr* instruction) OVERRIDE;
+
+ OptimizingCompilerStats* stats_;
+};
+
bool InstructionSimplifierArmVisitor::TryMergeIntoShifterOperand(HInstruction* use,
HInstruction* bitfield_op,
bool do_merge) {
@@ -234,5 +285,10 @@
}
}
+void InstructionSimplifierArm::Run() {
+ InstructionSimplifierArmVisitor visitor(graph_, stats_);
+ visitor.VisitReversePostOrder();
+}
+
} // namespace arm
} // namespace art
diff --git a/compiler/optimizing/instruction_simplifier_arm.h b/compiler/optimizing/instruction_simplifier_arm.h
index e2ed257..2f65729 100644
--- a/compiler/optimizing/instruction_simplifier_arm.h
+++ b/compiler/optimizing/instruction_simplifier_arm.h
@@ -23,58 +23,6 @@
namespace art {
namespace arm {
-class InstructionSimplifierArmVisitor : public HGraphVisitor {
- public:
- InstructionSimplifierArmVisitor(HGraph* graph, OptimizingCompilerStats* stats)
- : HGraphVisitor(graph), stats_(stats) {}
-
- private:
- void RecordSimplification() {
- if (stats_ != nullptr) {
- stats_->RecordStat(kInstructionSimplificationsArch);
- }
- }
-
- bool TryMergeIntoUsersShifterOperand(HInstruction* instruction);
- bool TryMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op, bool do_merge);
- bool CanMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
- return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ false);
- }
- bool MergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
- DCHECK(CanMergeIntoShifterOperand(use, bitfield_op));
- return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ true);
- }
-
- /**
- * This simplifier uses a special-purpose BB visitor.
- * (1) No need to visit Phi nodes.
- * (2) Since statements can be removed in a "forward" fashion,
- * the visitor should test if each statement is still there.
- */
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
- // TODO: fragile iteration, provide more robust iterators?
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* instruction = it.Current();
- if (instruction->IsInBlock()) {
- instruction->Accept(this);
- }
- }
- }
-
- void VisitAnd(HAnd* instruction) OVERRIDE;
- void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
- void VisitArraySet(HArraySet* instruction) OVERRIDE;
- void VisitMul(HMul* instruction) OVERRIDE;
- void VisitOr(HOr* instruction) OVERRIDE;
- void VisitShl(HShl* instruction) OVERRIDE;
- void VisitShr(HShr* instruction) OVERRIDE;
- void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
- void VisitUShr(HUShr* instruction) OVERRIDE;
-
- OptimizingCompilerStats* stats_;
-};
-
-
class InstructionSimplifierArm : public HOptimization {
public:
InstructionSimplifierArm(HGraph* graph, OptimizingCompilerStats* stats)
@@ -82,10 +30,7 @@
static constexpr const char* kInstructionSimplifierArmPassName = "instruction_simplifier_arm";
- void Run() OVERRIDE {
- InstructionSimplifierArmVisitor visitor(graph_, stats_);
- visitor.VisitReversePostOrder();
- }
+ void Run() OVERRIDE;
};
} // namespace arm
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index 1c3b79d..c639953 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -30,6 +30,63 @@
using helpers::ShifterOperandSupportsExtension;
+class InstructionSimplifierArm64Visitor : public HGraphVisitor {
+ public:
+ InstructionSimplifierArm64Visitor(HGraph* graph, OptimizingCompilerStats* stats)
+ : HGraphVisitor(graph), stats_(stats) {}
+
+ private:
+ void RecordSimplification() {
+ if (stats_ != nullptr) {
+ stats_->RecordStat(kInstructionSimplificationsArch);
+ }
+ }
+
+ bool TryMergeIntoUsersShifterOperand(HInstruction* instruction);
+ bool TryMergeIntoShifterOperand(HInstruction* use,
+ HInstruction* bitfield_op,
+ bool do_merge);
+ bool CanMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
+ return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ false);
+ }
+ bool MergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
+ DCHECK(CanMergeIntoShifterOperand(use, bitfield_op));
+ return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ true);
+ }
+
+ /**
+ * This simplifier uses a special-purpose BB visitor.
+ * (1) No need to visit Phi nodes.
+ * (2) Since statements can be removed in a "forward" fashion,
+ * the visitor should test if each statement is still there.
+ */
+ void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ // TODO: fragile iteration, provide more robust iterators?
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
+ if (instruction->IsInBlock()) {
+ instruction->Accept(this);
+ }
+ }
+ }
+
+ // HInstruction visitors, sorted alphabetically.
+ void VisitAnd(HAnd* instruction) OVERRIDE;
+ void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
+ void VisitArraySet(HArraySet* instruction) OVERRIDE;
+ void VisitMul(HMul* instruction) OVERRIDE;
+ void VisitOr(HOr* instruction) OVERRIDE;
+ void VisitShl(HShl* instruction) OVERRIDE;
+ void VisitShr(HShr* instruction) OVERRIDE;
+ void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
+ void VisitUShr(HUShr* instruction) OVERRIDE;
+ void VisitXor(HXor* instruction) OVERRIDE;
+ void VisitVecLoad(HVecLoad* instruction) OVERRIDE;
+ void VisitVecStore(HVecStore* instruction) OVERRIDE;
+
+ OptimizingCompilerStats* stats_;
+};
+
bool InstructionSimplifierArm64Visitor::TryMergeIntoShifterOperand(HInstruction* use,
HInstruction* bitfield_op,
bool do_merge) {
@@ -223,5 +280,10 @@
}
}
+void InstructionSimplifierArm64::Run() {
+ InstructionSimplifierArm64Visitor visitor(graph_, stats_);
+ visitor.VisitReversePostOrder();
+}
+
} // namespace arm64
} // namespace art
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index 4f16fc3..d180a8d 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -23,64 +23,6 @@
namespace art {
namespace arm64 {
-class InstructionSimplifierArm64Visitor : public HGraphVisitor {
- public:
- InstructionSimplifierArm64Visitor(HGraph* graph, OptimizingCompilerStats* stats)
- : HGraphVisitor(graph), stats_(stats) {}
-
- private:
- void RecordSimplification() {
- if (stats_ != nullptr) {
- stats_->RecordStat(kInstructionSimplificationsArch);
- }
- }
-
- bool TryMergeIntoUsersShifterOperand(HInstruction* instruction);
- bool TryMergeIntoShifterOperand(HInstruction* use,
- HInstruction* bitfield_op,
- bool do_merge);
- bool CanMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
- return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ false);
- }
- bool MergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
- DCHECK(CanMergeIntoShifterOperand(use, bitfield_op));
- return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ true);
- }
-
- /**
- * This simplifier uses a special-purpose BB visitor.
- * (1) No need to visit Phi nodes.
- * (2) Since statements can be removed in a "forward" fashion,
- * the visitor should test if each statement is still there.
- */
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
- // TODO: fragile iteration, provide more robust iterators?
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* instruction = it.Current();
- if (instruction->IsInBlock()) {
- instruction->Accept(this);
- }
- }
- }
-
- // HInstruction visitors, sorted alphabetically.
- void VisitAnd(HAnd* instruction) OVERRIDE;
- void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
- void VisitArraySet(HArraySet* instruction) OVERRIDE;
- void VisitMul(HMul* instruction) OVERRIDE;
- void VisitOr(HOr* instruction) OVERRIDE;
- void VisitShl(HShl* instruction) OVERRIDE;
- void VisitShr(HShr* instruction) OVERRIDE;
- void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
- void VisitUShr(HUShr* instruction) OVERRIDE;
- void VisitXor(HXor* instruction) OVERRIDE;
- void VisitVecLoad(HVecLoad* instruction) OVERRIDE;
- void VisitVecStore(HVecStore* instruction) OVERRIDE;
-
- OptimizingCompilerStats* stats_;
-};
-
-
class InstructionSimplifierArm64 : public HOptimization {
public:
InstructionSimplifierArm64(HGraph* graph, OptimizingCompilerStats* stats)
@@ -88,10 +30,7 @@
static constexpr const char* kInstructionSimplifierArm64PassName = "instruction_simplifier_arm64";
- void Run() OVERRIDE {
- InstructionSimplifierArm64Visitor visitor(graph_, stats_);
- visitor.VisitReversePostOrder();
- }
+ void Run() OVERRIDE;
};
} // namespace arm64
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 11725f4..daec634 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -30,6 +30,16 @@
namespace art {
+// Check that intrinsic enum values fit within space set aside in ArtMethod modifier flags.
+#define CHECK_INTRINSICS_ENUM_VALUES(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
+ static_assert( \
+ static_cast<uint32_t>(Intrinsics::k ## Name) <= (kAccIntrinsicBits >> CTZ(kAccIntrinsicBits)), \
+      "Intrinsics enumeration space overflow.");
+#include "intrinsics_list.h"
+ INTRINSICS_LIST(CHECK_INTRINSICS_ENUM_VALUES)
+#undef INTRINSICS_LIST
+#undef CHECK_INTRINSICS_ENUM_VALUES
+
// Function that returns whether an intrinsic is static/direct or virtual.
static inline InvokeType GetIntrinsicInvokeType(Intrinsics i) {
switch (i) {
@@ -109,6 +119,7 @@
// InvokeStaticOrDirect.
InvokeType intrinsic_type = GetIntrinsicInvokeType(intrinsic);
InvokeType invoke_type = invoke->GetInvokeType();
+
switch (intrinsic_type) {
case kStatic:
return (invoke_type == kStatic);
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 7e37018..fec64e2 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -74,19 +74,16 @@
// Forward declaration.
static bool IsZeroExtensionAndGet(HInstruction* instruction,
DataType::Type type,
- /*out*/ HInstruction** operand,
- bool to64 = false);
+ /*out*/ HInstruction** operand);
-// Detect a sign extension in instruction from the given type. The to64 parameter
-// denotes if result is long, and thus sign extension from int can be included.
+// Detect a sign extension in instruction from the given type.
// Returns the promoted operand on success.
static bool IsSignExtensionAndGet(HInstruction* instruction,
DataType::Type type,
- /*out*/ HInstruction** operand,
- bool to64 = false) {
+ /*out*/ HInstruction** operand) {
// Accept any already wider constant that would be handled properly by sign
// extension when represented in the *width* of the given narrower data type
- // (the fact that char normally zero extends does not matter here).
+ // (the fact that Uint16 normally zero extends does not matter here).
int64_t value = 0;
if (IsInt64AndGet(instruction, /*out*/ &value)) {
switch (type) {
@@ -103,43 +100,39 @@
return true;
}
return false;
- case DataType::Type::kInt32:
- if (IsInt<32>(value)) {
- *operand = instruction;
- return to64;
- }
- return false;
default:
return false;
}
}
- // An implicit widening conversion of a signed integer to an integral type sign-extends
- // the two's-complement representation of the integer value to fill the wider format.
- if (instruction->GetType() == type && (instruction->IsArrayGet() ||
- instruction->IsStaticFieldGet() ||
- instruction->IsInstanceFieldGet())) {
+ // An implicit widening conversion of any signed expression sign-extends.
+ if (instruction->GetType() == type) {
switch (type) {
case DataType::Type::kInt8:
case DataType::Type::kInt16:
*operand = instruction;
return true;
- case DataType::Type::kInt32:
- *operand = instruction;
- return to64;
default:
return false;
}
}
- // Explicit type conversions.
+ // An explicit widening conversion of a signed expression sign-extends.
if (instruction->IsTypeConversion()) {
- DataType::Type from = instruction->InputAt(0)->GetType();
+ HInstruction* conv = instruction->InputAt(0);
+ DataType::Type from = conv->GetType();
switch (instruction->GetType()) {
+ case DataType::Type::kInt32:
case DataType::Type::kInt64:
- return IsSignExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand, /*to64*/ true);
+ if (type == from && (from == DataType::Type::kInt8 ||
+ from == DataType::Type::kInt16 ||
+ from == DataType::Type::kInt32)) {
+ *operand = conv;
+ return true;
+ }
+ return false;
case DataType::Type::kInt16:
return type == DataType::Type::kUint16 &&
from == DataType::Type::kUint16 &&
- IsZeroExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand, to64);
+ IsZeroExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand);
default:
return false;
}
@@ -147,16 +140,14 @@
return false;
}
-// Detect a zero extension in instruction from the given type. The to64 parameter
-// denotes if result is long, and thus zero extension from int can be included.
+// Detect a zero extension in instruction from the given type.
// Returns the promoted operand on success.
static bool IsZeroExtensionAndGet(HInstruction* instruction,
DataType::Type type,
- /*out*/ HInstruction** operand,
- bool to64) {
+ /*out*/ HInstruction** operand) {
// Accept any already wider constant that would be handled properly by zero
// extension when represented in the *width* of the given narrower data type
- // (the fact that byte/short/int normally sign extend does not matter here).
+ // (the fact that Int8/Int16 normally sign extend does not matter here).
int64_t value = 0;
if (IsInt64AndGet(instruction, /*out*/ &value)) {
switch (type) {
@@ -173,21 +164,12 @@
return true;
}
return false;
- case DataType::Type::kInt32:
- if (IsUint<32>(value)) {
- *operand = instruction;
- return to64;
- }
- return false;
default:
return false;
}
}
- // An implicit widening conversion of a char to an integral type zero-extends
- // the representation of the char value to fill the wider format.
- if (instruction->GetType() == type && (instruction->IsArrayGet() ||
- instruction->IsStaticFieldGet() ||
- instruction->IsInstanceFieldGet())) {
+ // An implicit widening conversion of any unsigned expression zero-extends.
+ if (instruction->GetType() == type) {
if (type == DataType::Type::kUint16) {
*operand = instruction;
return true;
@@ -195,6 +177,9 @@
}
// A sign (or zero) extension followed by an explicit removal of just the
// higher sign bits is equivalent to a zero extension of the underlying operand.
+ //
+ // TODO: move this into simplifier and use new type system instead.
+ //
if (instruction->IsAnd()) {
int64_t mask = 0;
HInstruction* a = instruction->InputAt(0);
@@ -210,22 +195,26 @@
case DataType::Type::kUint16:
case DataType::Type::kInt16:
return mask == std::numeric_limits<uint16_t>::max();
- case DataType::Type::kInt32:
- return mask == std::numeric_limits<uint32_t>::max() && to64;
default: return false;
}
}
}
- // Explicit type conversions.
+ // An explicit widening conversion of an unsigned expression zero-extends.
if (instruction->IsTypeConversion()) {
- DataType::Type from = instruction->InputAt(0)->GetType();
+ HInstruction* conv = instruction->InputAt(0);
+ DataType::Type from = conv->GetType();
switch (instruction->GetType()) {
+ case DataType::Type::kInt32:
case DataType::Type::kInt64:
- return IsZeroExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand, /*to64*/ true);
+ if (type == from && from == DataType::Type::kUint16) {
+ *operand = conv;
+ return true;
+ }
+ return false;
case DataType::Type::kUint16:
return type == DataType::Type::kInt16 &&
from == DataType::Type::kInt16 &&
- IsSignExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand, to64);
+ IsSignExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand);
default:
return false;
}
@@ -360,6 +349,22 @@
return false;
}
+// Detect a + c for constant c.
+static bool IsAddConst(HInstruction* instruction,
+ /*out*/ HInstruction** a,
+ /*out*/ int64_t* c) {
+ if (instruction->IsAdd()) {
+ if (IsInt64AndGet(instruction->InputAt(0), c)) {
+ *a = instruction->InputAt(1);
+ return true;
+ } else if (IsInt64AndGet(instruction->InputAt(1), c)) {
+ *a = instruction->InputAt(0);
+ return true;
+ }
+ }
+ return false;
+}
+
// Detect reductions of the following forms,
// x = x_phi + ..
// x = x_phi - ..
@@ -1148,6 +1153,7 @@
size_t size_vec = DataType::Size(type);
size_t size_from = DataType::Size(from);
size_t size_to = DataType::Size(to);
+ DataType::Type ctype = size_from == size_vec ? from : type;
// Accept an integral conversion
// (1a) narrowing into vector type, "wider" operations cannot bring in higher order bits, or
// (1b) widening from at least vector type, and
@@ -1157,7 +1163,7 @@
VectorizeUse(node, opa, generate_code, type, restrictions | kNoHiBits)) ||
(size_to >= size_from &&
size_from >= size_vec &&
- VectorizeUse(node, opa, generate_code, type, restrictions))) {
+ VectorizeUse(node, opa, generate_code, ctype, restrictions))) {
if (generate_code) {
if (vector_mode_ == kVector) {
vector_map_->Put(instruction, vector_map_->Get(opa)); // operand pass-through
@@ -1896,9 +1902,14 @@
(v->AsInvokeStaticOrDirect()->GetIntrinsic() == Intrinsics::kMathAbsInt ||
v->AsInvokeStaticOrDirect()->GetIntrinsic() == Intrinsics::kMathAbsLong)) {
HInstruction* x = v->InputAt(0);
- if (x->IsSub() && x->GetType() == reduction_type) {
- a = x->InputAt(0);
- b = x->InputAt(1);
+ if (x->GetType() == reduction_type) {
+ int64_t c = 0;
+ if (x->IsSub()) {
+ a = x->InputAt(0);
+ b = x->InputAt(1);
+ } else if (IsAddConst(x, /*out*/ &a, /*out*/ &c)) {
+ b = graph_->GetConstant(reduction_type, -c); // hidden SUB!
+ }
}
}
if (a == nullptr || b == nullptr) {
@@ -1906,22 +1917,21 @@
}
// Accept same-type or consistent sign extension for narrower-type on operands a and b.
// The same-type or narrower operands are called r (a or lower) and s (b or lower).
+ // We inspect the operands carefully to pick the most suited type.
HInstruction* r = a;
HInstruction* s = b;
bool is_unsigned = false;
DataType::Type sub_type = a->GetType();
- if (a->IsTypeConversion()) {
- HInstruction* hunt = a;
- while (hunt->IsTypeConversion()) {
- hunt = hunt->InputAt(0);
- }
- sub_type = hunt->GetType();
- } else if (b->IsTypeConversion()) {
- HInstruction* hunt = a;
- while (hunt->IsTypeConversion()) {
- hunt = hunt->InputAt(0);
- }
- sub_type = hunt->GetType();
+ if (DataType::Size(b->GetType()) < DataType::Size(sub_type)) {
+ sub_type = b->GetType();
+ }
+ if (a->IsTypeConversion() &&
+ DataType::Size(a->InputAt(0)->GetType()) < DataType::Size(sub_type)) {
+ sub_type = a->InputAt(0)->GetType();
+ }
+ if (b->IsTypeConversion() &&
+ DataType::Size(b->InputAt(0)->GetType()) < DataType::Size(sub_type)) {
+ sub_type = b->InputAt(0)->GetType();
}
if (reduction_type != sub_type &&
(!IsNarrowerOperands(a, b, sub_type, &r, &s, &is_unsigned) || is_unsigned)) {
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index 1b4e910..5d9bf2c 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -1396,7 +1396,7 @@
linker->SetEntryPointsToInterpreter(&method);
method.SetCodeItemOffset(dex_file_->FindCodeItemOffset(class_def, dex_method_idx));
// Clear all the intrinsics related flags.
- method.ClearAccessFlags(art::kAccIntrinsic | (~art::kAccFlagsNotUsedByIntrinsic));
+ method.SetNotIntrinsic();
// Notify the jit that this method is redefined.
art::jit::Jit* jit = driver_->runtime_->GetJit();
if (jit != nullptr) {
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 7ff35ac..4181169 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -377,14 +377,14 @@
}
inline void ArtMethod::SetIntrinsic(uint32_t intrinsic) {
- DCHECK(IsUint<8>(intrinsic));
// Currently we only do intrinsics for static/final methods or methods of final
// classes. We don't set kHasSingleImplementation for those methods.
DCHECK(IsStatic() || IsFinal() || GetDeclaringClass()->IsFinal()) <<
"Potential conflict with kAccSingleImplementation";
- uint32_t new_value = (GetAccessFlags() & kAccFlagsNotUsedByIntrinsic) |
- kAccIntrinsic |
- (intrinsic << POPCOUNT(kAccFlagsNotUsedByIntrinsic));
+ static const int kAccFlagsShift = CTZ(kAccIntrinsicBits);
+ DCHECK_LE(intrinsic, kAccIntrinsicBits >> kAccFlagsShift);
+ uint32_t intrinsic_bits = intrinsic << kAccFlagsShift;
+ uint32_t new_value = (GetAccessFlags() & ~kAccIntrinsicBits) | kAccIntrinsic | intrinsic_bits;
if (kIsDebugBuild) {
uint32_t java_flags = (GetAccessFlags() & kAccJavaFlagsMask);
bool is_constructor = IsConstructor();
diff --git a/runtime/art_method.h b/runtime/art_method.h
index fbdc32d..caef81c 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -117,26 +117,6 @@
access_flags_.store(new_access_flags, std::memory_order_relaxed);
}
- // This setter guarantees atomicity.
- void AddAccessFlags(uint32_t flag) {
- uint32_t old_access_flags;
- uint32_t new_access_flags;
- do {
- old_access_flags = access_flags_.load(std::memory_order_relaxed);
- new_access_flags = old_access_flags | flag;
- } while (!access_flags_.compare_exchange_weak(old_access_flags, new_access_flags));
- }
-
- // This setter guarantees atomicity.
- void ClearAccessFlags(uint32_t flag) {
- uint32_t old_access_flags;
- uint32_t new_access_flags;
- do {
- old_access_flags = access_flags_.load(std::memory_order_relaxed);
- new_access_flags = old_access_flags & ~flag;
- } while (!access_flags_.compare_exchange_weak(old_access_flags, new_access_flags));
- }
-
static MemberOffset AccessFlagsOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, access_flags_));
}
@@ -196,12 +176,21 @@
ALWAYS_INLINE void SetIntrinsic(uint32_t intrinsic) REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t GetIntrinsic() {
+ static const int kAccFlagsShift = CTZ(kAccIntrinsicBits);
+ static_assert(IsPowerOfTwo((kAccIntrinsicBits >> kAccFlagsShift) + 1),
+                "kAccIntrinsicBits are not contiguous");
+ static_assert((kAccIntrinsic & kAccIntrinsicBits) == 0,
+ "kAccIntrinsic overlaps kAccIntrinsicBits");
DCHECK(IsIntrinsic());
- return (GetAccessFlags() >> POPCOUNT(kAccFlagsNotUsedByIntrinsic)) & kAccMaxIntrinsic;
+ return (GetAccessFlags() & kAccIntrinsicBits) >> kAccFlagsShift;
+ }
+
+ void SetNotIntrinsic() REQUIRES_SHARED(Locks::mutator_lock_) {
+ ClearAccessFlags(kAccIntrinsic | kAccIntrinsicBits);
}
bool IsCopied() {
- static_assert((kAccCopied & kAccFlagsNotUsedByIntrinsic) == kAccCopied,
+ static_assert((kAccCopied & (kAccIntrinsic | kAccIntrinsicBits)) == 0,
"kAccCopied conflicts with intrinsic modifier");
const bool copied = (GetAccessFlags() & kAccCopied) != 0;
// (IsMiranda() || IsDefaultConflicting()) implies copied
@@ -211,7 +200,7 @@
}
bool IsMiranda() {
- static_assert((kAccMiranda & kAccFlagsNotUsedByIntrinsic) == kAccMiranda,
+ static_assert((kAccMiranda & (kAccIntrinsic | kAccIntrinsicBits)) == 0,
"kAccMiranda conflicts with intrinsic modifier");
return (GetAccessFlags() & kAccMiranda) != 0;
}
@@ -245,7 +234,7 @@
// This is set by the class linker.
bool IsDefault() {
- static_assert((kAccDefault & kAccFlagsNotUsedByIntrinsic) == kAccDefault,
+ static_assert((kAccDefault & (kAccIntrinsic | kAccIntrinsicBits)) == 0,
"kAccDefault conflicts with intrinsic modifier");
return (GetAccessFlags() & kAccDefault) != 0;
}
@@ -290,6 +279,22 @@
AddAccessFlags(kAccSkipAccessChecks);
}
+ bool PreviouslyWarm() {
+ if (IsIntrinsic()) {
+ // kAccPreviouslyWarm overlaps with kAccIntrinsicBits.
+ return true;
+ }
+ return (GetAccessFlags() & kAccPreviouslyWarm) != 0;
+ }
+
+ void SetPreviouslyWarm() {
+ if (IsIntrinsic()) {
+ // kAccPreviouslyWarm overlaps with kAccIntrinsicBits.
+ return;
+ }
+ AddAccessFlags(kAccPreviouslyWarm);
+ }
+
// Should this method be run in the interpreter and count locks (e.g., failed structured-
// locking verification)?
bool MustCountLocks() {
@@ -299,6 +304,10 @@
return (GetAccessFlags() & kAccMustCountLocks) != 0;
}
+ void SetMustCountLocks() {
+ AddAccessFlags(kAccMustCountLocks);
+ }
+
// Checks to see if the method was annotated with @dalvik.annotation.optimization.FastNative
// -- Independent of kAccFastNative access flags.
bool IsAnnotatedWithFastNative();
@@ -782,6 +791,37 @@
template <ReadBarrierOption kReadBarrierOption> void GetAccessFlagsDCheck();
+ static inline bool IsValidIntrinsicUpdate(uint32_t modifier) {
+ return (((modifier & kAccIntrinsic) == kAccIntrinsic) &&
+ (((modifier & ~(kAccIntrinsic | kAccIntrinsicBits)) == 0)));
+ }
+
+ static inline bool OverlapsIntrinsicBits(uint32_t modifier) {
+ return (modifier & kAccIntrinsicBits) != 0;
+ }
+
+ // This setter guarantees atomicity.
+ void AddAccessFlags(uint32_t flag) {
+ DCHECK(!IsIntrinsic() || !OverlapsIntrinsicBits(flag) || IsValidIntrinsicUpdate(flag));
+ uint32_t old_access_flags;
+ uint32_t new_access_flags;
+ do {
+ old_access_flags = access_flags_.load(std::memory_order_relaxed);
+ new_access_flags = old_access_flags | flag;
+ } while (!access_flags_.compare_exchange_weak(old_access_flags, new_access_flags));
+ }
+
+ // This setter guarantees atomicity.
+ void ClearAccessFlags(uint32_t flag) {
+ DCHECK(!IsIntrinsic() || !OverlapsIntrinsicBits(flag) || IsValidIntrinsicUpdate(flag));
+ uint32_t old_access_flags;
+ uint32_t new_access_flags;
+ do {
+ old_access_flags = access_flags_.load(std::memory_order_relaxed);
+ new_access_flags = old_access_flags & ~flag;
+ } while (!access_flags_.compare_exchange_weak(old_access_flags, new_access_flags));
+ }
+
DISALLOW_COPY_AND_ASSIGN(ArtMethod); // Need to use CopyFrom to deal with 32 vs 64 bits.
};
diff --git a/runtime/image.cc b/runtime/image.cc
index 0236f47..4c6529b 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '4', '8', '\0' }; // Map boot image tables.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '4', '9', '\0' }; // 256 intrinsics
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 40a5212..ae08fe2 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -534,7 +534,7 @@
static void ClearMethodCounter(ArtMethod* method, bool was_warm) {
if (was_warm) {
- method->AddAccessFlags(kAccPreviouslyWarm);
+ method->SetPreviouslyWarm();
}
// We reset the counter to 1 so that the profile knows that the method was executed at least once.
// This is required for layout purposes.
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index af6a45f..2bf8d8b 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -349,7 +349,7 @@
// Mark startup methods as hot if they have more than hot_method_sample_threshold
// samples. This means they will get compiled by the compiler driver.
if (method.GetProfilingInfo(kRuntimePointerSize) != nullptr ||
- (method.GetAccessFlags() & kAccPreviouslyWarm) != 0 ||
+ method.PreviouslyWarm() ||
counter >= hot_method_sample_threshold) {
hot_methods->AddReference(method.GetDexFile(), method.GetDexMethodIndex());
} else if (counter != 0) {
diff --git a/runtime/modifiers.h b/runtime/modifiers.h
index 68ab4a4..4b790a0 100644
--- a/runtime/modifiers.h
+++ b/runtime/modifiers.h
@@ -79,6 +79,11 @@
// virtual call.
static constexpr uint32_t kAccSingleImplementation = 0x08000000; // method (runtime)
+// Not currently used, except for intrinsic methods where these bits
+// are part of the intrinsic ordinal.
+static constexpr uint32_t kAccMayBeUnusedBits = 0x70000000;
+
+// Set by the compiler driver when compiling boot classes with intrinsic methods.
static constexpr uint32_t kAccIntrinsic = 0x80000000; // method (runtime)
// Special runtime-only flags.
@@ -89,8 +94,10 @@
// class/ancestor overrides finalize()
static constexpr uint32_t kAccClassIsFinalizable = 0x80000000;
-static constexpr uint32_t kAccFlagsNotUsedByIntrinsic = 0x00FFFFFF;
-static constexpr uint32_t kAccMaxIntrinsic = 0x7F;
+// Continuous sequence of bits used to hold the ordinal of an intrinsic method. Flags
+// which overlap are not valid when kAccIntrinsic is set.
+static constexpr uint32_t kAccIntrinsicBits = kAccMayBeUnusedBits | kAccSingleImplementation |
+ kAccMustCountLocks | kAccCompileDontBother | kAccDefaultConflict | kAccPreviouslyWarm;
// Valid (meaningful) bits for a field.
static constexpr uint32_t kAccValidFieldFlags = kAccPublic | kAccPrivate | kAccProtected |
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index cfdf20d..ee428ed 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -431,7 +431,7 @@
}
}
if ((verifier.encountered_failure_types_ & VerifyError::VERIFY_ERROR_LOCKING) != 0) {
- method->AddAccessFlags(kAccMustCountLocks);
+ method->SetMustCountLocks();
}
}
} else {
diff --git a/test/458-checker-instruct-simplification/src/Main.java b/test/458-checker-instruct-simplification/src/Main.java
index 5c36ce9..f36c261 100644
--- a/test/458-checker-instruct-simplification/src/Main.java
+++ b/test/458-checker-instruct-simplification/src/Main.java
@@ -2038,6 +2038,84 @@
return (value >> temp) + temp;
}
+ /// CHECK-START: int Main.$noinline$intUnnecessaryShiftModifications(int, int) instruction_simplifier (before)
+ /// CHECK: <<Value:i\d+>> ParameterValue
+ /// CHECK: <<Shift:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Const32:i\d+>> IntConstant 32
+ /// CHECK-DAG: <<Const64:i\d+>> IntConstant 64
+ /// CHECK-DAG: <<Const96:i\d+>> IntConstant 96
+ /// CHECK-DAG: <<Const128:i\d+>> IntConstant 128
+ /// CHECK-DAG: <<Or:i\d+>> Or [<<Shift>>,<<Const32>>]
+ /// CHECK-DAG: <<Xor:i\d+>> Xor [<<Shift>>,<<Const64>>]
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Shift>>,<<Const96>>]
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Shift>>,<<Const128>>]
+ /// CHECK-DAG: <<Conv:b\d+>> TypeConversion [<<Shift>>]
+ /// CHECK-DAG: Shl [<<Value>>,<<Or>>]
+ /// CHECK-DAG: Shr [<<Value>>,<<Xor>>]
+ /// CHECK-DAG: UShr [<<Value>>,<<Add>>]
+ /// CHECK-DAG: Shl [<<Value>>,<<Sub>>]
+ /// CHECK-DAG: Shr [<<Value>>,<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$intUnnecessaryShiftModifications(int, int) instruction_simplifier (after)
+ /// CHECK: <<Value:i\d+>> ParameterValue
+ /// CHECK: <<Shift:i\d+>> ParameterValue
+ /// CHECK-DAG: Shl [<<Value>>,<<Shift>>]
+ /// CHECK-DAG: Shr [<<Value>>,<<Shift>>]
+ /// CHECK-DAG: UShr [<<Value>>,<<Shift>>]
+ /// CHECK-DAG: Shl [<<Value>>,<<Shift>>]
+ /// CHECK-DAG: Shr [<<Value>>,<<Shift>>]
+
+ public static int $noinline$intUnnecessaryShiftModifications(int value, int shift) {
+ if (doThrow) { throw new Error(); }
+ int c128 = 128;
+ return (value << (shift | 32)) +
+ (value >> (shift ^ 64)) +
+ (value >>> (shift + 96)) +
+ (value << (shift - c128)) + // Needs a named constant to generate Sub.
+ (value >> ((byte) shift));
+ }
+
+ /// CHECK-START: int Main.$noinline$intNecessaryShiftModifications(int, int) instruction_simplifier (before)
+ /// CHECK: <<Value:i\d+>> ParameterValue
+ /// CHECK: <<Shift:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Const33:i\d+>> IntConstant 33
+ /// CHECK-DAG: <<Const65:i\d+>> IntConstant 65
+ /// CHECK-DAG: <<Const97:i\d+>> IntConstant 97
+ /// CHECK-DAG: <<Const129:i\d+>> IntConstant 129
+ /// CHECK-DAG: <<Or:i\d+>> Or [<<Shift>>,<<Const33>>]
+ /// CHECK-DAG: <<Xor:i\d+>> Xor [<<Shift>>,<<Const65>>]
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Shift>>,<<Const97>>]
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Shift>>,<<Const129>>]
+ /// CHECK-DAG: Shl [<<Value>>,<<Or>>]
+ /// CHECK-DAG: Shr [<<Value>>,<<Xor>>]
+ /// CHECK-DAG: UShr [<<Value>>,<<Add>>]
+ /// CHECK-DAG: Shl [<<Value>>,<<Sub>>]
+
+ /// CHECK-START: int Main.$noinline$intNecessaryShiftModifications(int, int) instruction_simplifier (after)
+ /// CHECK: <<Value:i\d+>> ParameterValue
+ /// CHECK: <<Shift:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Const33:i\d+>> IntConstant 33
+ /// CHECK-DAG: <<Const65:i\d+>> IntConstant 65
+ /// CHECK-DAG: <<Const97:i\d+>> IntConstant 97
+ /// CHECK-DAG: <<Const129:i\d+>> IntConstant 129
+ /// CHECK-DAG: <<Or:i\d+>> Or [<<Shift>>,<<Const33>>]
+ /// CHECK-DAG: <<Xor:i\d+>> Xor [<<Shift>>,<<Const65>>]
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Shift>>,<<Const97>>]
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Shift>>,<<Const129>>]
+ /// CHECK-DAG: Shl [<<Value>>,<<Or>>]
+ /// CHECK-DAG: Shr [<<Value>>,<<Xor>>]
+ /// CHECK-DAG: UShr [<<Value>>,<<Add>>]
+ /// CHECK-DAG: Shl [<<Value>>,<<Sub>>]
+
+ public static int $noinline$intNecessaryShiftModifications(int value, int shift) {
+ if (doThrow) { throw new Error(); }
+ int c129 = 129;
+ return (value << (shift | 33)) +
+ (value >> (shift ^ 65)) +
+ (value >>> (shift + 97)) +
+ (value << (shift - c129)); // Needs a named constant to generate Sub.
+ }
+
/// CHECK-START: int Main.$noinline$intAddSubSimplifyArg1(int, int) instruction_simplifier (before)
/// CHECK: <<X:i\d+>> ParameterValue
/// CHECK: <<Y:i\d+>> ParameterValue
@@ -2363,14 +2441,22 @@
assertIntEquals(26, $noinline$runSmaliTestInt("SubSubConst3", 5));
assertIntEquals(0x5e6f7808, $noinline$intUnnecessaryShiftMasking(0xabcdef01, 3));
assertIntEquals(0x5e6f7808, $noinline$intUnnecessaryShiftMasking(0xabcdef01, 3 + 32));
- assertLongEquals(0xffffffffffffeaf3L, $noinline$longUnnecessaryShiftMasking(0xabcdef0123456789L, 50));
- assertLongEquals(0xffffffffffffeaf3L, $noinline$longUnnecessaryShiftMasking(0xabcdef0123456789L, 50 + 64));
+ assertLongEquals(0xffffffffffffeaf3L,
+ $noinline$longUnnecessaryShiftMasking(0xabcdef0123456789L, 50));
+ assertLongEquals(0xffffffffffffeaf3L,
+ $noinline$longUnnecessaryShiftMasking(0xabcdef0123456789L, 50 + 64));
assertIntEquals(0x2af37b, $noinline$intUnnecessaryWiderShiftMasking(0xabcdef01, 10));
assertIntEquals(0x2af37b, $noinline$intUnnecessaryWiderShiftMasking(0xabcdef01, 10 + 128));
- assertLongEquals(0xaf37bc048d159e24L, $noinline$longSmallerShiftMasking(0xabcdef0123456789L, 2));
- assertLongEquals(0xaf37bc048d159e24L, $noinline$longSmallerShiftMasking(0xabcdef0123456789L, 2 + 256));
+ assertLongEquals(0xaf37bc048d159e24L,
+ $noinline$longSmallerShiftMasking(0xabcdef0123456789L, 2));
+ assertLongEquals(0xaf37bc048d159e24L,
+ $noinline$longSmallerShiftMasking(0xabcdef0123456789L, 2 + 256));
assertIntEquals(0xfffd5e7c, $noinline$otherUseOfUnnecessaryShiftMasking(0xabcdef01, 13));
assertIntEquals(0xfffd5e7c, $noinline$otherUseOfUnnecessaryShiftMasking(0xabcdef01, 13 + 512));
+ assertIntEquals(0x5f49eb48, $noinline$intUnnecessaryShiftModifications(0xabcdef01, 2));
+ assertIntEquals(0xbd4c29b0, $noinline$intUnnecessaryShiftModifications(0xabcdef01, 3));
+ assertIntEquals(0xc0fed1ca, $noinline$intNecessaryShiftModifications(0xabcdef01, 2));
+ assertIntEquals(0x03578ebc, $noinline$intNecessaryShiftModifications(0xabcdef01, 3));
assertIntEquals(654321, $noinline$intAddSubSimplifyArg1(arg, 654321));
assertIntEquals(arg, $noinline$intAddSubSimplifyArg2(arg, 654321));
diff --git a/test/623-checker-loop-regressions/src/Main.java b/test/623-checker-loop-regressions/src/Main.java
index 418be30..f6d3bba 100644
--- a/test/623-checker-loop-regressions/src/Main.java
+++ b/test/623-checker-loop-regressions/src/Main.java
@@ -497,6 +497,13 @@
}
}
+ // Mix of 16-bit and 8-bit array references.
+ static void castAndNarrow(byte[] x, char[] y) {
+ for (int i = 0; i < x.length; i++) {
+ x[i] = (byte) ((short) y[i] + 1);
+ }
+ }
+
public static void main(String[] args) {
expectEquals(10, earlyExitFirst(-1));
for (int i = 0; i <= 10; i++) {
@@ -650,6 +657,15 @@
expectEquals(2805, f[i]);
}
+ char[] cx = new char[259];
+ for (int i = 0; i < 259; i++) {
+ cx[i] = (char) (i - 100);
+ }
+ castAndNarrow(b1, cx);
+ for (int i = 0; i < 259; i++) {
+ expectEquals((byte)((short) cx[i] + 1), b1[i]);
+ }
+
System.out.println("passed");
}
diff --git a/test/645-checker-abs-simd/src/Main.java b/test/645-checker-abs-simd/src/Main.java
index c49d85d..57c51a6 100644
--- a/test/645-checker-abs-simd/src/Main.java
+++ b/test/645-checker-abs-simd/src/Main.java
@@ -131,6 +131,28 @@
}
}
+ /// CHECK-START: void Main.doitCastedChar(char[]) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.doitCastedChar(char[]) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ //
+ /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+ //
+ private static void doitCastedChar(char[] x) {
+ for (int i = 0; i < x.length; i++) {
+ x[i] = (char) Math.abs((short) x[i]);
+ }
+ }
+
/// CHECK-START: void Main.doitInt(int[]) loop_optimization (before)
/// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
@@ -298,7 +320,7 @@
xc[i] = (char) i;
}
doitChar(xc);
- for (int i = 0; i < 1024 *64; i++) {
+ for (int i = 0; i < 1024 * 64; i++) {
expectEquals32((char) Math.abs((char) i), xc[i]);
}
short[] xs = new short[1024 * 64];
@@ -309,6 +331,13 @@
for (int i = 0; i < 1024 * 64; i++) {
expectEquals32((short) Math.abs((short) i), xs[i]);
}
+ for (int i = 0; i < 1024 * 64; i++) {
+ xc[i] = (char) i;
+ }
+ doitCastedChar(xc);
+ for (int i = 0; i < 1024 * 64; i++) {
+ expectEquals32((char) Math.abs((short) i), xc[i]);
+ }
// Set up minint32, maxint32 and some others.
int[] xi = new int[8];
xi[0] = 0x80000000;
diff --git a/test/660-checker-simd-sad-short3/expected.txt b/test/660-checker-simd-sad-short3/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/660-checker-simd-sad-short3/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/660-checker-simd-sad-short3/info.txt b/test/660-checker-simd-sad-short3/info.txt
new file mode 100644
index 0000000..b56c119
--- /dev/null
+++ b/test/660-checker-simd-sad-short3/info.txt
@@ -0,0 +1 @@
+Functional tests on SAD vectorization.
diff --git a/test/660-checker-simd-sad-short3/src/Main.java b/test/660-checker-simd-sad-short3/src/Main.java
new file mode 100644
index 0000000..c8850b4
--- /dev/null
+++ b/test/660-checker-simd-sad-short3/src/Main.java
@@ -0,0 +1,351 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for SAD (sum of absolute differences).
+ *
+ * Some special cases: parameters, constants, invariants, casted computations.
+ */
+public class Main {
+
+ /// CHECK-START: int Main.sadShort2IntParamRight(short[], short) loop_optimization (before)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Param:s\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get>>,<<Param>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Intrin:i\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: int Main.sadShort2IntParamRight(short[], short) loop_optimization (after)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<Param:s\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Rep:d\d+>> VecReplicateScalar [<<Param>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Load>>,<<Rep>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons8>>] loop:<<Loop>> outer_loop:none
+ private static int sadShort2IntParamRight(short[] s, short param) {
+ int sad = 0;
+ for (int i = 0; i < s.length; i++) {
+ sad += Math.abs(s[i] - param);
+ }
+ return sad;
+ }
+
+ /// CHECK-START: int Main.sadShort2IntParamLeft(short[], short) loop_optimization (before)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Param:s\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Param>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Intrin:i\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: int Main.sadShort2IntParamLeft(short[], short) loop_optimization (after)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<Param:s\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Rep:d\d+>> VecReplicateScalar [<<Param>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Rep>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons8>>] loop:<<Loop>> outer_loop:none
+ private static int sadShort2IntParamLeft(short[] s, short param) {
+ int sad = 0;
+ for (int i = 0; i < s.length; i++) {
+ sad += Math.abs(param - s[i]);
+ }
+ return sad;
+ }
+
+ /// CHECK-START: int Main.sadShort2IntConstRight(short[]) loop_optimization (before)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<ConsI:i\d+>> IntConstant -32767 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<ConsI>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Intrin:i\d+>> InvokeStaticOrDirect [<<Add>>] intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: int Main.sadShort2IntConstRight(short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<ConsI:i\d+>> IntConstant 32767 loop:none
+ /// CHECK-DAG: <<Rep:d\d+>> VecReplicateScalar [<<ConsI>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Load>>,<<Rep>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons8>>] loop:<<Loop>> outer_loop:none
+ private static int sadShort2IntConstRight(short[] s) {
+ int sad = 0;
+ for (int i = 0; i < s.length; i++) {
+ sad += Math.abs(s[i] - 32767);
+ }
+ return sad;
+ }
+
+ /// CHECK-START: int Main.sadShort2IntConstLeft(short[]) loop_optimization (before)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<ConsI:i\d+>> IntConstant 32767 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<ConsI>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Intrin:i\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: int Main.sadShort2IntConstLeft(short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<ConsI:i\d+>> IntConstant 32767 loop:none
+ /// CHECK-DAG: <<Rep:d\d+>> VecReplicateScalar [<<ConsI>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Rep>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons8>>] loop:<<Loop>> outer_loop:none
+ private static int sadShort2IntConstLeft(short[] s) {
+ int sad = 0;
+ for (int i = 0; i < s.length; i++) {
+ sad += Math.abs(32767 - s[i]);
+ }
+ return sad;
+ }
+
+ /// CHECK-START: int Main.sadShort2IntInvariantRight(short[], int) loop_optimization (before)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Conv:s\d+>> TypeConversion [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get>>,<<Conv>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Intrin:i\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: int Main.sadShort2IntInvariantRight(short[], int) loop_optimization (after)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<Conv:s\d+>> TypeConversion [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Rep:d\d+>> VecReplicateScalar [<<Conv>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Load>>,<<Rep>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons8>>] loop:<<Loop>> outer_loop:none
+ private static int sadShort2IntInvariantRight(short[] s, int val) {
+ int sad = 0;
+ short x = (short) (val + 1);
+ for (int i = 0; i < s.length; i++) {
+ sad += Math.abs(s[i] - x);
+ }
+ return sad;
+ }
+
+ /// CHECK-START: int Main.sadShort2IntInvariantLeft(short[], int) loop_optimization (before)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Conv:s\d+>> TypeConversion [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Conv>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Intrin:i\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: int Main.sadShort2IntInvariantLeft(short[], int) loop_optimization (after)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<Conv:s\d+>> TypeConversion [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Rep:d\d+>> VecReplicateScalar [<<Conv>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Rep>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ private static int sadShort2IntInvariantLeft(short[] s, int val) {
+ int sad = 0;
+ short x = (short) (val + 1);
+ for (int i = 0; i < s.length; i++) {
+ sad += Math.abs(x - s[i]);
+ }
+ return sad;
+ }
+
+ /// CHECK-START: int Main.sadShort2IntCastedExprRight(short[]) loop_optimization (before)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<ConsI:i\d+>> IntConstant 110 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> [<<Get>>,<<ConsI>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Conv:s\d+>> TypeConversion [<<Add>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get>>,<<Conv>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Intrin:i\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: int Main.sadShort2IntCastedExprRight(short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<ConsI:i\d+>> IntConstant 110 loop:none
+ /// CHECK-DAG: <<Rep:d\d+>> VecReplicateScalar [<<ConsI>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:d\d+>> VecAdd [<<Load>>,<<Rep>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Load>>,<<Add>>] loop:<<Loop>> outer_loop:none
+ private static int sadShort2IntCastedExprRight(short[] s) {
+ int sad = 0;
+ for (int i = 0; i < s.length; i++) {
+ short x = (short) (s[i] + 110); // narrower part sign extends
+ sad += Math.abs(s[i] - x);
+ }
+ return sad;
+ }
+
+ /// CHECK-START: int Main.sadShort2IntCastedExprLeft(short[]) loop_optimization (before)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<ConsI:i\d+>> IntConstant 110 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> [<<Get>>,<<ConsI>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Conv:s\d+>> TypeConversion [<<Add>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Conv>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Intrin:i\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: int Main.sadShort2IntCastedExprLeft(short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<ConsI:i\d+>> IntConstant 110 loop:none
+ /// CHECK-DAG: <<Rep:d\d+>> VecReplicateScalar [<<ConsI>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:d\d+>> VecAdd [<<Load>>,<<Rep>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Add>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ private static int sadShort2IntCastedExprLeft(short[] s) {
+ int sad = 0;
+ for (int i = 0; i < s.length; i++) {
+ short x = (short) (s[i] + 110); // narrower part sign extends
+ sad += Math.abs(x - s[i]);
+ }
+ return sad;
+ }
+
+ public static void main(String[] args) {
+ short[] interesting = {
+ (short) 0x0000,
+ (short) 0x0001,
+ (short) 0x0002,
+ (short) 0x0003,
+ (short) 0x0004,
+ (short) 0x1234,
+ (short) 0x8000,
+ (short) 0x8001,
+ (short) 0x8002,
+ (short) 0x8003,
+ (short) 0x8004,
+ (short) 0x8004,
+ (short) 0x7000,
+ (short) 0x7fff,
+ (short) 0xf000,
+ (short) 0xffff
+ };
+ short[] s = new short[64];
+ for (int i = 0; i < 64; i++) {
+ s[i] = interesting[i % interesting.length];
+ }
+
+ expectEquals(1067200, sadShort2IntParamRight(s, (short)-1));
+ expectEquals(1067200, sadShort2IntParamRight(s, (short) 0));
+ expectEquals(1067208, sadShort2IntParamRight(s, (short) 1));
+ expectEquals(1067224, sadShort2IntParamRight(s, (short) 2));
+ expectEquals(2635416, sadShort2IntParamRight(s, (short) 0x7fff));
+ expectEquals(1558824, sadShort2IntParamRight(s, (short) 0x8000));
+
+ expectEquals(1067200, sadShort2IntParamLeft(s, (short)-1));
+ expectEquals(1067200, sadShort2IntParamLeft(s, (short) 0));
+ expectEquals(1067208, sadShort2IntParamLeft(s, (short) 1));
+ expectEquals(1067224, sadShort2IntParamLeft(s, (short) 2));
+ expectEquals(2635416, sadShort2IntParamLeft(s, (short) 0x7fff));
+ expectEquals(1558824, sadShort2IntParamLeft(s, (short) 0x8000));
+
+ expectEquals(2635416, sadShort2IntConstRight(s));
+ expectEquals(2635416, sadShort2IntConstLeft(s));
+
+ expectEquals(1067200, sadShort2IntInvariantRight(s, -2));
+ expectEquals(1067200, sadShort2IntInvariantRight(s, -1));
+ expectEquals(1067208, sadShort2IntInvariantRight(s, 0));
+ expectEquals(1067224, sadShort2IntInvariantRight(s, 1));
+ expectEquals(2635416, sadShort2IntInvariantRight(s, 0x7ffe));
+ expectEquals(1558824, sadShort2IntInvariantRight(s, 0x7fff));
+
+ expectEquals(1067200, sadShort2IntInvariantLeft(s, -2));
+ expectEquals(1067200, sadShort2IntInvariantLeft(s, -1));
+ expectEquals(1067208, sadShort2IntInvariantLeft(s, 0));
+ expectEquals(1067224, sadShort2IntInvariantLeft(s, 1));
+ expectEquals(2635416, sadShort2IntInvariantLeft(s, 0x7ffe));
+ expectEquals(1558824, sadShort2IntInvariantLeft(s, 0x7fff));
+
+ expectEquals(268304, sadShort2IntCastedExprLeft(s));
+ expectEquals(268304, sadShort2IntCastedExprRight(s));
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ private static void expectEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/test/910-methods/check b/test/910-methods/check
index 8358500..f9552ad 100644
--- a/test/910-methods/check
+++ b/test/910-methods/check
@@ -19,4 +19,8 @@
patch -p0 expected.txt < expected_jack.diff
fi
+if [[ "$DX" == 'd8' ]]; then
+ patch -p0 expected.txt < expected_d8.diff
+fi
+
./default-check "$@"
diff --git a/test/910-methods/expected_d8.diff b/test/910-methods/expected_d8.diff
new file mode 100644
index 0000000..2c5d085
--- /dev/null
+++ b/test/910-methods/expected_d8.diff
@@ -0,0 +1,4 @@
+7c7
+< Location end: 39
+---
+> Location end: 36
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 6017d28..4b49142 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -22,8 +22,7 @@
$(HOST_OUT_EXECUTABLES)/dx \
$(HOST_OUT_EXECUTABLES)/jasmin \
$(HOST_OUT_EXECUTABLES)/smali \
- $(HOST_OUT_EXECUTABLES)/dexmerger \
- $(JACK)
+ $(HOST_OUT_EXECUTABLES)/dexmerger
# Convert's a rule name to the form used in variables, e.g. no-relocate to NO_RELOCATE
define name-to-var
@@ -124,19 +123,9 @@
# Host executables.
host_prereq_rules := $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES)
-ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
-# Classpath for Jack compilation for host.
-host_prereq_rules += $(HOST_JACK_CLASSPATH_DEPENDENCIES)
-endif
-
-# Required for dx, jasmin, smali, dexmerger, jack.
+# Required for dx, jasmin, smali, dexmerger.
host_prereq_rules += $(TEST_ART_RUN_TEST_DEPENDENCIES)
-ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
-# Classpath for Jack compilation for target.
-target_prereq_rules := $(TARGET_JACK_CLASSPATH_DEPENDENCIES)
-endif
-
# Sync test files to the target, depends upon all things that must be pushed
#to the target.
target_prereq_rules += test-art-target-sync
diff --git a/tools/ahat/README.txt b/tools/ahat/README.txt
index ed40cb7..a765b17 100644
--- a/tools/ahat/README.txt
+++ b/tools/ahat/README.txt
@@ -48,15 +48,22 @@
time.
* That we don't show the 'extra' column in the DominatedList if we are
showing all the instances.
- * That Instance.asString properly takes into account "offset" and
- "count" fields, if they are present.
* Instance.getDexCacheLocation
Reported Issues:
* Request to be able to sort tables by size.
Release History:
- 1.4 Pending
+ 1.5 Pending
+
+ 1.4 October 03, 2017
+ Give better error messages on failure to launch ahat.
+ Properly mark thread and non-default root objects as roots.
+ Improve startup performance, in some cases significantly.
+ Other miscellaneous bug fixes.
+
+ 1.3.1 August 22, 2017
+ Don't include weak references in sample paths.
1.3 July 25, 2017
Improve diffing of static and instance fields.
diff --git a/tools/ahat/src/manifest.txt b/tools/ahat/src/manifest.txt
index d893c5e..1753406 100644
--- a/tools/ahat/src/manifest.txt
+++ b/tools/ahat/src/manifest.txt
@@ -1,4 +1,4 @@
Name: ahat/
Implementation-Title: ahat
-Implementation-Version: 1.3
+Implementation-Version: 1.4
Main-Class: com.android.ahat.Main
diff --git a/tools/run-prebuilt-libjdwp-tests.sh b/tools/run-prebuilt-libjdwp-tests.sh
new file mode 100755
index 0000000..46c2a15
--- /dev/null
+++ b/tools/run-prebuilt-libjdwp-tests.sh
@@ -0,0 +1,112 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [[ ! -d libcore ]]; then
+ echo "Script needs to be run at the root of the android tree"
+ exit 1
+fi
+
+source build/envsetup.sh >&/dev/null # for get_build_var, setpaths
+setpaths # include platform prebuilt java, javac, etc in $PATH.
+
+if [[ `uname` != 'Linux' ]]; then
+ echo "Script cannot be run on $(uname). It is Linux only."
+ exit 2
+fi
+
+jdwp_path=${ANDROID_JAVA_HOME}/jre/lib/amd64/libjdwp.so
+if [[ ! -f $jdwp_path ]]; then
+  echo "Unable to find prebuilt libjdwp.so! Did the version change from jdk8?"
+ exit 3
+fi
+
+args=("$@")
+debug="no"
+has_variant="no"
+has_mode="no"
+
+while true; do
+ if [[ $1 == "--debug" ]]; then
+ debug="yes"
+ shift
+ elif [[ "$1" == --mode=* ]]; then
+ has_mode="yes"
+ if [[ $1 != "--mode=host" ]]; then
+ # Just print out an actually helpful error message.
+ echo "Only host tests can be run against prebuilt libjdwp"
+ exit 4
+ fi
+ shift
+ elif [[ $1 == --variant=* ]]; then
+ has_variant="yes"
+ if [[ $1 != "--variant=x64" ]] && [[ $1 != "--variant=X64" ]]; then
+ # Just print out an actually helpful error message.
+ echo "Only 64bit runs can be tested against the prebuilt libjdwp!"
+ exit 5
+ fi
+ shift
+ elif [[ "$1" == "" ]]; then
+ break
+ else
+ shift
+ fi
+done
+
+if [[ "$has_mode" = "no" ]]; then
+ args+=(--mode=host)
+fi
+
+if [[ "$has_variant" = "no" ]]; then
+ args+=(--variant=X64)
+fi
+
+wrapper_name=""
+plugin=""
+if [[ "$debug" = "yes" ]]; then
+ wrapper_name=libwrapagentpropertiesd
+ plugin="$ANDROID_HOST_OUT/lib64/libopenjdkjvmtid.so"
+else
+ wrapper_name=libwrapagentproperties
+ plugin="$ANDROID_HOST_OUT/lib64/libopenjdkjvmti.so"
+fi
+wrapper=$ANDROID_HOST_OUT/lib64/${wrapper_name}.so
+
+if [[ ! -f $wrapper ]]; then
+  echo "need to build $wrapper to run prebuilt-libjdwp-tests!"
+ echo "m -j40 ${wrapper/.so/}"
+ exit 6
+fi
+
+if [[ ! -f $plugin ]]; then
+ echo "jvmti plugin not built!"
+ exit 7
+fi
+
+props_path=$PWD/art/tools/libjdwp-compat.props
+expect_path=$PWD/art/tools/libjdwp_art_failures.txt
+
+function verbose_run() {
+ echo "$@"
+ env "$@"
+}
+
+verbose_run LD_LIBRARY_PATH="$(dirname $jdwp_path):$LD_LIBRARY_PATH" \
+ ./art/tools/run-jdwp-tests.sh \
+ "${args[@]}" \
+ "-Xplugin:$plugin" \
+ --agent-wrapper "${wrapper}"="${props_path}" \
+ --jdwp-path "$jdwp_path" \
+ --expectations "$expect_path"