Run the code placement optimization only for targets that want it (ARM and x86 for now).
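
For other targets, the opt-in pattern is the same as the ARM and x86 changes below: set the new benefitFromCodePlacementOpt flag in the target's TargetLowering constructor, and CodePlacementOpt will query it through shouldOptimizeCodePlacement() before doing any work. A minimal sketch of what that looks like for a hypothetical backend (the "Foo" target name and the exact constructor signature are illustrative only, not part of this patch):

    // FooISelLowering.cpp (hypothetical target)
    FooTargetLowering::FooTargetLowering(TargetMachine &TM)
        : TargetLowering(TM) {
      // ... existing operation-legality and calling-convention setup ...

      // Opt in to the intra-loop code placement pass. The default set in
      // TargetLowering's constructor is false, so targets that do not set
      // this flag keep their current behavior.
      benefitFromCodePlacementOpt = true;
    }
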
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@71726 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index dcff47d..0576e3e 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -620,6 +620,13 @@
return allowUnalignedMemoryAccesses;
}
+ /// shouldOptimizeCodePlacement - Returns true if the target would benefit
+ /// from code placement optimization.
+ /// @brief Determine if the target should perform code placement optimization.
+ bool shouldOptimizeCodePlacement() const {
+ return benefitFromCodePlacementOpt;
+ }
+
/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove lowering.
/// It returns MVT::iAny if SelectionDAG should be responsible for
@@ -1652,6 +1659,10 @@
/// operations when copying small arrays and other similar tasks.
/// @brief Indicate whether the target permits unaligned memory accesses.
bool allowUnalignedMemoryAccesses;
+
+ /// This field specifies whether the target would benefit from code placement
+ /// optimization.
+ bool benefitFromCodePlacementOpt;
};
} // end llvm namespace
diff --git a/lib/CodeGen/CodePlacementOpt.cpp b/lib/CodeGen/CodePlacementOpt.cpp
index 61a8b12..919ee54 100644
--- a/lib/CodeGen/CodePlacementOpt.cpp
+++ b/lib/CodeGen/CodePlacementOpt.cpp
@@ -104,6 +104,9 @@
/// jcc <cond> C, [exit]
///
bool CodePlacementOpt::OptimizeIntraLoopEdges() {
+ if (!TLI->shouldOptimizeCodePlacement())
+ return false;
+
bool Changed = false;
for (unsigned i = 0, e = UncondJmpMBBs.size(); i != e; ++i) {
MachineBasicBlock *MBB = UncondJmpMBBs[i].first;
diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index d7abd32..1bb3959 100644
--- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -483,6 +483,7 @@
memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray));
maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
allowUnalignedMemoryAccesses = false;
+ benefitFromCodePlacementOpt = false;
UseUnderscoreSetJmp = false;
UseUnderscoreLongJmp = false;
SelectIsExpensive = false;
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 48e197d..f5b33b0 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -288,6 +288,7 @@
setIfCvtDupBlockSizeLimit(Subtarget->isThumb() ? 0 : 2);
maxStoresPerMemcpy = 1; //// temporary - rewrite interface to use type
+ benefitFromCodePlacementOpt = true;
}
const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 9ac59df..debfac4 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -844,6 +844,7 @@
maxStoresPerMemmove = 3; // For @llvm.memmove -> sequence of stores
allowUnalignedMemoryAccesses = true; // x86 supports it!
setPrefLoopAlignment(16);
+ benefitFromCodePlacementOpt = true;
}