Run code placement optimization for targets that want it (ARM and x86 for now).
llvm-svn: 71726
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 48e197d..f5b33b0 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -288,6 +288,7 @@
setIfCvtDupBlockSizeLimit(Subtarget->isThumb() ? 0 : 2);
maxStoresPerMemcpy = 1; //// temporary - rewrite interface to use type
+ benefitFromCodePlacementOpt = true;
}
const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 9ac59df..debfac4 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -844,6 +844,7 @@
maxStoresPerMemmove = 3; // For @llvm.memmove -> sequence of stores
allowUnalignedMemoryAccesses = true; // x86 supports it!
setPrefLoopAlignment(16);
+ benefitFromCodePlacementOpt = true;
}
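
For context, the mechanism this patch relies on is an opt-in flag on the common TargetLowering base class: it defaults to false, and a target's lowering constructor sets it when the code placement pass is known to pay off there. Below is a minimal, self-contained C++ sketch of that shape; the class names, the wantsCodePlacementOpt() accessor, and the runCodePlacement() driver are illustrative stand-ins, not the real LLVM API.

    #include <iostream>

    // Stand-in for the TargetLowering base class: holds target-wide
    // codegen knobs that passes can query.
    class TargetLoweringBase {
    protected:
      // Mirrors benefitFromCodePlacementOpt: false by default, so targets
      // that have not measured a win are left alone.
      bool benefitFromCodePlacementOpt = false;

    public:
      virtual ~TargetLoweringBase() = default;
      bool wantsCodePlacementOpt() const { return benefitFromCodePlacementOpt; }
    };

    // Targets opt in from their constructors, as ARM and X86 do above.
    class ARMLowering : public TargetLoweringBase {
    public:
      ARMLowering() { benefitFromCodePlacementOpt = true; }
    };

    class OtherLowering : public TargetLoweringBase {
      // Does not set the flag; keeps the conservative default.
    };

    // The pass queries the flag and skips targets that did not opt in.
    void runCodePlacement(const TargetLoweringBase &TLI, const char *Name) {
      if (!TLI.wantsCodePlacementOpt()) {
        std::cout << Name << ": skipping code placement\n";
        return;
      }
      std::cout << Name << ": running code placement\n";
    }

    int main() {
      ARMLowering arm;
      OtherLowering other;
      runCodePlacement(arm, "arm");     // running code placement
      runCodePlacement(other, "other"); // skipping code placement
    }

With this shape, enabling the optimization for a new target is a one-line constructor change, exactly like the two hunks above.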