Increase ARM APCS preferred alignment for i64 and f64 from 32 bits to 64 bits.
LDM/STM instructions can run one cycle faster on some ARM processors if the
memory address is 64-bit aligned. Radar 8489376.
llvm-svn: 115047
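
In data-layout terms, the change below turns the APCS entries for f64 and i64 from "32:32" into "32:64": the ABI alignment stays 32 bits, only the preferred alignment rises to 64 bits. A minimal sketch of what that means, written against the LLVM C++ API of this era (llvm::TargetData; later releases call the class DataLayout and move the headers under llvm/IR) and not part of the patch:

  #include "llvm/LLVMContext.h"
  #include "llvm/Type.h"
  #include "llvm/DerivedTypes.h"
  #include "llvm/Target/TargetData.h"
  #include <cstdio>

  int main() {
    llvm::LLVMContext Ctx;
    const llvm::Type *I64 = llvm::Type::getInt64Ty(Ctx);

    // Non-Thumb APCS layout strings before and after this patch.
    llvm::TargetData Old("e-p:32:32-f64:32:32-i64:32:32-"
                         "v128:32:128-v64:32:64-n32");
    llvm::TargetData New("e-p:32:32-f64:32:64-i64:32:64-"
                         "v128:32:128-v64:32:64-n32");

    // "i64:32:64" means ABI alignment 32 bits, preferred alignment 64 bits.
    std::printf("old: ABI %u, pref %u (bytes)\n",
                Old.getABITypeAlignment(I64), Old.getPrefTypeAlignment(I64)); // 4, 4
    std::printf("new: ABI %u, pref %u (bytes)\n",
                New.getABITypeAlignment(I64), New.getPrefTypeAlignment(I64)); // 4, 8
    return 0;
  }

The AAPCS strings already specify f64:64:64 and i64:64:64, so only the APCS variants change.
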
diff --git a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index ec2b74a..a7d1d9f 100644
--- a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -1370,7 +1370,7 @@
   unsigned Align = (*Op0->memoperands_begin())->getAlignment();
   const Function *Func = MF->getFunction();
   unsigned ReqAlign = STI->hasV6Ops()
-    ? TD->getPrefTypeAlignment(Type::getInt64Ty(Func->getContext()))
+    ? TD->getABITypeAlignment(Type::getInt64Ty(Func->getContext()))
     : 8; // Pre-v6 need 8-byte align
   if (Align < ReqAlign)
     return false;
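
The hunk above also switches the alignment query from getPrefTypeAlignment to getABITypeAlignment. With the new layout strings the preferred alignment of i64 on APCS becomes 8 bytes while the ABI alignment stays at 4, so keeping the preferred-alignment query would have raised ReqAlign and started rejecting 4-byte-aligned pairs that could previously be merged into LDRD/STRD; querying the ABI alignment preserves the old requirement. A condensed, hypothetical rendering of that check (alignmentOK below is an illustration, not the function being patched), again assuming the era's TargetData API:

  #include "llvm/LLVMContext.h"
  #include "llvm/Type.h"
  #include "llvm/DerivedTypes.h"
  #include "llvm/Target/TargetData.h"
  #include <cstdio>

  // Returns true if a pair of 32-bit loads/stores with alignment 'Align'
  // (in bytes) would satisfy the requirement on a v6+ APCS target.
  static bool alignmentOK(const llvm::TargetData &TD, llvm::LLVMContext &Ctx,
                          unsigned Align, bool UsePrefAlign) {
    const llvm::Type *I64 = llvm::Type::getInt64Ty(Ctx);
    unsigned ReqAlign = UsePrefAlign ? TD.getPrefTypeAlignment(I64)
                                     : TD.getABITypeAlignment(I64);
    return Align >= ReqAlign;
  }

  int main() {
    llvm::LLVMContext Ctx;
    // New non-Thumb APCS layout from this patch.
    llvm::TargetData TD("e-p:32:32-f64:32:64-i64:32:64-"
                        "v128:32:128-v64:32:64-n32");
    // A 4-byte-aligned pair still qualifies with the ABI query (new code)
    // but would be rejected by the preferred-alignment query (old code).
    std::printf("ABI query,  align 4: %d\n", alignmentOK(TD, Ctx, 4, false)); // 1
    std::printf("pref query, align 4: %d\n", alignmentOK(TD, Ctx, 4, true));  // 0
    return 0;
  }
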
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.h b/llvm/lib/Target/ARM/ARMSubtarget.h
index aefd130..1188c6d 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -218,7 +218,7 @@
   std::string getDataLayout() const {
     if (isThumb()) {
       if (isAPCS_ABI()) {
-        return std::string("e-p:32:32-f64:32:32-i64:32:32-"
+        return std::string("e-p:32:32-f64:32:64-i64:32:64-"
                            "i16:16:32-i8:8:32-i1:8:32-"
                            "v128:32:128-v64:32:64-a:0:32-n32");
       } else {
@@ -228,7 +228,7 @@
       }
     } else {
       if (isAPCS_ABI()) {
-        return std::string("e-p:32:32-f64:32:32-i64:32:32-"
+        return std::string("e-p:32:32-f64:32:64-i64:32:64-"
                            "v128:32:128-v64:32:64-n32");
       } else {
         return std::string("e-p:32:32-f64:64:64-i64:64:64-"
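
Raising the preferred alignment mainly affects objects whose alignment the compiler is free to choose, for example globals without an explicit alignment: those can now be placed on 8-byte boundaries, which is what lets the LDM/STM (and LDRD) accesses mentioned above take the faster path. A sketch of that effect (hypothetical driver, not part of the patch; assumes the era's TargetData and GlobalVariable C++ API):

  #include "llvm/LLVMContext.h"
  #include "llvm/Module.h"
  #include "llvm/GlobalVariable.h"
  #include "llvm/Constants.h"
  #include "llvm/DerivedTypes.h"
  #include "llvm/Target/TargetData.h"
  #include <cstdio>

  int main() {
    llvm::LLVMContext Ctx;
    llvm::Module M("apcs-align-demo", Ctx);
    const llvm::Type *I64 = llvm::Type::getInt64Ty(Ctx);

    // An i64 global with no explicit alignment attribute;
    // it is owned by (and freed with) the module.
    llvm::GlobalVariable *GV =
        new llvm::GlobalVariable(M, I64, /*isConstant=*/false,
                                 llvm::GlobalValue::InternalLinkage,
                                 llvm::ConstantInt::get(I64, 0), "g");

    // New non-Thumb APCS layout: ABI align 4 bytes, preferred align 8 bytes.
    llvm::TargetData TD("e-p:32:32-f64:32:64-i64:32:64-"
                        "v128:32:128-v64:32:64-n32");

    // Globals without an explicit alignment are laid out at the preferred
    // alignment, so this now prints 8 where it used to print 4.
    std::printf("chosen alignment: %u bytes\n", TD.getPreferredAlignment(GV));
    return 0;
  }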