Raise the ARM byval minimum size from 32 bytes to 64 bytes, addressing a
performance regression in mason. rdar://problem/7662569
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@130444 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index aa48cb2..bc2472c 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -2344,7 +2344,7 @@
// FIXME: This doesn't handle alignment > 64 bits.
const llvm::Type* ElemTy;
unsigned SizeRegs;
- if (getContext().getTypeSizeInChars(Ty) <= CharUnits::fromQuantity(32)) {
+ if (getContext().getTypeSizeInChars(Ty) <= CharUnits::fromQuantity(64)) {
ElemTy = llvm::Type::getInt32Ty(getVMContext());
SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
} else if (getABIKind() == ARMABIInfo::APCS) {
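
Illustrative sketch (not part of the patch): an aggregate between 32 and 64
bytes whose argument lowering is affected by this threshold. The struct and
function names below are hypothetical, and the byval fallback mentioned in
the comments refers to the APCS branch visible in the surrounding context.

    // Hypothetical 48-byte aggregate (names invented for illustration).
    // Old limit (32 bytes): 48 > 32, so this fell through to the later
    //   branches (byval on APCS, i64 coercion otherwise).
    // New limit (64 bytes): coerced to an i32 array instead;
    //   SizeRegs = (384 bits + 31) / 32 = 12, i.e. [12 x i32].
    struct Matrix3x4 {
      float m[12]; // 12 * 4 bytes = 48 bytes
    };

    float sumAll(Matrix3x4 mat) { // aggregate passed by value
      float total = 0.0f;
      for (int i = 0; i != 12; ++i)
        total += mat.m[i];
      return total;
    }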