Make the x86-32 backend specify a byval alignment explicitly, even
when the code generator would apply the same alignment on its own.
With this patch, clang compiles the example in PR9794 without an
alloca temporary.
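
A minimal sketch of the affected pattern, reusing the CallSite struct
from the updated test below; the free-function callee here is
illustrative only, not the exact PR9794 reproducer:

  // Small aggregate passed by value on i386; struct copied from the test.
  struct CallSite {
    unsigned Ptr;
    CallSite(unsigned XX) : Ptr(XX) {}
  };

  void getModRefInfo(CallSite CS);                  // hypothetical callee
  void caller(unsigned V) { getModRefInfo(CallSite(V)); }

  // The byval parameter now carries an explicit alignment in the IR,
  // matching the CHECK line in the test:
  //   %struct.CallSite* byval align 4 %CS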
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@131881 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGenCXX/x86_32-arguments.cpp b/test/CodeGenCXX/x86_32-arguments.cpp
index e94e2ca..4c06033 100644
--- a/test/CodeGenCXX/x86_32-arguments.cpp
+++ b/test/CodeGenCXX/x86_32-arguments.cpp
@@ -31,7 +31,7 @@
// CHECK: define void @_ZThn4_N18BasicAliasAnalysis13getModRefInfoE8CallSite
// ...
-// CHECK: %struct.CallSite* byval %CS)
+// CHECK: %struct.CallSite* byval align 4 %CS)
struct CallSite {
unsigned Ptr;
CallSite(unsigned XX) : Ptr(XX) {}
@@ -89,7 +89,7 @@
s5 f5() { return s5(); }
// CHECK: define i32 @_Z4f6_0M2s6i(i32 %a)
-// CHECK: define i64 @_Z4f6_1M2s6FivE(%{{.*}} byval)
+// CHECK: define i64 @_Z4f6_1M2s6FivE(%{{.*}} byval align 4)
// FIXME: It would be nice to avoid byval on the previous case.
struct s6 {};
typedef int s6::* s6_mdp;