ARM64: track alignment padding registers on AAPCS targets
This implements clause C.8 of the AAPCS in the front-end, so that Clang
knows exactly when the argument registers run out and padding has to be
inserted before the stack objects begin.
PR19432.
llvm-svn: 206296
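
A quick illustration (hypothetical declaration, not part of the patch;
assuming an AAPCS target such as arm64-linux-gnu): a 16-byte-aligned
__int128 following a single int rounds the NGRN from 1 up to 2, so x1 is
left as padding and the value travels in x2/x3.

  /* 'f' is a hypothetical example, not from the patch. */
  void f(int a, __int128 b); /* a: w0; b: x2/x3; x1 skipped as padding */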
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
index 1f98920..64f3209 100644
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -3291,6 +3291,10 @@
Ty = EnumTy->getDecl()->getIntegerType();

if (!Ty->isFloatingType() && !Ty->isVectorType()) {
+ unsigned Alignment = getContext().getTypeAlign(Ty);
+ if (!isDarwinPCS() && Alignment > 64)
+ AllocatedGPR = llvm::RoundUpToAlignment(AllocatedGPR, Alignment / 64);
+
int RegsNeeded = getContext().getTypeSize(Ty) > 64 ? 2 : 1;
AllocatedGPR += RegsNeeded;
}
@@ -3328,12 +3332,16 @@
// Aggregates <= 16 bytes are passed directly in registers or on the stack.
uint64_t Size = getContext().getTypeSize(Ty);
if (Size <= 128) {
+ unsigned Alignment = getContext().getTypeAlign(Ty);
+ if (!isDarwinPCS() && Alignment > 64)
+ AllocatedGPR = llvm::RoundUpToAlignment(AllocatedGPR, Alignment / 64);
+
Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
AllocatedGPR += Size / 64;
IsSmallAggr = true;
// We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
// For aggregates with 16-byte alignment, we use i128.
- if (getContext().getTypeAlign(Ty) < 128 && Size == 128) {
+ if (Alignment < 128 && Size == 128) {
llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
}
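
Both hunks above rely on the same rounding step. Here is a minimal
sketch of that arithmetic, assuming llvm::RoundUpToAlignment(V, A)
returns the smallest multiple of A that is >= V (the helper name below
is hypothetical):

  #include <assert.h>

  /* Stand-in for llvm::RoundUpToAlignment applied to the GPR count. */
  static unsigned round_up(unsigned v, unsigned a) {
    return (v + a - 1) / a * a;
  }

  int main(void) {
    /* 128-bit alignment gives Alignment / 64 == 2: an odd NGRN is
       bumped to the next even register, burning one padding GPR. */
    assert(round_up(1, 2) == 2); /* x1 skipped; argument in x2/x3 */
    assert(round_up(4, 2) == 4); /* already even; no padding needed */
    return 0;
  }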
diff --git a/clang/test/CodeGen/arm64-aapcs-arguments.c b/clang/test/CodeGen/arm64-aapcs-arguments.c
new file mode 100644
index 0000000..283296c
--- /dev/null
+++ b/clang/test/CodeGen/arm64-aapcs-arguments.c
@@ -0,0 +1,14 @@
+// RUN: %clang_cc1 -triple arm64-linux-gnu -target-abi aapcs -ffreestanding -emit-llvm -w -o - %s | FileCheck %s
+
+// AAPCS clause C.8 says: If the argument has an alignment of 16 then the NGRN
+// is rounded up to the next even number.
+
+// CHECK: void @test1(i32 %x0, i128 %x2_x3, i128 %x4_x5, i128 %x6_x7, i128 %sp.coerce)
+typedef union { __int128 a; } Small;
+void test1(int x0, __int128 x2_x3, __int128 x4_x5, __int128 x6_x7, Small sp) {
+}
+
+
+// CHECK: void @test2(i32 %x0, i128 %x2_x3.coerce, i32 %x4, i128 %x6_x7.coerce, i32 %sp, i128 %sp16.coerce)
+void test2(int x0, Small x2_x3, int x4, Small x6_x7, int sp, Small sp16) {
+}
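
For contrast with the i128 coercions checked above, a 16-byte aggregate
with only 8-byte alignment takes the [2 x i64] path from the second hunk
and never triggers the clause C.8 rounding (hypothetical example, not
part of the test):

  /* 16 bytes but only 8-byte aligned: passed as [2 x i64], and no
     register is skipped because the alignment does not exceed 64. */
  typedef struct { long a, b; } Pair;
  void g(int x0, Pair x1_x2); /* x0: w0; aggregate in x1/x2 */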