ARM64: initial backend import
This adds a second implementation of the AArch64 architecture to LLVM,
accessible in parallel via the "arm64" triple. The plan over the
coming weeks & months is to merge the two into a single backend,
during which time thorough code review should naturally occur.
Everything will be easier with the target in-tree though, hence this
commit.
llvm-svn: 205090
diff --git a/llvm/test/CodeGen/ARM64/volatile.ll b/llvm/test/CodeGen/ARM64/volatile.ll
new file mode 100644
index 0000000..e00ac5a
--- /dev/null
+++ b/llvm/test/CodeGen/ARM64/volatile.ll
@@ -0,0 +1,27 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+define i64 @normal_load(i64* nocapture %bar) nounwind readonly {
+; CHECK: normal_load
+; CHECK: ldp
+; CHECK-NEXT: add
+; CHECK-NEXT: ret
+ %add.ptr = getelementptr inbounds i64* %bar, i64 1 ; address of bar[1]
+ %tmp = load i64* %add.ptr, align 8 ; non-volatile load of bar[1]
+ %add.ptr1 = getelementptr inbounds i64* %bar, i64 2 ; address of bar[2] (adjacent to bar[1])
+ %tmp1 = load i64* %add.ptr1, align 8 ; non-volatile load of bar[2]
+ %add = add nsw i64 %tmp1, %tmp ; sum; CHECK lines above expect the two adjacent loads to be combined into a single ldp (load-pair)
+ ret i64 %add
+}
+
+define i64 @volatile_load(i64* nocapture %bar) nounwind {
+; CHECK: volatile_load
+; CHECK: ldr
+; CHECK-NEXT: ldr
+; CHECK-NEXT: add
+; CHECK-NEXT: ret
+ %add.ptr = getelementptr inbounds i64* %bar, i64 1 ; address of bar[1]
+ %tmp = load volatile i64* %add.ptr, align 8 ; volatile load of bar[1]
+ %add.ptr1 = getelementptr inbounds i64* %bar, i64 2 ; address of bar[2]
+ %tmp1 = load volatile i64* %add.ptr1, align 8 ; volatile load of bar[2]
+ %add = add nsw i64 %tmp1, %tmp ; sum; CHECK lines above expect two separate ldr instructions — volatile loads must not be merged into an ldp
+ ret i64 %add
+}