Port memory barrier intrinsics to AArch64

The memory barrier builtins __builtin_arm_{dmb, dsb, isb} are required to
implement their corresponding ACLE and MSVC intrinsics.

This patch ports the ARM dmb, dsb, and isb intrinsics to AArch64.
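
As a rough illustration (not part of this patch), the builtins are expected
to be usable as below; the argument selects the 4-bit barrier option, and
15 is the full-system "sy" option used throughout the new test:

  /* Illustrative sketch only: full-system barriers via the clang builtins. */
  void full_system_barriers(void) {
    __builtin_arm_dmb(15); /* should lower to: dmb sy */
    __builtin_arm_dsb(15); /* should lower to: dsb sy */
    __builtin_arm_isb(15); /* should lower to: isb    */
  }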

Differential Revision: http://reviews.llvm.org/D4520

llvm-svn: 213247
diff --git a/llvm/test/CodeGen/AArch64/intrinsics-memory-barrier.ll b/llvm/test/CodeGen/AArch64/intrinsics-memory-barrier.ll
new file mode 100644
index 0000000..09e34ae
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/intrinsics-memory-barrier.ll
@@ -0,0 +1,57 @@
+; RUN: llc < %s -mtriple=aarch64-eabi -O=3 | FileCheck %s
+
+define void @test() {
+  ; CHECK: dmb sy
+  call void @llvm.aarch64.dmb(i32 15)
+  ; CHECK: dmb osh
+  call void @llvm.aarch64.dmb(i32 3)
+  ; CHECK: dsb sy
+  call void @llvm.aarch64.dsb(i32 15)
+  ; CHECK: dsb ishld
+  call void @llvm.aarch64.dsb(i32 9)
+  ; CHECK: isb
+  call void @llvm.aarch64.isb(i32 15)
+  ret void
+}
+
+; The important point is that the compiler must not reorder memory access
+; instructions around the DMB; if it did, the two STRs below would be merged
+; into a single STP.
+define void @test_dmb_reordering(i32 %a, i32 %b, i32* %d) {
+  store i32 %a, i32* %d              ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}]
+
+  call void @llvm.aarch64.dmb(i32 15); CHECK: dmb sy
+
+  %d1 = getelementptr i32* %d, i64 1
+  store i32 %b, i32* %d1             ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, #4]
+
+  ret void
+}
+
+; Similarly for DSB.
+define void @test_dsb_reordering(i32 %a, i32 %b, i32* %d) {
+  store i32 %a, i32* %d              ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}]
+
+  call void @llvm.aarch64.dsb(i32 15); CHECK: dsb sy
+
+  %d1 = getelementptr i32* %d, i64 1
+  store i32 %b, i32* %d1             ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, #4]
+
+  ret void
+}
+
+; And ISB.
+define void @test_isb_reordering(i32 %a, i32 %b, i32* %d) {
+  store i32 %a, i32* %d              ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}]
+
+  call void @llvm.aarch64.isb(i32 15); CHECK: isb
+
+  %d1 = getelementptr i32* %d, i64 1
+  store i32 %b, i32* %d1             ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, #4]
+
+  ret void
+}
+
+declare void @llvm.aarch64.dmb(i32)
+declare void @llvm.aarch64.dsb(i32)
+declare void @llvm.aarch64.isb(i32)