Generic expansion for atomic load/store into cmpxchg/atomicrmw xchg; this implements 64-bit atomic load/store for ARM.
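
A rough sketch of the IR-level idea, written in the same (2011-era) syntax the
test below uses; the function names and the zero comparand are illustrative
and not taken from the patch itself. An atomic load the target cannot lower
directly can be emulated with a cmpxchg that returns the old value while
leaving memory unchanged, and an atomic store with an atomicrmw xchg whose
result is discarded:

  ; 64-bit atomic load expressed as a compare-and-swap: comparand and new
  ; value are both 0, so memory is unchanged whether the compare succeeds
  ; or fails, and the returned old value is the loaded value.
  define i64 @load64_via_cmpxchg(i64* %ptr) {
    %r = cmpxchg i64* %ptr, i64 0, i64 0 seq_cst
    ret i64 %r
  }

  ; 64-bit atomic store expressed as an exchange; the previous value that
  ; xchg hands back is simply ignored.
  define void @store64_via_xchg(i64* %ptr, i64 %val) {
    %old = atomicrmw xchg i64* %ptr, i64 %val seq_cst
    ret void
  }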

llvm-svn: 138872
diff --git a/llvm/test/CodeGen/ARM/atomic-64bit.ll b/llvm/test/CodeGen/ARM/atomic-64bit.ll
index abe1acc..e9609ac 100644
--- a/llvm/test/CodeGen/ARM/atomic-64bit.ll
+++ b/llvm/test/CodeGen/ARM/atomic-64bit.ll
@@ -6,7 +6,7 @@
 ; CHECK: ldrexd r2, r3
 ; CHECK: adds r0, r2
 ; CHECK: adc r1, r3
-; CHECK: strexd {{r[0-9]+}}, r0, r1
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
 ; CHECK: cmp
 ; CHECK: bne
 ; CHECK: dmb ish
@@ -20,7 +20,7 @@
 ; CHECK: ldrexd r2, r3
 ; CHECK: subs r0, r2
 ; CHECK: sbc r1, r3
-; CHECK: strexd {{r[0-9]+}}, r0, r1
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
 ; CHECK: cmp
 ; CHECK: bne
 ; CHECK: dmb ish
@@ -34,7 +34,7 @@
 ; CHECK: ldrexd r2, r3
 ; CHECK: and r0, r2
 ; CHECK: and r1, r3
-; CHECK: strexd {{r[0-9]+}}, r0, r1
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
 ; CHECK: cmp
 ; CHECK: bne
 ; CHECK: dmb ish
@@ -48,7 +48,7 @@
 ; CHECK: ldrexd r2, r3
 ; CHECK: orr r0, r2
 ; CHECK: orr r1, r3
-; CHECK: strexd {{r[0-9]+}}, r0, r1
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
 ; CHECK: cmp
 ; CHECK: bne
 ; CHECK: dmb ish
@@ -62,7 +62,7 @@
 ; CHECK: ldrexd r2, r3
 ; CHECK: eor r0, r2
 ; CHECK: eor r1, r3
-; CHECK: strexd {{r[0-9]+}}, r0, r1
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
 ; CHECK: cmp
 ; CHECK: bne
 ; CHECK: dmb ish
@@ -74,7 +74,7 @@
 ; CHECK: test6
 ; CHECK: dmb ish
 ; CHECK: ldrexd r2, r3
-; CHECK: strexd {{r[0-9]+}}, r0, r1
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
 ; CHECK: cmp
 ; CHECK: bne
 ; CHECK: dmb ish
@@ -89,10 +89,40 @@
 ; CHECK: cmp r2
 ; CHECK: cmpeq r3
 ; CHECK: bne
-; CHECK: strexd {{r[0-9]+}}, r0, r1
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
 ; CHECK: cmp
 ; CHECK: bne
 ; CHECK: dmb ish
   %r = cmpxchg i64* %ptr, i64 %val1, i64 %val2 seq_cst
   ret i64 %r
 }
+
+; Compiles down to cmpxchg
+; FIXME: Should compile to a single ldrexd
+define i64 @test8(i64* %ptr) {
+; CHECK: test8
+; CHECK: ldrexd r2, r3
+; CHECK: cmp r2
+; CHECK: cmpeq r3
+; CHECK: bne
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
+; CHECK: cmp
+; CHECK: bne
+; CHECK: dmb ish
+  %r = load atomic i64* %ptr seq_cst, align 8
+  ret i64 %r
+}
+
+; Compiles down to atomicrmw xchg; there really isn't any more efficient
+; way to write it.
+define void @test9(i64* %ptr, i64 %val) {
+; CHECK: test9
+; CHECK: dmb ish
+; CHECK: ldrexd r2, r3
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
+; CHECK: cmp
+; CHECK: bne
+; CHECK: dmb ish
+  store atomic i64 %val, i64* %ptr seq_cst, align 8
+  ret void
+}