Atomic load/store on ARM/Thumb.

I don't really like the instruction-selection patterns this adds, but I'm
having trouble coming up with a better way to handle them.

I plan on making other targets use the same legalization that
ARM-without-memory-barriers is using: expanding atomic loads and stores
into __sync_* library calls. It's not especially efficient, but if anyone
cares, it's not that hard to plug in a better lowering for a given target.
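
For targets without barrier instructions (Thumb1 in the test below), that
legalization turns a seq_cst store into a __sync_lock_test_and_set_N
libcall and a seq_cst load into a __sync_val_compare_and_swap_N call with
zero for both the expected and new values. Roughly, written out as IR for
illustration (the libcall names are taken from the CHECK lines in the
test; the real expansion happens during legalization, not as an IR
rewrite):

  store atomic i32 %val, i32* %ptr seq_cst, align 4
  ; becomes (the returned old value is discarded):
  %old = call i32 @__sync_lock_test_and_set_4(i32* %ptr, i32 %val)

  %val = load atomic i32* %ptr seq_cst, align 4
  ; becomes (a compare-and-swap that never modifies memory):
  %val = call i32 @__sync_val_compare_and_swap_4(i32* %ptr, i32 0, i32 0)

Using a swap for the store and a no-op compare-and-swap for the load keeps
the expansion completely generic, at the cost of turning every access into
an atomic read-modify-write; that's the inefficiency mentioned above.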

llvm-svn: 138621
diff --git a/llvm/test/CodeGen/ARM/atomic-load-store.ll b/llvm/test/CodeGen/ARM/atomic-load-store.ll
new file mode 100644
index 0000000..1625e53
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/atomic-load-store.ll
@@ -0,0 +1,31 @@
+; RUN: llc < %s -mtriple=armv7-apple-ios | FileCheck %s -check-prefix=ARM
+; RUN: llc < %s -mtriple=thumbv7-apple-ios | FileCheck %s -check-prefix=THUMBTWO
+; RUN: llc < %s -mtriple=thumbv6-apple-ios | FileCheck %s -check-prefix=THUMBONE
+
+define void @test1(i32* %ptr, i32 %val1) {
+; ARM: test1
+; ARM: dmb ish
+; ARM-NEXT: str
+; ARM-NEXT: dmb ish
+; THUMBONE: test1
+; THUMBONE: __sync_lock_test_and_set_4
+; THUMBTWO: test1
+; THUMBTWO: dmb ish
+; THUMBTWO-NEXT: str
+; THUMBTWO-NEXT: dmb ish
+  store atomic i32 %val1, i32* %ptr seq_cst, align 4
+  ret void
+}
+
+define i32 @test2(i32* %ptr) {
+; ARM: test2
+; ARM: ldr
+; ARM-NEXT: dmb ish
+; THUMBONE: test2
+; THUMBONE: __sync_val_compare_and_swap_4
+; THUMBTWO: test2
+; THUMBTWO: ldr
+; THUMBTWO-NEXT: dmb ish
+  %val = load atomic i32* %ptr seq_cst, align 4
+  ret i32 %val
+}