[Hexagon] Add support for atomic RMW operations
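
Operations with no one-to-one machine instruction (nand in particular) are
commonly expanded into a compare-and-swap loop. For illustration only, a
minimal IR sketch of that style of expansion (the function name is made up,
and the actual Hexagon lowering may instead use load-locked/store-conditional):

    define i32 @atomic_nand_expansion(i32* %p, i32 %v) nounwind {
    entry:
      %initial = load i32, i32* %p, align 4
      br label %loop

    loop:
      ; %old is the last value observed in memory
      %old = phi i32 [ %initial, %entry ], [ %loaded, %loop ]
      ; nand: %new = ~(%old & %v)
      %tmp = and i32 %old, %v
      %new = xor i32 %tmp, -1
      ; retry until the value in memory still equals %old
      %pair = cmpxchg i32* %p, i32 %old, i32 %new monotonic monotonic
      %loaded = extractvalue { i32, i1 } %pair, 0
      %success = extractvalue { i32, i1 } %pair, 1
      br i1 %success, label %done, label %loop

    done:
      ; on success, %old is the value fetched before the operation
      ret i32 %old
    }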

llvm-svn: 241804
diff --git a/llvm/test/CodeGen/Hexagon/Atomics.ll b/llvm/test/CodeGen/Hexagon/Atomics.ll
new file mode 100644
index 0000000..bbac5d7
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/Atomics.ll
@@ -0,0 +1,75 @@
+; RUN: llc < %s -march=hexagon
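+; Check that atomicrmw operations on i32 and i64 compile without crashing.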
+
+@si = common global i32 0, align 4
+@sll = common global i64 0, align 8
+
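+; The result of each atomicrmw is ignored.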
+define void @test_op_ignore() nounwind {
+entry:
+  %t00 = atomicrmw add i32* @si, i32 1 monotonic
+  %t01 = atomicrmw add i64* @sll, i64 1 monotonic
+  %t10 = atomicrmw sub i32* @si, i32 1 monotonic
+  %t11 = atomicrmw sub i64* @sll, i64 1 monotonic
+  %t20 = atomicrmw or i32* @si, i32 1 monotonic
+  %t21 = atomicrmw or i64* @sll, i64 1 monotonic
+  %t30 = atomicrmw xor i32* @si, i32 1 monotonic
+  %t31 = atomicrmw xor i64* @sll, i64 1 monotonic
+  %t40 = atomicrmw and i32* @si, i32 1 monotonic
+  %t41 = atomicrmw and i64* @sll, i64 1 monotonic
+  %t50 = atomicrmw nand i32* @si, i32 1 monotonic
+  %t51 = atomicrmw nand i64* @sll, i64 1 monotonic
+  br label %return
+
+return:                                           ; preds = %entry
+  ret void
+}
+
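+; The old value returned by each atomicrmw is stored back.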
+define void @test_fetch_and_op() nounwind {
+entry:
+  %t00 = atomicrmw add i32* @si, i32 11 monotonic
+  store i32 %t00, i32* @si, align 4
+  %t01 = atomicrmw add i64* @sll, i64 11 monotonic
+  store i64 %t01, i64* @sll, align 8
+  %t10 = atomicrmw sub i32* @si, i32 11 monotonic
+  store i32 %t10, i32* @si, align 4
+  %t11 = atomicrmw sub i64* @sll, i64 11 monotonic
+  store i64 %t11, i64* @sll, align 8
+  %t20 = atomicrmw or i32* @si, i32 11 monotonic
+  store i32 %t20, i32* @si, align 4
+  %t21 = atomicrmw or i64* @sll, i64 11 monotonic
+  store i64 %t21, i64* @sll, align 8
+  %t30 = atomicrmw xor i32* @si, i32 11 monotonic
+  store i32 %t30, i32* @si, align 4
+  %t31 = atomicrmw xor i64* @sll, i64 11 monotonic
+  store i64 %t31, i64* @sll, align 8
+  %t40 = atomicrmw and i32* @si, i32 11 monotonic
+  store i32 %t40, i32* @si, align 4
+  %t41 = atomicrmw and i64* @sll, i64 11 monotonic
+  store i64 %t41, i64* @sll, align 8
+  %t50 = atomicrmw nand i32* @si, i32 11 monotonic
+  store i32 %t50, i32* @si, align 4
+  %t51 = atomicrmw nand i64* @sll, i64 11 monotonic
+  store i64 %t51, i64* @sll, align 8
+  br label %return
+
+return:                                           ; preds = %entry
+  ret void
+}
+
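+; Exchange, then a seq_cst fence followed by volatile stores.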
+define void @test_lock() nounwind {
+entry:
+  %t00 = atomicrmw xchg i32* @si, i32 1 monotonic
+  store i32 %t00, i32* @si, align 4
+  %t01 = atomicrmw xchg i64* @sll, i64 1 monotonic
+  store i64 %t01, i64* @sll, align 8
+  fence seq_cst
+  store volatile i32 0, i32* @si, align 4
+  store volatile i64 0, i64* @sll, align 8
+  br label %return
+
+return:                                           ; preds = %entry
+  ret void
+}