AMDGPU: Fix missing and broken check lines in atomic tests
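
Add coverage for 64-bit atomic load, store and cmpxchg, add the missing
VI check line for atomic_xchg_i64_addr64_offset, and normalize whitespace
in the atomicrmw lines. The new check lines use the GCN, CI and VI
prefixes that the RUN lines actually enable.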

llvm-svn: 272343
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll
index bc37825..1ef8766 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll
@@ -1,13 +1,12 @@
 ; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
 ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
 
-
 ; GCN-LABEL: {{^}}atomic_add_i64_offset:
 ; GCN: buffer_atomic_add_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
 define void @atomic_add_i64_offset(i64 addrspace(1)* %out, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0  = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -17,7 +16,7 @@
 define void @atomic_add_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0  = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -29,7 +28,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0  = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -41,7 +40,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0  = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -50,7 +49,7 @@
 ; GCN: buffer_atomic_add_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_add_i64(i64 addrspace(1)* %out, i64 %in) {
 entry:
-  %tmp0  = atomicrmw volatile add i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile add i64 addrspace(1)* %out, i64 %in seq_cst
   ret void
 }
 
@@ -59,7 +58,7 @@
 ; GCN: buffer_store_dwordx2 [[RET]]
 define void @atomic_add_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
-  %tmp0  = atomicrmw volatile add i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile add i64 addrspace(1)* %out, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -70,7 +69,7 @@
 define void @atomic_add_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0  = atomicrmw volatile add i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile add i64 addrspace(1)* %ptr, i64 %in seq_cst
   ret void
 }
 
@@ -81,7 +80,7 @@
 define void @atomic_add_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0  = atomicrmw volatile add i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile add i64 addrspace(1)* %ptr, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -91,7 +90,7 @@
 define void @atomic_and_i64_offset(i64 addrspace(1)* %out, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0  = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -101,7 +100,7 @@
 define void @atomic_and_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0  = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -113,7 +112,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0  = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -125,7 +124,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0  = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -134,7 +133,7 @@
 ; GCN: buffer_atomic_and_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_and_i64(i64 addrspace(1)* %out, i64 %in) {
 entry:
-  %tmp0  = atomicrmw volatile and i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile and i64 addrspace(1)* %out, i64 %in seq_cst
   ret void
 }
 
@@ -143,7 +142,7 @@
 ; GCN: buffer_store_dwordx2 [[RET]]
 define void @atomic_and_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
-  %tmp0  = atomicrmw volatile and i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile and i64 addrspace(1)* %out, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -154,7 +153,7 @@
 define void @atomic_and_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0  = atomicrmw volatile and i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile and i64 addrspace(1)* %ptr, i64 %in seq_cst
   ret void
 }
 
@@ -165,7 +164,7 @@
 define void @atomic_and_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0  = atomicrmw volatile and i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile and i64 addrspace(1)* %ptr, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -175,7 +174,7 @@
 define void @atomic_sub_i64_offset(i64 addrspace(1)* %out, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0  = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -185,7 +184,7 @@
 define void @atomic_sub_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0  = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -197,7 +196,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0  = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -209,7 +208,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0  = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -218,7 +217,7 @@
 ; GCN: buffer_atomic_sub_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_sub_i64(i64 addrspace(1)* %out, i64 %in) {
 entry:
-  %tmp0  = atomicrmw volatile sub i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %out, i64 %in seq_cst
   ret void
 }
 
@@ -227,7 +226,7 @@
 ; GCN: buffer_store_dwordx2 [[RET]]
 define void @atomic_sub_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
-  %tmp0  = atomicrmw volatile sub i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %out, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -238,7 +237,7 @@
 define void @atomic_sub_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0  = atomicrmw volatile sub i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %ptr, i64 %in seq_cst
   ret void
 }
 
@@ -249,7 +248,7 @@
 define void @atomic_sub_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0  = atomicrmw volatile sub i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %ptr, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -259,7 +258,7 @@
 define void @atomic_max_i64_offset(i64 addrspace(1)* %out, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0  = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -269,7 +268,7 @@
 define void @atomic_max_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0  = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -281,7 +280,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0  = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -293,7 +292,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0  = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -302,7 +301,7 @@
 ; GCN: buffer_atomic_smax_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_max_i64(i64 addrspace(1)* %out, i64 %in) {
 entry:
-  %tmp0  = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst
   ret void
 }
 
@@ -311,7 +310,7 @@
 ; GCN: buffer_store_dwordx2 [[RET]]
 define void @atomic_max_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
-  %tmp0  = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -322,7 +321,7 @@
 define void @atomic_max_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0  = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst
   ret void
 }
 
@@ -333,7 +332,7 @@
 define void @atomic_max_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0  = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -343,7 +342,7 @@
 define void @atomic_umax_i64_offset(i64 addrspace(1)* %out, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0  = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -353,7 +352,7 @@
 define void @atomic_umax_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0  = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -365,7 +364,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0  = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -377,7 +376,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0  = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -386,7 +385,7 @@
 ; GCN: buffer_atomic_umax_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_umax_i64(i64 addrspace(1)* %out, i64 %in) {
 entry:
-  %tmp0  = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst
   ret void
 }
 
@@ -395,7 +394,7 @@
 ; GCN: buffer_store_dwordx2 [[RET]]
 define void @atomic_umax_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
-  %tmp0  = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -406,7 +405,7 @@
 define void @atomic_umax_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0  = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst
   ret void
 }
 
@@ -417,7 +416,7 @@
 define void @atomic_umax_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0  = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -427,7 +426,7 @@
 define void @atomic_min_i64_offset(i64 addrspace(1)* %out, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0  = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -437,7 +436,7 @@
 define void @atomic_min_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0  = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -449,7 +448,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0  = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -461,7 +460,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0  = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -470,7 +469,7 @@
 ; GCN: buffer_atomic_smin_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_min_i64(i64 addrspace(1)* %out, i64 %in) {
 entry:
-  %tmp0  = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst
   ret void
 }
 
@@ -479,7 +478,7 @@
 ; GCN: buffer_store_dwordx2 [[RET]]
 define void @atomic_min_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
-  %tmp0  = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -490,7 +489,7 @@
 define void @atomic_min_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0  = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst
   ret void
 }
 
@@ -501,7 +500,7 @@
 define void @atomic_min_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0  = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -511,7 +510,7 @@
 define void @atomic_umin_i64_offset(i64 addrspace(1)* %out, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0  = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -521,7 +520,7 @@
 define void @atomic_umin_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0  = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -533,7 +532,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0  = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -545,7 +544,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0  = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -554,7 +553,7 @@
 ; GCN: buffer_atomic_umin_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_umin_i64(i64 addrspace(1)* %out, i64 %in) {
 entry:
-  %tmp0  = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst
   ret void
 }
 
@@ -563,7 +562,7 @@
 ; GCN: buffer_store_dwordx2 [[RET]]
 define void @atomic_umin_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
-  %tmp0  = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -574,7 +573,7 @@
 define void @atomic_umin_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0  = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst
   ret void
 }
 
@@ -585,7 +584,7 @@
 define void @atomic_umin_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0  = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -595,7 +594,7 @@
 define void @atomic_or_i64_offset(i64 addrspace(1)* %out, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0  = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -605,7 +604,7 @@
 define void @atomic_or_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0  = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -617,7 +616,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0  = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -629,7 +628,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0  = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -638,7 +637,7 @@
 ; GCN: buffer_atomic_or_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_or_i64(i64 addrspace(1)* %out, i64 %in) {
 entry:
-  %tmp0  = atomicrmw volatile or i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %out, i64 %in seq_cst
   ret void
 }
 
@@ -647,7 +646,7 @@
 ; GCN: buffer_store_dwordx2 [[RET]]
 define void @atomic_or_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
-  %tmp0  = atomicrmw volatile or i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %out, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -658,7 +657,7 @@
 define void @atomic_or_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0  = atomicrmw volatile or i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %ptr, i64 %in seq_cst
   ret void
 }
 
@@ -669,7 +668,7 @@
 define void @atomic_or_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0  = atomicrmw volatile or i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %ptr, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -679,7 +678,7 @@
 define void @atomic_xchg_i64_offset(i64 addrspace(1)* %out, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0  = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -689,18 +688,19 @@
 define void @atomic_xchg_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0  = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_xchg_i64_addr64_offset:
 ; CI: buffer_atomic_swap_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
+; VI: flat_atomic_swap_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}}{{$}}
 define void @atomic_xchg_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0  = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -712,7 +712,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0  = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -721,7 +721,7 @@
 ; GCN: buffer_atomic_swap_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_xchg_i64(i64 addrspace(1)* %out, i64 %in) {
 entry:
-  %tmp0  = atomicrmw volatile xchg i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %out, i64 %in seq_cst
   ret void
 }
 
@@ -730,7 +730,7 @@
 ; GCN: buffer_store_dwordx2 [[RET]]
 define void @atomic_xchg_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
-  %tmp0  = atomicrmw volatile xchg i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %out, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -741,7 +741,7 @@
 define void @atomic_xchg_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0  = atomicrmw volatile xchg i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %ptr, i64 %in seq_cst
   ret void
 }
 
@@ -752,7 +752,7 @@
 define void @atomic_xchg_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0  = atomicrmw volatile xchg i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %ptr, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -762,7 +762,7 @@
 define void @atomic_xor_i64_offset(i64 addrspace(1)* %out, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0  = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -772,7 +772,7 @@
 define void @atomic_xor_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0  = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -784,7 +784,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0  = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -796,7 +796,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0  = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -805,7 +805,7 @@
 ; GCN: buffer_atomic_xor_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_xor_i64(i64 addrspace(1)* %out, i64 %in) {
 entry:
-  %tmp0  = atomicrmw volatile xor i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %out, i64 %in seq_cst
   ret void
 }
 
@@ -814,7 +814,7 @@
 ; GCN: buffer_store_dwordx2 [[RET]]
 define void @atomic_xor_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
-  %tmp0  = atomicrmw volatile xor i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %out, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -825,7 +825,7 @@
 define void @atomic_xor_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0  = atomicrmw volatile xor i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %ptr, i64 %in seq_cst
   ret void
 }
 
@@ -836,7 +836,191 @@
 define void @atomic_xor_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0  = atomicrmw volatile xor i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %ptr, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
+
+; GCN-LABEL: {{^}}atomic_load_i64_offset:
+; CI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
+; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
+; GCN: buffer_store_dwordx2 [[RET]]
+define void @atomic_load_i64_offset(i64 addrspace(1)* %in, i64 addrspace(1)* %out) {
+entry:
+  %gep = getelementptr i64, i64 addrspace(1)* %in, i64 4
+  %val = load atomic i64, i64 addrspace(1)* %gep seq_cst, align 8
+  store i64 %val, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}atomic_load_i64:
+; CI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
+; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc
+; GCN: buffer_store_dwordx2 [[RET]]
+define void @atomic_load_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %out) {
+entry:
+  %val = load atomic i64, i64 addrspace(1)* %in seq_cst, align 8
+  store i64 %val, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}atomic_load_i64_addr64_offset:
+; CI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
+; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
+; GCN: buffer_store_dwordx2 [[RET]]
+define void @atomic_load_i64_addr64_offset(i64 addrspace(1)* %in, i64 addrspace(1)* %out, i64 %index) {
+entry:
+  %ptr = getelementptr i64, i64 addrspace(1)* %in, i64 %index
+  %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+  %val = load atomic i64, i64 addrspace(1)* %gep seq_cst, align 8
+  store i64 %val, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}atomic_load_i64_addr64:
+; CI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
+; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
+; GCN: buffer_store_dwordx2 [[RET]]
+define void @atomic_load_i64_addr64(i64 addrspace(1)* %in, i64 addrspace(1)* %out, i64 %index) {
+entry:
+  %ptr = getelementptr i64, i64 addrspace(1)* %in, i64 %index
+  %val = load atomic i64, i64 addrspace(1)* %ptr seq_cst, align 8
+  store i64 %val, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}atomic_store_i64_offset:
+; CI: buffer_store_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
+; VI: flat_store_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
+define void @atomic_store_i64_offset(i64 %in, i64 addrspace(1)* %out) {
+entry:
+  %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
+  store atomic i64 %in, i64 addrspace(1)* %gep seq_cst, align 8
+  ret void
+}
+
+; GCN-LABEL: {{^}}atomic_store_i64:
+; CI: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
+; VI: flat_store_dwordx2 {{v\[[0-9]+:[0-9]\]}}, v[{{[0-9]+}}:{{[0-9]+}}] glc
+define void @atomic_store_i64(i64 %in, i64 addrspace(1)* %out) {
+entry:
+  store atomic i64 %in, i64 addrspace(1)* %out seq_cst, align 8
+  ret void
+}
+
+; GCN-LABEL: {{^}}atomic_store_i64_addr64_offset:
+; CI: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
+; VI: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}] glc{{$}}
+define void @atomic_store_i64_addr64_offset(i64 %in, i64 addrspace(1)* %out, i64 %index) {
+entry:
+  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+  store atomic i64 %in, i64 addrspace(1)* %gep seq_cst, align 8
+  ret void
+}
+
+; GCN-LABEL: {{^}}atomic_store_i64_addr64:
+; CI: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
+; VI: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}] glc{{$}}
+define void @atomic_store_i64_addr64(i64 %in, i64 addrspace(1)* %out, i64 %index) {
+entry:
+  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+  store atomic i64 %in, i64 addrspace(1)* %ptr seq_cst, align 8
+  ret void
+}
+
+
+
+
+
+
+
+
+; GCN-LABEL: {{^}}atomic_cmpxchg_i64_offset:
+; GCN: buffer_atomic_cmpswap_x2 v[{{[0-9]+}}:{{[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
+define void @atomic_cmpxchg_i64_offset(i64 addrspace(1)* %out, i64 %in, i64 %old) {
+entry:
+  %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
+  %val = cmpxchg volatile i64 addrspace(1)* %gep, i64 %old, i64 %in seq_cst seq_cst
+  ret void
+}
+
+; GCN-LABEL: {{^}}atomic_cmpxchg_i64_ret_offset:
+; GCN: buffer_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]{{:[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
+; GCN: buffer_store_dwordx2 v{{\[}}[[RET]]:
+define void @atomic_cmpxchg_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %old) {
+entry:
+  %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
+  %val = cmpxchg volatile i64 addrspace(1)* %gep, i64 %old, i64 %in seq_cst seq_cst
+  %extract0 = extractvalue { i64, i1 } %val, 0
+  store i64 %extract0, i64 addrspace(1)* %out2
+  ret void
+}
+
+; GCN-LABEL: {{^}}atomic_cmpxchg_i64_addr64_offset:
+; CI: buffer_atomic_cmpswap_x2 v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
+
+; VI: flat_atomic_cmpswap_x2 v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}}
+define void @atomic_cmpxchg_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index, i64 %old) {
+entry:
+  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+  %val = cmpxchg volatile i64 addrspace(1)* %gep, i64 %old, i64 %in seq_cst seq_cst
+  ret void
+}
+
+; GCN-LABEL: {{^}}atomic_cmpxchg_i64_ret_addr64_offset:
+; CI: buffer_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
+; VI: flat_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
+; GCN: buffer_store_dwordx2 v{{\[}}[[RET]]:
+define void @atomic_cmpxchg_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index, i64 %old) {
+entry:
+  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+  %val = cmpxchg volatile i64 addrspace(1)* %gep, i64 %old, i64 %in seq_cst seq_cst
+  %extract0 = extractvalue { i64, i1 } %val, 0
+  store i64 %extract0, i64 addrspace(1)* %out2
+  ret void
+}
+
+; GCN-LABEL: {{^}}atomic_cmpxchg_i64:
+; GCN: buffer_atomic_cmpswap_x2 v[{{[0-9]+:[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
+define void @atomic_cmpxchg_i64(i64 addrspace(1)* %out, i64 %in, i64 %old) {
+entry:
+  %val = cmpxchg volatile i64 addrspace(1)* %out, i64 %old, i64 %in seq_cst seq_cst
+  ret void
+}
+
+; GCN-LABEL: {{^}}atomic_cmpxchg_i64_ret:
+; GCN: buffer_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
+; GCN: buffer_store_dwordx2 v{{\[}}[[RET]]:
+define void @atomic_cmpxchg_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %old) {
+entry:
+  %val = cmpxchg volatile i64 addrspace(1)* %out, i64 %old, i64 %in seq_cst seq_cst
+  %extract0 = extractvalue { i64, i1 } %val, 0
+  store i64 %extract0, i64 addrspace(1)* %out2
+  ret void
+}
+
+; GCN-LABEL: {{^}}atomic_cmpxchg_i64_addr64:
+; CI: buffer_atomic_cmpswap_x2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
+; VI: flat_atomic_cmpswap_x2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]{{$}}
+define void @atomic_cmpxchg_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index, i64 %old) {
+entry:
+  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+  %val = cmpxchg volatile i64 addrspace(1)* %ptr, i64 %old, i64 %in seq_cst seq_cst
+  ret void
+}
+
+; GCN-LABEL: {{^}}atomic_cmpxchg_i64_ret_addr64:
+; CI: buffer_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
+; VI: flat_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
+; GCN: buffer_store_dwordx2 v{{\[}}[[RET]]:
+define void @atomic_cmpxchg_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index, i64 %old) {
+entry:
+  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+  %val = cmpxchg volatile i64 addrspace(1)* %ptr, i64 %old, i64 %in seq_cst seq_cst
+  %extract0 = extractvalue { i64, i1 } %val, 0
+  store i64 %extract0, i64 addrspace(1)* %out2
+  ret void
+}