[AMDGPU] Switch to the new addr space mapping by default

This requires a corresponding clang change.

Differential Revision: https://reviews.llvm.org/D40955

llvm-svn: 324101
diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics.ll
index e3d307e..1edb486 100644
--- a/llvm/test/CodeGen/AMDGPU/flat_atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat_atomics.ll
@@ -5,29 +5,29 @@
 ; GCN-LABEL: {{^}}atomic_add_i32_offset:
 ; CIVI: flat_atomic_add v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}{{$}}
 ; GFX9: flat_atomic_add v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_add_i32_offset(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_add_i32_offset(i32* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = atomicrmw volatile add i32* %gep, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_add_i32_max_offset:
 ; CIVI: flat_atomic_add v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}{{$}}
 ; GFX9: flat_atomic_add v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} offset:4092{{$}}
-define amdgpu_kernel void @atomic_add_i32_max_offset(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_add_i32_max_offset(i32* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 1023
-  %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32* %out, i32 1023
+  %val = atomicrmw volatile add i32* %gep, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_add_i32_max_offset_p1:
 ; GCN: flat_atomic_add v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}{{$}}
-define amdgpu_kernel void @atomic_add_i32_max_offset_p1(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_add_i32_max_offset_p1(i32* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 1024
-  %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32* %out, i32 1024
+  %val = atomicrmw volatile add i32* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -35,22 +35,22 @@
 ; CIVI: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GFX9: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_add_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_add_i32_ret_offset(i32* %out, i32* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = atomicrmw volatile add i32* %gep, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_add_i32_addr64_offset:
 ; CIVI: flat_atomic_add v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
 ; GFX9: flat_atomic_add v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_add_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_add_i32_addr64_offset(i32* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val = atomicrmw volatile add i32* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -58,60 +58,60 @@
 ; CIVI: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GFX9: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_add_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_add_i32_ret_addr64_offset(i32* %out, i32* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val = atomicrmw volatile add i32* %gep, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_add_i32:
 ; GCN: flat_atomic_add v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define amdgpu_kernel void @atomic_add_i32(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_add_i32(i32* %out, i32 %in) {
 entry:
-  %val = atomicrmw volatile add i32 addrspace(4)* %out, i32 %in seq_cst
+  %val = atomicrmw volatile add i32* %out, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_add_i32_ret:
 ; GCN: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_add_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_add_i32_ret(i32* %out, i32* %out2, i32 %in) {
 entry:
-  %val = atomicrmw volatile add i32 addrspace(4)* %out, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %val = atomicrmw volatile add i32* %out, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_add_i32_addr64:
 ; GCN: flat_atomic_add v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define amdgpu_kernel void @atomic_add_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_add_i32_addr64(i32* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = atomicrmw volatile add i32 addrspace(4)* %ptr, i32 %in seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = atomicrmw volatile add i32* %ptr, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_add_i32_ret_addr64:
 ; GCN: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_add_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_add_i32_ret_addr64(i32* %out, i32* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = atomicrmw volatile add i32 addrspace(4)* %ptr, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = atomicrmw volatile add i32* %ptr, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_and_i32_offset:
 ; CIVI: flat_atomic_and v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
 ; GFX9: flat_atomic_and v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_and_i32_offset(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_and_i32_offset(i32* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = atomicrmw volatile and i32 addrspace(4)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = atomicrmw volatile and i32* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -119,22 +119,22 @@
 ; CIVI: flat_atomic_and [[RET:v[0-9]]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GFX9: flat_atomic_and [[RET:v[0-9]]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_and_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_and_i32_ret_offset(i32* %out, i32* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = atomicrmw volatile and i32 addrspace(4)* %gep, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = atomicrmw volatile and i32* %gep, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_and_i32_addr64_offset:
 ; CIVI: flat_atomic_and v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
 ; GFX9: flat_atomic_and v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_and_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_and_i32_addr64_offset(i32* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val = atomicrmw volatile and i32 addrspace(4)* %gep, i32 %in seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val = atomicrmw volatile and i32* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -142,60 +142,60 @@
 ; CIVI: flat_atomic_and [[RET:v[0-9]]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GFX9: flat_atomic_and [[RET:v[0-9]]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_and_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_and_i32_ret_addr64_offset(i32* %out, i32* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val = atomicrmw volatile and i32 addrspace(4)* %gep, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val = atomicrmw volatile and i32* %gep, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_and_i32:
 ; GCN: flat_atomic_and v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define amdgpu_kernel void @atomic_and_i32(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_and_i32(i32* %out, i32 %in) {
 entry:
-  %val = atomicrmw volatile and i32 addrspace(4)* %out, i32 %in seq_cst
+  %val = atomicrmw volatile and i32* %out, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_and_i32_ret:
 ; GCN: flat_atomic_and [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_and_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_and_i32_ret(i32* %out, i32* %out2, i32 %in) {
 entry:
-  %val = atomicrmw volatile and i32 addrspace(4)* %out, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %val = atomicrmw volatile and i32* %out, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_and_i32_addr64:
 ; GCN: flat_atomic_and v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define amdgpu_kernel void @atomic_and_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_and_i32_addr64(i32* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = atomicrmw volatile and i32 addrspace(4)* %ptr, i32 %in seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = atomicrmw volatile and i32* %ptr, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_and_i32_ret_addr64:
 ; GCN: flat_atomic_and [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_and_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_and_i32_ret_addr64(i32* %out, i32* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = atomicrmw volatile and i32 addrspace(4)* %ptr, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = atomicrmw volatile and i32* %ptr, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_sub_i32_offset:
 ; CIVI: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
 ; GFX9: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_sub_i32_offset(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_sub_i32_offset(i32* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = atomicrmw volatile sub i32 addrspace(4)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = atomicrmw volatile sub i32* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -203,22 +203,22 @@
 ; CIVI: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GFX9: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_sub_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_sub_i32_ret_offset(i32* %out, i32* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = atomicrmw volatile sub i32 addrspace(4)* %gep, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = atomicrmw volatile sub i32* %gep, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_sub_i32_addr64_offset:
 ; CIVI: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
 ; GFX9: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_sub_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_sub_i32_addr64_offset(i32* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val = atomicrmw volatile sub i32 addrspace(4)* %gep, i32 %in seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val = atomicrmw volatile sub i32* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -226,60 +226,60 @@
 ; CIVI: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GFX9: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_sub_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_sub_i32_ret_addr64_offset(i32* %out, i32* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val = atomicrmw volatile sub i32 addrspace(4)* %gep, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val = atomicrmw volatile sub i32* %gep, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_sub_i32:
 ; GCN: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define amdgpu_kernel void @atomic_sub_i32(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_sub_i32(i32* %out, i32 %in) {
 entry:
-  %val = atomicrmw volatile sub i32 addrspace(4)* %out, i32 %in seq_cst
+  %val = atomicrmw volatile sub i32* %out, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_sub_i32_ret:
 ; GCN: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_sub_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_sub_i32_ret(i32* %out, i32* %out2, i32 %in) {
 entry:
-  %val = atomicrmw volatile sub i32 addrspace(4)* %out, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %val = atomicrmw volatile sub i32* %out, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_sub_i32_addr64:
 ; GCN: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define amdgpu_kernel void @atomic_sub_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_sub_i32_addr64(i32* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = atomicrmw volatile sub i32 addrspace(4)* %ptr, i32 %in seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = atomicrmw volatile sub i32* %ptr, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_sub_i32_ret_addr64:
 ; GCN: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_sub_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_sub_i32_ret_addr64(i32* %out, i32* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = atomicrmw volatile sub i32 addrspace(4)* %ptr, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = atomicrmw volatile sub i32* %ptr, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_max_i32_offset:
 ; CIVI: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
 ; GFX9: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_max_i32_offset(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_max_i32_offset(i32* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = atomicrmw volatile max i32 addrspace(4)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = atomicrmw volatile max i32* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -287,22 +287,22 @@
 ; CIVI: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GFX9: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_max_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_max_i32_ret_offset(i32* %out, i32* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = atomicrmw volatile max i32 addrspace(4)* %gep, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = atomicrmw volatile max i32* %gep, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_max_i32_addr64_offset:
 ; CIVI: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
 ; GFX9: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_max_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_max_i32_addr64_offset(i32* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val = atomicrmw volatile max i32 addrspace(4)* %gep, i32 %in seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val = atomicrmw volatile max i32* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -310,60 +310,60 @@
 ; CIVI: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GFX9: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_max_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_max_i32_ret_addr64_offset(i32* %out, i32* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val = atomicrmw volatile max i32 addrspace(4)* %gep, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val = atomicrmw volatile max i32* %gep, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_max_i32:
 ; GCN: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define amdgpu_kernel void @atomic_max_i32(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_max_i32(i32* %out, i32 %in) {
 entry:
-  %val = atomicrmw volatile max i32 addrspace(4)* %out, i32 %in seq_cst
+  %val = atomicrmw volatile max i32* %out, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_max_i32_ret:
 ; GCN: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_max_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_max_i32_ret(i32* %out, i32* %out2, i32 %in) {
 entry:
-  %val = atomicrmw volatile max i32 addrspace(4)* %out, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %val = atomicrmw volatile max i32* %out, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_max_i32_addr64:
 ; GCN: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define amdgpu_kernel void @atomic_max_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_max_i32_addr64(i32* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = atomicrmw volatile max i32 addrspace(4)* %ptr, i32 %in seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = atomicrmw volatile max i32* %ptr, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_max_i32_ret_addr64:
 ; GCN: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_max_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_max_i32_ret_addr64(i32* %out, i32* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = atomicrmw volatile max i32 addrspace(4)* %ptr, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = atomicrmw volatile max i32* %ptr, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_umax_i32_offset:
 ; CIVI: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
 ; GFX9: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_umax_i32_offset(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_umax_i32_offset(i32* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = atomicrmw volatile umax i32 addrspace(4)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = atomicrmw volatile umax i32* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -371,22 +371,22 @@
 ; CIVI: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GFX9: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_umax_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_umax_i32_ret_offset(i32* %out, i32* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = atomicrmw volatile umax i32 addrspace(4)* %gep, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = atomicrmw volatile umax i32* %gep, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_umax_i32_addr64_offset:
 ; CIVI: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
 ; GFX9: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_umax_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umax_i32_addr64_offset(i32* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val = atomicrmw volatile umax i32 addrspace(4)* %gep, i32 %in seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val = atomicrmw volatile umax i32* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -394,60 +394,60 @@
 ; CIVI: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GFX9: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_umax_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umax_i32_ret_addr64_offset(i32* %out, i32* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val = atomicrmw volatile umax i32 addrspace(4)* %gep, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val = atomicrmw volatile umax i32* %gep, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_umax_i32:
 ; GCN: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define amdgpu_kernel void @atomic_umax_i32(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_umax_i32(i32* %out, i32 %in) {
 entry:
-  %val = atomicrmw volatile umax i32 addrspace(4)* %out, i32 %in seq_cst
+  %val = atomicrmw volatile umax i32* %out, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_umax_i32_ret:
 ; GCN: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_umax_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_umax_i32_ret(i32* %out, i32* %out2, i32 %in) {
 entry:
-  %val = atomicrmw volatile umax i32 addrspace(4)* %out, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %val = atomicrmw volatile umax i32* %out, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_umax_i32_addr64:
 ; GCN: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define amdgpu_kernel void @atomic_umax_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umax_i32_addr64(i32* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = atomicrmw volatile umax i32 addrspace(4)* %ptr, i32 %in seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = atomicrmw volatile umax i32* %ptr, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_umax_i32_ret_addr64:
 ; GCN: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_umax_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umax_i32_ret_addr64(i32* %out, i32* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = atomicrmw volatile umax i32 addrspace(4)* %ptr, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = atomicrmw volatile umax i32* %ptr, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_min_i32_offset:
 ; CIVI: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
 ; GFX9: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_min_i32_offset(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_min_i32_offset(i32* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = atomicrmw volatile min i32 addrspace(4)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = atomicrmw volatile min i32* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -455,22 +455,22 @@
 ; CIVI: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GFX9: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_min_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_min_i32_ret_offset(i32* %out, i32* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = atomicrmw volatile min i32 addrspace(4)* %gep, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = atomicrmw volatile min i32* %gep, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_min_i32_addr64_offset:
 ; CIVI: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
 ; GFX9: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_min_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_min_i32_addr64_offset(i32* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val = atomicrmw volatile min i32 addrspace(4)* %gep, i32 %in seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val = atomicrmw volatile min i32* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -478,60 +478,60 @@
 ; CIVI: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GFX9: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_min_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_min_i32_ret_addr64_offset(i32* %out, i32* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val = atomicrmw volatile min i32 addrspace(4)* %gep, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val = atomicrmw volatile min i32* %gep, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_min_i32:
 ; GCN: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define amdgpu_kernel void @atomic_min_i32(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_min_i32(i32* %out, i32 %in) {
 entry:
-  %val = atomicrmw volatile min i32 addrspace(4)* %out, i32 %in seq_cst
+  %val = atomicrmw volatile min i32* %out, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_min_i32_ret:
 ; GCN: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_min_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_min_i32_ret(i32* %out, i32* %out2, i32 %in) {
 entry:
-  %val = atomicrmw volatile min i32 addrspace(4)* %out, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %val = atomicrmw volatile min i32* %out, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_min_i32_addr64:
 ; GCN: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define amdgpu_kernel void @atomic_min_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_min_i32_addr64(i32* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = atomicrmw volatile min i32 addrspace(4)* %ptr, i32 %in seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = atomicrmw volatile min i32* %ptr, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_min_i32_ret_addr64:
 ; GCN: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_min_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_min_i32_ret_addr64(i32* %out, i32* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = atomicrmw volatile min i32 addrspace(4)* %ptr, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = atomicrmw volatile min i32* %ptr, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_umin_i32_offset:
 ; CIVI: flat_atomic_umin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
 ; GFX9: flat_atomic_umin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_umin_i32_offset(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_umin_i32_offset(i32* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = atomicrmw volatile umin i32 addrspace(4)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = atomicrmw volatile umin i32* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -539,22 +539,22 @@
 ; CIVI: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GFX9: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_umin_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_umin_i32_ret_offset(i32* %out, i32* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = atomicrmw volatile umin i32 addrspace(4)* %gep, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = atomicrmw volatile umin i32* %gep, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_umin_i32_addr64_offset:
 ; CIVI: flat_atomic_umin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
 ; GFX9: flat_atomic_umin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_umin_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umin_i32_addr64_offset(i32* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val = atomicrmw volatile umin i32 addrspace(4)* %gep, i32 %in seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val = atomicrmw volatile umin i32* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -562,60 +562,60 @@
 ; CIVI: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GFX9: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_umin_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umin_i32_ret_addr64_offset(i32* %out, i32* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val = atomicrmw volatile umin i32 addrspace(4)* %gep, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val = atomicrmw volatile umin i32* %gep, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_umin_i32:
 ; GCN: flat_atomic_umin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define amdgpu_kernel void @atomic_umin_i32(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_umin_i32(i32* %out, i32 %in) {
 entry:
-  %val = atomicrmw volatile umin i32 addrspace(4)* %out, i32 %in seq_cst
+  %val = atomicrmw volatile umin i32* %out, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_umin_i32_ret:
 ; GCN: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_umin_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_umin_i32_ret(i32* %out, i32* %out2, i32 %in) {
 entry:
-  %val = atomicrmw volatile umin i32 addrspace(4)* %out, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %val = atomicrmw volatile umin i32* %out, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_umin_i32_addr64:
 ; GCN: flat_atomic_umin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define amdgpu_kernel void @atomic_umin_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umin_i32_addr64(i32* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = atomicrmw volatile umin i32 addrspace(4)* %ptr, i32 %in seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = atomicrmw volatile umin i32* %ptr, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_umin_i32_ret_addr64:
 ; GCN: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]{{$}}
-  define amdgpu_kernel void @atomic_umin_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+  define amdgpu_kernel void @atomic_umin_i32_ret_addr64(i32* %out, i32* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = atomicrmw volatile umin i32 addrspace(4)* %ptr, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = atomicrmw volatile umin i32* %ptr, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_or_i32_offset:
 ; CIVI: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
 ; GFX9: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_or_i32_offset(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_or_i32_offset(i32* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = atomicrmw volatile or i32 addrspace(4)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = atomicrmw volatile or i32* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -623,22 +623,22 @@
 ; CIVI: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GFX9: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_or_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_or_i32_ret_offset(i32* %out, i32* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = atomicrmw volatile or i32 addrspace(4)* %gep, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = atomicrmw volatile or i32* %gep, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_or_i32_addr64_offset:
 ; CIVI: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
 ; GFX9: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_or_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_or_i32_addr64_offset(i32* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val = atomicrmw volatile or i32 addrspace(4)* %gep, i32 %in seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val = atomicrmw volatile or i32* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -646,60 +646,60 @@
 ; CIVI: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GFX9: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_or_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_or_i32_ret_addr64_offset(i32* %out, i32* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val = atomicrmw volatile or i32 addrspace(4)* %gep, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val = atomicrmw volatile or i32* %gep, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_or_i32:
 ; GCN: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define amdgpu_kernel void @atomic_or_i32(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_or_i32(i32* %out, i32 %in) {
 entry:
-  %val = atomicrmw volatile or i32 addrspace(4)* %out, i32 %in seq_cst
+  %val = atomicrmw volatile or i32* %out, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_or_i32_ret:
 ; GCN: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_or_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_or_i32_ret(i32* %out, i32* %out2, i32 %in) {
 entry:
-  %val = atomicrmw volatile or i32 addrspace(4)* %out, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %val = atomicrmw volatile or i32* %out, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_or_i32_addr64:
 ; GCN: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define amdgpu_kernel void @atomic_or_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_or_i32_addr64(i32* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = atomicrmw volatile or i32 addrspace(4)* %ptr, i32 %in seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = atomicrmw volatile or i32* %ptr, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_or_i32_ret_addr64:
 ; GCN: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_or_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_or_i32_ret_addr64(i32* %out, i32* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = atomicrmw volatile or i32 addrspace(4)* %ptr, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = atomicrmw volatile or i32* %ptr, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_xchg_i32_offset:
 ; CIVI: flat_atomic_swap v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
 ; GFX9: flat_atomic_swap v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_xchg_i32_offset(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_xchg_i32_offset(i32* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = atomicrmw volatile xchg i32 addrspace(4)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = atomicrmw volatile xchg i32* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -707,22 +707,22 @@
 ; CIVI: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GFX9: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_xchg_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_xchg_i32_ret_offset(i32* %out, i32* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = atomicrmw volatile xchg i32 addrspace(4)* %gep, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = atomicrmw volatile xchg i32* %gep, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_xchg_i32_addr64_offset:
 ; CIVI: flat_atomic_swap v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
 ; GFX9: flat_atomic_swap v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_xchg_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xchg_i32_addr64_offset(i32* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val = atomicrmw volatile xchg i32 addrspace(4)* %gep, i32 %in seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val = atomicrmw volatile xchg i32* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -730,50 +730,50 @@
 ; CIVI: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GFX9: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_xchg_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xchg_i32_ret_addr64_offset(i32* %out, i32* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val = atomicrmw volatile xchg i32 addrspace(4)* %gep, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val = atomicrmw volatile xchg i32* %gep, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_xchg_i32:
 ; GCN: flat_atomic_swap v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}{{$}}
-define amdgpu_kernel void @atomic_xchg_i32(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_xchg_i32(i32* %out, i32 %in) {
 entry:
-  %val = atomicrmw volatile xchg i32 addrspace(4)* %out, i32 %in seq_cst
+  %val = atomicrmw volatile xchg i32* %out, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_xchg_i32_ret:
 ; GCN: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_xchg_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_xchg_i32_ret(i32* %out, i32* %out2, i32 %in) {
 entry:
-  %val = atomicrmw volatile xchg i32 addrspace(4)* %out, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %val = atomicrmw volatile xchg i32* %out, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_xchg_i32_addr64:
 ; GCN: flat_atomic_swap v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define amdgpu_kernel void @atomic_xchg_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xchg_i32_addr64(i32* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = atomicrmw volatile xchg i32 addrspace(4)* %ptr, i32 %in seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = atomicrmw volatile xchg i32* %ptr, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_xchg_i32_ret_addr64:
 ; GCN: flat_atomic_swap [[RET:v[0-9]+]],  v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_xchg_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xchg_i32_ret_addr64(i32* %out, i32* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = atomicrmw volatile xchg i32 addrspace(4)* %ptr, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = atomicrmw volatile xchg i32* %ptr, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
@@ -782,10 +782,10 @@
 ; GCN-LABEL: {{^}}atomic_cmpxchg_i32_offset:
 ; CIVI: flat_atomic_cmpswap v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}}
 ; GFX9: flat_atomic_cmpswap v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}] offset:16{{$}}
-define amdgpu_kernel void @atomic_cmpxchg_i32_offset(i32 addrspace(4)* %out, i32 %in, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32_offset(i32* %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = cmpxchg volatile i32 addrspace(4)* %gep, i32 %old, i32 %in seq_cst seq_cst
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in seq_cst seq_cst
   ret void
 }
 
@@ -793,23 +793,23 @@
 ; CIVI: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
 ; GFX9: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}] offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RET]]
-define amdgpu_kernel void @atomic_cmpxchg_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32_ret_offset(i32* %out, i32* %out2, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = cmpxchg volatile i32 addrspace(4)* %gep, i32 %old, i32 %in seq_cst seq_cst
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = cmpxchg volatile i32* %gep, i32 %old, i32 %in seq_cst seq_cst
   %flag = extractvalue { i32, i1 } %val, 0
-  store i32 %flag, i32 addrspace(4)* %out2
+  store i32 %flag, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_cmpxchg_i32_addr64_offset:
 ; CIVI: flat_atomic_cmpswap v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}}
 ; GFX9: flat_atomic_cmpswap v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}] offset:16{{$}}
-define amdgpu_kernel void @atomic_cmpxchg_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32_addr64_offset(i32* %out, i32 %in, i64 %index, i32 %old) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val  = cmpxchg volatile i32 addrspace(4)* %gep, i32 %old, i32 %in seq_cst seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val  = cmpxchg volatile i32* %gep, i32 %old, i32 %in seq_cst seq_cst
   ret void
 }
 
@@ -817,63 +817,63 @@
 ; CIVI: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
 ; GFX9: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RET]]
-define amdgpu_kernel void @atomic_cmpxchg_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32_ret_addr64_offset(i32* %out, i32* %out2, i32 %in, i64 %index, i32 %old) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val  = cmpxchg volatile i32 addrspace(4)* %gep, i32 %old, i32 %in seq_cst seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val  = cmpxchg volatile i32* %gep, i32 %old, i32 %in seq_cst seq_cst
   %flag = extractvalue { i32, i1 } %val, 0
-  store i32 %flag, i32 addrspace(4)* %out2
+  store i32 %flag, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_cmpxchg_i32:
 ; GCN: flat_atomic_cmpswap v[{{[0-9]+}}:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}}
-define amdgpu_kernel void @atomic_cmpxchg_i32(i32 addrspace(4)* %out, i32 %in, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32(i32* %out, i32 %in, i32 %old) {
 entry:
-  %val = cmpxchg volatile i32 addrspace(4)* %out, i32 %old, i32 %in seq_cst seq_cst
+  %val = cmpxchg volatile i32* %out, i32 %old, i32 %in seq_cst seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_cmpxchg_i32_ret:
 ; GCN: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}] glc
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RET]]
-define amdgpu_kernel void @atomic_cmpxchg_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32_ret(i32* %out, i32* %out2, i32 %in, i32 %old) {
 entry:
-  %val = cmpxchg volatile i32 addrspace(4)* %out, i32 %old, i32 %in seq_cst seq_cst
+  %val = cmpxchg volatile i32* %out, i32 %old, i32 %in seq_cst seq_cst
   %flag = extractvalue { i32, i1 } %val, 0
-  store i32 %flag, i32 addrspace(4)* %out2
+  store i32 %flag, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_cmpxchg_i32_addr64:
 ; GCN: flat_atomic_cmpswap v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]{{$}}
-define amdgpu_kernel void @atomic_cmpxchg_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32_addr64(i32* %out, i32 %in, i64 %index, i32 %old) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = cmpxchg volatile i32 addrspace(4)* %ptr, i32 %old, i32 %in seq_cst seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = cmpxchg volatile i32* %ptr, i32 %old, i32 %in seq_cst seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_cmpxchg_i32_ret_addr64:
 ; GCN: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RET]]
-define amdgpu_kernel void @atomic_cmpxchg_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32_ret_addr64(i32* %out, i32* %out2, i32 %in, i64 %index, i32 %old) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = cmpxchg volatile i32 addrspace(4)* %ptr, i32 %old, i32 %in seq_cst seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = cmpxchg volatile i32* %ptr, i32 %old, i32 %in seq_cst seq_cst
   %flag = extractvalue { i32, i1 } %val, 0
-  store i32 %flag, i32 addrspace(4)* %out2
+  store i32 %flag, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_xor_i32_offset:
 ; CIVI: flat_atomic_xor v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}{{$}}
 ; GFX9: flat_atomic_xor v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_xor_i32_offset(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_xor_i32_offset(i32* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = atomicrmw volatile xor i32 addrspace(4)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = atomicrmw volatile xor i32* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -881,22 +881,22 @@
 ; CIVI: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GFX9: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_xor_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_xor_i32_ret_offset(i32* %out, i32* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  %val = atomicrmw volatile xor i32 addrspace(4)* %gep, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %gep = getelementptr i32, i32* %out, i32 4
+  %val = atomicrmw volatile xor i32* %gep, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_xor_i32_addr64_offset:
 ; CIVI: flat_atomic_xor v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
 ; GFX9: flat_atomic_xor v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_xor_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xor_i32_addr64_offset(i32* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val = atomicrmw volatile xor i32 addrspace(4)* %gep, i32 %in seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val = atomicrmw volatile xor i32* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -904,50 +904,50 @@
 ; CIVI: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GFX9: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_xor_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xor_i32_ret_addr64_offset(i32* %out, i32* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val = atomicrmw volatile xor i32 addrspace(4)* %gep, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val = atomicrmw volatile xor i32* %gep, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_xor_i32:
 ; GCN: flat_atomic_xor v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}{{$}}
-define amdgpu_kernel void @atomic_xor_i32(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_xor_i32(i32* %out, i32 %in) {
 entry:
-  %val = atomicrmw volatile xor i32 addrspace(4)* %out, i32 %in seq_cst
+  %val = atomicrmw volatile xor i32* %out, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_xor_i32_ret:
 ; GCN: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_xor_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_xor_i32_ret(i32* %out, i32* %out2, i32 %in) {
 entry:
-  %val = atomicrmw volatile xor i32 addrspace(4)* %out, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %val = atomicrmw volatile xor i32* %out, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_xor_i32_addr64:
 ; GCN: flat_atomic_xor v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define amdgpu_kernel void @atomic_xor_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xor_i32_addr64(i32* %out, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = atomicrmw volatile xor i32 addrspace(4)* %ptr, i32 %in seq_cst
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = atomicrmw volatile xor i32* %ptr, i32 %in seq_cst
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_xor_i32_ret_addr64:
 ; GCN: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_xor_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xor_i32_ret_addr64(i32* %out, i32* %out2, i32 %in, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %val = atomicrmw volatile xor i32 addrspace(4)* %ptr, i32 %in seq_cst
-  store i32 %val, i32 addrspace(4)* %out2
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %val = atomicrmw volatile xor i32* %ptr, i32 %in seq_cst
+  store i32 %val, i32* %out2
   ret void
 }
 
@@ -955,21 +955,21 @@
 ; CIVI: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
 ; GFX9: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}] offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_load_i32_offset(i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+define amdgpu_kernel void @atomic_load_i32_offset(i32* %in, i32* %out) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %in, i32 4
-  %val = load atomic i32, i32 addrspace(4)* %gep  seq_cst, align 4
-  store i32 %val, i32 addrspace(4)* %out
+  %gep = getelementptr i32, i32* %in, i32 4
+  %val = load atomic i32, i32* %gep  seq_cst, align 4
+  store i32 %val, i32* %out
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_load_i32:
 ; GCN: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}] glc
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_load_i32(i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+define amdgpu_kernel void @atomic_load_i32(i32* %in, i32* %out) {
 entry:
-  %val = load atomic i32, i32 addrspace(4)* %in seq_cst, align 4
-  store i32 %val, i32 addrspace(4)* %out
+  %val = load atomic i32, i32* %in seq_cst, align 4
+  store i32 %val, i32* %out
   ret void
 }
 
@@ -977,60 +977,60 @@
 ; CIVI: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
 ; GFX9: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}] offset:16 glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_load_i32_addr64_offset(i32 addrspace(4)* %in, i32 addrspace(4)* %out, i64 %index) {
+define amdgpu_kernel void @atomic_load_i32_addr64_offset(i32* %in, i32* %out, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %in, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  %val = load atomic i32, i32 addrspace(4)* %gep seq_cst, align 4
-  store i32 %val, i32 addrspace(4)* %out
+  %ptr = getelementptr i32, i32* %in, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  %val = load atomic i32, i32* %gep seq_cst, align 4
+  store i32 %val, i32* %out
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_load_i32_addr64:
 ; GCN: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define amdgpu_kernel void @atomic_load_i32_addr64(i32 addrspace(4)* %in, i32 addrspace(4)* %out, i64 %index) {
+define amdgpu_kernel void @atomic_load_i32_addr64(i32* %in, i32* %out, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %in, i64 %index
-  %val = load atomic i32, i32 addrspace(4)* %ptr seq_cst, align 4
-  store i32 %val, i32 addrspace(4)* %out
+  %ptr = getelementptr i32, i32* %in, i64 %index
+  %val = load atomic i32, i32* %ptr seq_cst, align 4
+  store i32 %val, i32* %out
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_store_i32_offset:
 ; CIVI: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}}{{$}}
 ; GFX9: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_store_i32_offset(i32 %in, i32 addrspace(4)* %out) {
+define amdgpu_kernel void @atomic_store_i32_offset(i32 %in, i32* %out) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
-  store atomic i32 %in, i32 addrspace(4)* %gep  seq_cst, align 4
+  %gep = getelementptr i32, i32* %out, i32 4
+  store atomic i32 %in, i32* %gep  seq_cst, align 4
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_store_i32:
 ; GCN: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}}{{$}}
-define amdgpu_kernel void @atomic_store_i32(i32 %in, i32 addrspace(4)* %out) {
+define amdgpu_kernel void @atomic_store_i32(i32 %in, i32* %out) {
 entry:
-  store atomic i32 %in, i32 addrspace(4)* %out seq_cst, align 4
+  store atomic i32 %in, i32* %out seq_cst, align 4
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_store_i32_addr64_offset:
 ; CIVI: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}}{{$}}
 ; GFX9: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} offset:16{{$}}
-define amdgpu_kernel void @atomic_store_i32_addr64_offset(i32 %in, i32 addrspace(4)* %out, i64 %index) {
+define amdgpu_kernel void @atomic_store_i32_addr64_offset(i32 %in, i32* %out, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
-  store atomic i32 %in, i32 addrspace(4)* %gep seq_cst, align 4
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  %gep = getelementptr i32, i32* %ptr, i32 4
+  store atomic i32 %in, i32* %gep seq_cst, align 4
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_store_i32_addr64:
 ; GCN: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}}{{$}}
-define amdgpu_kernel void @atomic_store_i32_addr64(i32 %in, i32 addrspace(4)* %out, i64 %index) {
+define amdgpu_kernel void @atomic_store_i32_addr64(i32 %in, i32* %out, i64 %index) {
 entry:
-  %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
-  store atomic i32 %in, i32 addrspace(4)* %ptr seq_cst, align 4
+  %ptr = getelementptr i32, i32* %out, i64 %index
+  store atomic i32 %in, i32* %ptr seq_cst, align 4
   ret void
 }