; RUN: llc -march=amdgcn -mtriple=amdgcn---amdgiz -amdgpu-atomic-optimizations=true -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GCN64,GFX7LESS %s
; RUN: llc -march=amdgcn -mtriple=amdgcn---amdgiz -mcpu=tonga -mattr=-flat-for-global -amdgpu-atomic-optimizations=true -amdgpu-dpp-combine=false -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GCN64,GFX8MORE,GFX8MORE64,GFX89 %s
; RUN: llc -march=amdgcn -mtriple=amdgcn---amdgiz -mcpu=gfx900 -mattr=-flat-for-global -amdgpu-atomic-optimizations=true -amdgpu-dpp-combine=false -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GCN64,GFX8MORE,GFX8MORE64,GFX89 %s
; RUN: llc -march=amdgcn -mtriple=amdgcn---amdgiz -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -mattr=-flat-for-global -amdgpu-atomic-optimizations=true -amdgpu-dpp-combine=false -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GCN64,GFX8MORE,GFX8MORE64 %s
; RUN: llc -march=amdgcn -mtriple=amdgcn---amdgiz -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -mattr=-flat-for-global -amdgpu-atomic-optimizations=true -amdgpu-dpp-combine=false -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GCN32,GFX8MORE,GFX8MORE32 %s

declare i32 @llvm.amdgcn.workitem.id.x()
declare i32 @llvm.amdgcn.raw.buffer.atomic.add(i32, <4 x i32>, i32, i32, i32 immarg)
declare i32 @llvm.amdgcn.struct.buffer.atomic.add(i32, <4 x i32>, i32, i32, i32, i32 immarg)
declare i32 @llvm.amdgcn.raw.buffer.atomic.sub(i32, <4 x i32>, i32, i32, i32 immarg)
; Show what the atomic optimization pass will do for raw buffers.

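; For a wave-uniform operand, the pass is expected to replace the per-lane
; atomic with a single atomic per wave: a ballot yields the active-lane mask,
; s_bcnt1 turns it into a popcount, the operand is scaled by that count, and
; only the first active lane (mbcnt == 0) performs the atomic. A minimal
; sketch in illustrative pseudocode (the names below are not part of the
; test):
;   %mask  = ballot(true)                  ; mask of active lanes
;   %count = popcount(%mask)               ; s_bcnt1_i32_b32 / _b64
;   if (mbcnt(%mask) == 0)                 ; first active lane only
;     buffer_atomic_add(%count * 5)        ; e.g. 64 active lanes: 64 * 5 = 320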
; GCN-LABEL: add_i32_constant:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5
; GCN: buffer_atomic_add v[[value]]
define amdgpu_kernel void @add_i32_constant(i32 addrspace(1)* %out, <4 x i32> %inout) {
entry:
  %old = call i32 @llvm.amdgcn.raw.buffer.atomic.add(i32 5, <4 x i32> %inout, i32 0, i32 0, i32 0)
  store i32 %old, i32 addrspace(1)* %out
  ret void
}

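; The uniform (but non-constant) case differs only in the scaling: the
; popcount is multiplied by the kernel argument with a scalar s_mul_i32
; rather than a v_mul_u32_u24 against an inline constant.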
; GCN-LABEL: add_i32_uniform:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: s_mul_i32 s[[scalar_value:[0-9]+]], s{{[0-9]+}}, s[[popcount]]
; GCN: v_mov_b32{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[scalar_value]]
; GCN: buffer_atomic_add v[[value]]
define amdgpu_kernel void @add_i32_uniform(i32 addrspace(1)* %out, <4 x i32> %inout, i32 %additive) {
entry:
  %old = call i32 @llvm.amdgcn.raw.buffer.atomic.add(i32 %additive, <4 x i32> %inout, i32 0, i32 0, i32 0)
  store i32 %old, i32 addrspace(1)* %out
  ret void
}

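; When the data operand is divergent, the pass is expected to build a
; wave-wide prefix sum with DPP moves, read the wave total out of the last
; lane, and issue the atomic once. Illustrative pseudocode (not checked by
; this test):
;   for step in {1, 2, 4, 8}:              ; v_mov_b32_dpp row_shr:<step>
;     %x += shifted(%x, step)              ; plus row_bcast steps on GFX8/9
;   %total = readlane(%x, wave_size - 1)   ; lane 31 (wave32) or 63 (wave64)
;   buffer_atomic_add %total               ; single atomic per wave
; GFX7 and below lack DPP, so the atomic is left per-lane there, which the
; GFX7LESS-NOT lines below verify.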
; GCN-LABEL: add_i32_varying_vdata:
; GFX7LESS-NOT: v_mbcnt_lo_u32_b32
; GFX7LESS-NOT: v_mbcnt_hi_u32_b32
; GFX7LESS-NOT: s_bcnt1_i32_b64
; GFX7LESS: buffer_atomic_add v{{[0-9]+}}
; GFX8MORE: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_shr:1 row_mask:0xf bank_mask:0xf
; GFX8MORE: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_shr:2 row_mask:0xf bank_mask:0xf
; GFX8MORE: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_shr:4 row_mask:0xf bank_mask:0xf
; GFX8MORE: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_shr:8 row_mask:0xf bank_mask:0xf
; GFX89: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_bcast:15 row_mask:0xa bank_mask:0xf
; GFX89: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_bcast:31 row_mask:0xc bank_mask:0xf
; GFX8MORE32: v_readlane_b32 s[[scalar_value:[0-9]+]], v{{[0-9]+}}, 31
; GFX8MORE64: v_readlane_b32 s[[scalar_value:[0-9]+]], v{{[0-9]+}}, 63
; GFX89: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} wave_shr:1 row_mask:0xf bank_mask:0xf
; GFX8MORE: v_mov_b32{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[scalar_value]]
; GFX8MORE: buffer_atomic_add v[[value]]
define amdgpu_kernel void @add_i32_varying_vdata(i32 addrspace(1)* %out, <4 x i32> %inout) {
entry:
  %lane = call i32 @llvm.amdgcn.workitem.id.x()
  %old = call i32 @llvm.amdgcn.raw.buffer.atomic.add(i32 %lane, <4 x i32> %inout, i32 0, i32 0, i32 0)
  store i32 %old, i32 addrspace(1)* %out
  ret void
}

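; The struct-buffer form is expected to take the same DPP scan path as the
; raw-buffer form above; the extra i32 operand is the struct buffer's vindex.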
; GCN-LABEL: struct_add_i32_varying_vdata:
; GFX7LESS-NOT: v_mbcnt_lo_u32_b32
; GFX7LESS-NOT: v_mbcnt_hi_u32_b32
; GFX7LESS-NOT: s_bcnt1_i32_b64
; GFX7LESS: buffer_atomic_add v{{[0-9]+}}
; GFX8MORE: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_shr:1 row_mask:0xf bank_mask:0xf
; GFX8MORE: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_shr:2 row_mask:0xf bank_mask:0xf
; GFX8MORE: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_shr:4 row_mask:0xf bank_mask:0xf
; GFX8MORE: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_shr:8 row_mask:0xf bank_mask:0xf
; GFX89: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_bcast:15 row_mask:0xa bank_mask:0xf
; GFX89: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_bcast:31 row_mask:0xc bank_mask:0xf
; GFX8MORE32: v_readlane_b32 s[[scalar_value:[0-9]+]], v{{[0-9]+}}, 31
; GFX8MORE64: v_readlane_b32 s[[scalar_value:[0-9]+]], v{{[0-9]+}}, 63
; GFX89: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} wave_shr:1 row_mask:0xf bank_mask:0xf
; GFX8MORE: v_mov_b32{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[scalar_value]]
; GFX8MORE: buffer_atomic_add v[[value]]
define amdgpu_kernel void @struct_add_i32_varying_vdata(i32 addrspace(1)* %out, <4 x i32> %inout, i32 %vindex) {
entry:
  %lane = call i32 @llvm.amdgcn.workitem.id.x()
  %old = call i32 @llvm.amdgcn.struct.buffer.atomic.add(i32 %lane, <4 x i32> %inout, i32 %vindex, i32 0, i32 0, i32 0)
  store i32 %old, i32 addrspace(1)* %out
  ret void
}

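; Only the data operand is optimized. When the divergence is in the offset
; (the address) rather than the value, the atomic should be left untouched,
; so none of the ballot/popcount sequence may appear.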
; GCN-LABEL: add_i32_varying_offset:
; GCN-NOT: v_mbcnt_lo_u32_b32
; GCN-NOT: v_mbcnt_hi_u32_b32
; GCN-NOT: s_bcnt1_i32_b64
; GCN: buffer_atomic_add v{{[0-9]+}}
define amdgpu_kernel void @add_i32_varying_offset(i32 addrspace(1)* %out, <4 x i32> %inout) {
entry:
  %lane = call i32 @llvm.amdgcn.workitem.id.x()
  %old = call i32 @llvm.amdgcn.raw.buffer.atomic.add(i32 1, <4 x i32> %inout, i32 %lane, i32 0, i32 0)
  store i32 %old, i32 addrspace(1)* %out
  ret void
}

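; The sub tests below mirror the add tests above, with buffer_atomic_sub as
; the expected single per-wave atomic.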
; GCN-LABEL: sub_i32_constant:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5
; GCN: buffer_atomic_sub v[[value]]
define amdgpu_kernel void @sub_i32_constant(i32 addrspace(1)* %out, <4 x i32> %inout) {
entry:
  %old = call i32 @llvm.amdgcn.raw.buffer.atomic.sub(i32 5, <4 x i32> %inout, i32 0, i32 0, i32 0)
  store i32 %old, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: sub_i32_uniform:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: s_mul_i32 s[[scalar_value:[0-9]+]], s{{[0-9]+}}, s[[popcount]]
; GCN: v_mov_b32{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[scalar_value]]
; GCN: buffer_atomic_sub v[[value]]
define amdgpu_kernel void @sub_i32_uniform(i32 addrspace(1)* %out, <4 x i32> %inout, i32 %subitive) {
entry:
  %old = call i32 @llvm.amdgcn.raw.buffer.atomic.sub(i32 %subitive, <4 x i32> %inout, i32 0, i32 0, i32 0)
  store i32 %old, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: sub_i32_varying_vdata:
; GFX7LESS-NOT: v_mbcnt_lo_u32_b32
; GFX7LESS-NOT: v_mbcnt_hi_u32_b32
; GFX7LESS-NOT: s_bcnt1_i32_b64
; GFX7LESS: buffer_atomic_sub v{{[0-9]+}}
; GFX8MORE: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_shr:1 row_mask:0xf bank_mask:0xf
; GFX8MORE: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_shr:2 row_mask:0xf bank_mask:0xf
; GFX8MORE: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_shr:4 row_mask:0xf bank_mask:0xf
; GFX8MORE: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_shr:8 row_mask:0xf bank_mask:0xf
; GFX89: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_bcast:15 row_mask:0xa bank_mask:0xf
; GFX89: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_bcast:31 row_mask:0xc bank_mask:0xf
; GFX8MORE32: v_readlane_b32 s[[scalar_value:[0-9]+]], v{{[0-9]+}}, 31
; GFX8MORE64: v_readlane_b32 s[[scalar_value:[0-9]+]], v{{[0-9]+}}, 63
; GFX89: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} wave_shr:1 row_mask:0xf bank_mask:0xf
; GFX8MORE: v_mov_b32{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[scalar_value]]
; GFX8MORE: buffer_atomic_sub v[[value]]
define amdgpu_kernel void @sub_i32_varying_vdata(i32 addrspace(1)* %out, <4 x i32> %inout) {
entry:
  %lane = call i32 @llvm.amdgcn.workitem.id.x()
  %old = call i32 @llvm.amdgcn.raw.buffer.atomic.sub(i32 %lane, <4 x i32> %inout, i32 0, i32 0, i32 0)
  store i32 %old, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: sub_i32_varying_offset:
; GCN-NOT: v_mbcnt_lo_u32_b32
; GCN-NOT: v_mbcnt_hi_u32_b32
; GCN-NOT: s_bcnt1_i32_b64
; GCN: buffer_atomic_sub v{{[0-9]+}}
define amdgpu_kernel void @sub_i32_varying_offset(i32 addrspace(1)* %out, <4 x i32> %inout) {
entry:
  %lane = call i32 @llvm.amdgcn.workitem.id.x()
  %old = call i32 @llvm.amdgcn.raw.buffer.atomic.sub(i32 1, <4 x i32> %inout, i32 %lane, i32 0, i32 0)
  store i32 %old, i32 addrspace(1)* %out
  ret void
}