;RUN: llc < %s -march=amdgcn -mcpu=verde -amdgpu-atomic-optimizations=false -verify-machineinstrs | FileCheck %s -check-prefix=CHECK -check-prefix=SICI
;RUN: llc < %s -march=amdgcn -mcpu=tonga -amdgpu-atomic-optimizations=false -verify-machineinstrs | FileCheck %s -check-prefix=CHECK -check-prefix=VI

;CHECK-LABEL: {{^}}test1:
;CHECK-NOT: s_waitcnt
;CHECK: buffer_atomic_swap v0, off, s[0:3], 0 glc
;CHECK: s_movk_i32 [[SOFS:s[0-9]+]], 0x1ffc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_swap v0, v1, s[0:3], 0 offen glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_swap v0, v1, s[0:3], 0 offen offset:42 glc
;CHECK-DAG: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_swap v0, off, s[0:3], [[SOFS]] offset:4 glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_swap v0, off, s[0:3], 0{{$}}
define amdgpu_ps float @test1(<4 x i32> inreg %rsrc, i32 %data, i32 %voffset) {
main_body:
  %o1 = call i32 @llvm.amdgcn.raw.buffer.atomic.swap.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i32 0)
  %o3 = call i32 @llvm.amdgcn.raw.buffer.atomic.swap.i32(i32 %o1, <4 x i32> %rsrc, i32 %voffset, i32 0, i32 0)
  %off5 = add i32 %voffset, 42
  %o5 = call i32 @llvm.amdgcn.raw.buffer.atomic.swap.i32(i32 %o3, <4 x i32> %rsrc, i32 %off5, i32 0, i32 0)
  %o6 = call i32 @llvm.amdgcn.raw.buffer.atomic.swap.i32(i32 %o5, <4 x i32> %rsrc, i32 4, i32 8188, i32 0)
  %unused = call i32 @llvm.amdgcn.raw.buffer.atomic.swap.i32(i32 %o6, <4 x i32> %rsrc, i32 0, i32 0, i32 0)
  %out = bitcast i32 %o6 to float
  ret float %out
}
27
;CHECK-LABEL: {{^}}test2:
;CHECK-NOT: s_waitcnt
;CHECK: buffer_atomic_add v0, v1, s[0:3], 0 offen glc{{$}}
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_sub v0, v1, s[0:3], 0 offen glc slc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_smin v0, v1, s[0:3], 0 offen glc{{$}}
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_umin v0, v1, s[0:3], 0 offen glc slc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_smax v0, v1, s[0:3], 0 offen glc{{$}}
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_umax v0, v1, s[0:3], 0 offen glc slc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_and v0, v1, s[0:3], 0 offen glc{{$}}
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_or v0, v1, s[0:3], 0 offen glc slc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_xor v0, v1, s[0:3], 0 offen glc
define amdgpu_ps float @test2(<4 x i32> inreg %rsrc, i32 %data, i32 %voffset) {
main_body:
  %t1 = call i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32 %data, <4 x i32> %rsrc, i32 %voffset, i32 0, i32 0)
  %t2 = call i32 @llvm.amdgcn.raw.buffer.atomic.sub.i32(i32 %t1, <4 x i32> %rsrc, i32 %voffset, i32 0, i32 2)
  %t3 = call i32 @llvm.amdgcn.raw.buffer.atomic.smin.i32(i32 %t2, <4 x i32> %rsrc, i32 %voffset, i32 0, i32 0)
  %t4 = call i32 @llvm.amdgcn.raw.buffer.atomic.umin.i32(i32 %t3, <4 x i32> %rsrc, i32 %voffset, i32 0, i32 2)
  %t5 = call i32 @llvm.amdgcn.raw.buffer.atomic.smax.i32(i32 %t4, <4 x i32> %rsrc, i32 %voffset, i32 0, i32 0)
  %t6 = call i32 @llvm.amdgcn.raw.buffer.atomic.umax.i32(i32 %t5, <4 x i32> %rsrc, i32 %voffset, i32 0, i32 2)
  %t7 = call i32 @llvm.amdgcn.raw.buffer.atomic.and.i32(i32 %t6, <4 x i32> %rsrc, i32 %voffset, i32 0, i32 0)
  %t8 = call i32 @llvm.amdgcn.raw.buffer.atomic.or.i32(i32 %t7, <4 x i32> %rsrc, i32 %voffset, i32 0, i32 2)
  %t9 = call i32 @llvm.amdgcn.raw.buffer.atomic.xor.i32(i32 %t8, <4 x i32> %rsrc, i32 %voffset, i32 0, i32 0)
  %out = bitcast i32 %t9 to float
  ret float %out
}
61
; Ideally, we would teach tablegen & friends that cmpswap only modifies the
; first vgpr. Since we don't do that yet, the register allocator will have to
; create copies which we don't bother to track here.
;
;CHECK-LABEL: {{^}}test3:
;CHECK-NOT: s_waitcnt
;CHECK: buffer_atomic_cmpswap {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], 0 glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: s_movk_i32 [[SOFS:s[0-9]+]], 0x1ffc
;CHECK: buffer_atomic_cmpswap {{v\[[0-9]+:[0-9]+\]}}, v2, s[0:3], 0 offen glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_cmpswap {{v\[[0-9]+:[0-9]+\]}}, v2, s[0:3], 0 offen offset:44 glc
;CHECK-DAG: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_cmpswap {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], [[SOFS]] offset:4 glc
define amdgpu_ps float @test3(<4 x i32> inreg %rsrc, i32 %data, i32 %cmp, i32 %vindex, i32 %voffset) {
main_body:
  %o1 = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %data, i32 %cmp, <4 x i32> %rsrc, i32 0, i32 0, i32 0)
  %o3 = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %o1, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 0, i32 0)
  %ofs.5 = add i32 %voffset, 44
  %o5 = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %o3, i32 %cmp, <4 x i32> %rsrc, i32 %ofs.5, i32 0, i32 0)
  %o6 = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %o5, i32 %cmp, <4 x i32> %rsrc, i32 4, i32 8188, i32 0)

; Detecting the no-return variant doesn't work right now because of how the
; intrinsic is replaced by an instruction that feeds into an EXTRACT_SUBREG.
; Since there probably isn't a reasonable use-case of cmpswap that discards
; the return value, that seems okay.
;
; %unused = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %o6, i32 %cmp, <4 x i32> %rsrc, i32 0, i32 0, i32 0)
  %out = bitcast i32 %o6 to float
  ret float %out
}
93
;CHECK-LABEL: {{^}}test4:
;CHECK: buffer_atomic_add v0,
define amdgpu_ps float @test4() {
main_body:
  %v = call i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32 1, <4 x i32> undef, i32 4, i32 0, i32 0)
  %v.float = bitcast i32 %v to float
  ret float %v.float
}
102
declare i32 @llvm.amdgcn.raw.buffer.atomic.swap.i32(i32, <4 x i32>, i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32, <4 x i32>, i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.buffer.atomic.sub.i32(i32, <4 x i32>, i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.buffer.atomic.smin.i32(i32, <4 x i32>, i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.buffer.atomic.umin.i32(i32, <4 x i32>, i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.buffer.atomic.smax.i32(i32, <4 x i32>, i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.buffer.atomic.umax.i32(i32, <4 x i32>, i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.buffer.atomic.and.i32(i32, <4 x i32>, i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.buffer.atomic.or.i32(i32, <4 x i32>, i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.buffer.atomic.xor.i32(i32, <4 x i32>, i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32, i32, <4 x i32>, i32, i32, i32) #0

attributes #0 = { nounwind }