;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s -check-prefix=CHECK -check-prefix=SICI
;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s -check-prefix=CHECK -check-prefix=VI

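; Operand order used by the calls throughout this file (it matches the
; declarations at the end; the parameter names here are only descriptive
; labels, not part of the IR):
;   llvm.amdgcn.buffer.atomic.*(data, rsrc, vindex, voffset, slc)
; cmpswap additionally takes the compare value right after the data:
;   llvm.amdgcn.buffer.atomic.cmpswap(data, cmp, rsrc, vindex, voffset, slc)
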
;CHECK-LABEL: {{^}}test1:
;CHECK: buffer_atomic_swap v0, off, s[0:3], 0 glc
;VI: s_movk_i32 [[SOFS:s[0-9]+]], 0x1fff
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_swap v0, v1, s[0:3], 0 idxen glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_swap v0, v2, s[0:3], 0 offen glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_swap v0, v[1:2], s[0:3], 0 idxen offen glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_swap v0, v2, s[0:3], 0 offen offset:42 glc
;CHECK-DAG: s_waitcnt vmcnt(0)
;SICI: buffer_atomic_swap v0, v1, s[0:3], 0 offen glc
;VI: buffer_atomic_swap v0, off, s[0:3], [[SOFS]] offset:1 glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_swap v0, off, s[0:3], 0{{$}}
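; Note on the SICI/VI split above: the %o6 swap below uses a constant offset of
; 8192, which does not fit in the 12-bit MUBUF immediate offset field, so VI
; materializes 0x1fff (8191) into an SGPR soffset and folds the remaining 1
; into the immediate, while SI/CI pass the offset through a VGPR (offen)
; instead.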
define amdgpu_ps float @test1(<4 x i32> inreg %rsrc, i32 %data, i32 %vindex, i32 %voffset) {
main_body:
  %o1 = call i32 @llvm.amdgcn.buffer.atomic.swap(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %o2 = call i32 @llvm.amdgcn.buffer.atomic.swap(i32 %o1, <4 x i32> %rsrc, i32 %vindex, i32 0, i1 0)
  %o3 = call i32 @llvm.amdgcn.buffer.atomic.swap(i32 %o2, <4 x i32> %rsrc, i32 0, i32 %voffset, i1 0)
  %o4 = call i32 @llvm.amdgcn.buffer.atomic.swap(i32 %o3, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i1 0)
  %ofs.5 = add i32 %voffset, 42
  %o5 = call i32 @llvm.amdgcn.buffer.atomic.swap(i32 %o4, <4 x i32> %rsrc, i32 0, i32 %ofs.5, i1 0)
  %o6 = call i32 @llvm.amdgcn.buffer.atomic.swap(i32 %o5, <4 x i32> %rsrc, i32 0, i32 8192, i1 0)
  %unused = call i32 @llvm.amdgcn.buffer.atomic.swap(i32 %o6, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %out = bitcast i32 %o6 to float
  ret float %out
}

;CHECK-LABEL: {{^}}test2:
;CHECK: buffer_atomic_add v0, v1, s[0:3], 0 idxen glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_sub v0, v1, s[0:3], 0 idxen glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_smin v0, v1, s[0:3], 0 idxen glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_umin v0, v1, s[0:3], 0 idxen glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_smax v0, v1, s[0:3], 0 idxen glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_umax v0, v1, s[0:3], 0 idxen glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_and v0, v1, s[0:3], 0 idxen glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_or v0, v1, s[0:3], 0 idxen glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_xor v0, v1, s[0:3], 0 idxen glc
define amdgpu_ps float @test2(<4 x i32> inreg %rsrc, i32 %data, i32 %vindex) {
main_body:
  %t1 = call i32 @llvm.amdgcn.buffer.atomic.add(i32 %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i1 0)
  %t2 = call i32 @llvm.amdgcn.buffer.atomic.sub(i32 %t1, <4 x i32> %rsrc, i32 %vindex, i32 0, i1 0)
  %t3 = call i32 @llvm.amdgcn.buffer.atomic.smin(i32 %t2, <4 x i32> %rsrc, i32 %vindex, i32 0, i1 0)
  %t4 = call i32 @llvm.amdgcn.buffer.atomic.umin(i32 %t3, <4 x i32> %rsrc, i32 %vindex, i32 0, i1 0)
  %t5 = call i32 @llvm.amdgcn.buffer.atomic.smax(i32 %t4, <4 x i32> %rsrc, i32 %vindex, i32 0, i1 0)
  %t6 = call i32 @llvm.amdgcn.buffer.atomic.umax(i32 %t5, <4 x i32> %rsrc, i32 %vindex, i32 0, i1 0)
  %t7 = call i32 @llvm.amdgcn.buffer.atomic.and(i32 %t6, <4 x i32> %rsrc, i32 %vindex, i32 0, i1 0)
  %t8 = call i32 @llvm.amdgcn.buffer.atomic.or(i32 %t7, <4 x i32> %rsrc, i32 %vindex, i32 0, i1 0)
  %t9 = call i32 @llvm.amdgcn.buffer.atomic.xor(i32 %t8, <4 x i32> %rsrc, i32 %vindex, i32 0, i1 0)
  %out = bitcast i32 %t9 to float
  ret float %out
}

; Ideally, we would teach tablegen & friends that cmpswap only modifies the
; first vgpr. Since we don't do that yet, the register allocator will have to
; create copies which we don't bother to track here.
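;
; For context: buffer_atomic_cmpswap reads {data, cmp} from a consecutive vgpr
; pair and, with glc set, returns the original memory value in the low vgpr of
; that pair, which is why the checks below only match a generic vgpr range.
; A rough sketch of the copies involved (register numbers are hypothetical and
; not what the checks require):
;   v_mov_b32 v5, v1      ; cmp into the high half of the pair
;   v_mov_b32 v4, v0      ; data into the low half of the pair
;   buffer_atomic_cmpswap v[4:5], off, s[0:3], 0 glc
;   v_mov_b32 v0, v4      ; the returned value lives in the low vgpr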
;
;CHECK-LABEL: {{^}}test3:
;CHECK: buffer_atomic_cmpswap {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], 0 glc
;CHECK: s_waitcnt vmcnt(0)
;VI: s_movk_i32 [[SOFS:s[0-9]+]], 0x1fff
;CHECK: buffer_atomic_cmpswap {{v\[[0-9]+:[0-9]+\]}}, v2, s[0:3], 0 idxen glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_cmpswap {{v\[[0-9]+:[0-9]+\]}}, v3, s[0:3], 0 offen glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_cmpswap {{v\[[0-9]+:[0-9]+\]}}, v[2:3], s[0:3], 0 idxen offen glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_cmpswap {{v\[[0-9]+:[0-9]+\]}}, v3, s[0:3], 0 offen offset:42 glc
;CHECK-DAG: s_waitcnt vmcnt(0)
;SICI: buffer_atomic_cmpswap {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, s[0:3], 0 offen glc
;VI: buffer_atomic_cmpswap {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], [[SOFS]] offset:1 glc
define amdgpu_ps float @test3(<4 x i32> inreg %rsrc, i32 %data, i32 %cmp, i32 %vindex, i32 %voffset) {
main_body:
  %o1 = call i32 @llvm.amdgcn.buffer.atomic.cmpswap(i32 %data, i32 %cmp, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %o2 = call i32 @llvm.amdgcn.buffer.atomic.cmpswap(i32 %o1, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 0, i1 0)
  %o3 = call i32 @llvm.amdgcn.buffer.atomic.cmpswap(i32 %o2, i32 %cmp, <4 x i32> %rsrc, i32 0, i32 %voffset, i1 0)
  %o4 = call i32 @llvm.amdgcn.buffer.atomic.cmpswap(i32 %o3, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i1 0)
  %ofs.5 = add i32 %voffset, 42
  %o5 = call i32 @llvm.amdgcn.buffer.atomic.cmpswap(i32 %o4, i32 %cmp, <4 x i32> %rsrc, i32 0, i32 %ofs.5, i1 0)
  %o6 = call i32 @llvm.amdgcn.buffer.atomic.cmpswap(i32 %o5, i32 %cmp, <4 x i32> %rsrc, i32 0, i32 8192, i1 0)

; Detecting the no-return variant doesn't work right now because of how the
; intrinsic is replaced by an instruction that feeds into an EXTRACT_SUBREG.
; Since there probably isn't a reasonable use-case of cmpswap that discards
; the return value, that seems okay.
;
; %unused = call i32 @llvm.amdgcn.buffer.atomic.cmpswap(i32 %o6, i32 %cmp, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
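;
; The no-return selection itself is still covered for swap by the %unused call
; in test1, whose final check matches the variant without glc.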
  %out = bitcast i32 %o6 to float
  ret float %out
}

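; test4 only checks instruction selection for the simplest case: an atomic add
; of a constant value at a small constant offset, with an undef resource
; descriptor.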
;CHECK-LABEL: {{^}}test4:
;CHECK: buffer_atomic_add v0,
define amdgpu_ps float @test4() {
main_body:
  %v = call i32 @llvm.amdgcn.buffer.atomic.add(i32 1, <4 x i32> undef, i32 0, i32 4, i1 false)
  %v.float = bitcast i32 %v to float
  ret float %v.float
}

declare i32 @llvm.amdgcn.buffer.atomic.swap(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.add(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.sub(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.smin(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.umin(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.smax(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.umax(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.and(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.or(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.xor(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.cmpswap(i32, i32, <4 x i32>, i32, i32, i1) #0

attributes #0 = { nounwind }