; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; GCN-LABEL: {{^}}add1:
; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, 0, v{{[0-9]+}}, [[CC]]
; GCN-NOT: v_cndmask
; add1: `add (zext i1 %cmp), %v` should select to v_addc_u32 consuming the
; compare's carry bit directly, instead of materializing 0/1 via v_cndmask.
define amdgpu_kernel void @add1(i32 addrspace(1)* nocapture %arg) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %x, %y
  %ext = zext i1 %cmp to i32
  %add = add i32 %v, %ext
  store i32 %add, i32 addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}sub1:
; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_subb_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, 0, [[CC]]
; GCN-NOT: v_cndmask
; sub1: `add (sext i1 %cmp), %v` (i.e. subtract 1 when the compare is true)
; should select to v_subb_u32 using the compare's borrow, with no v_cndmask.
define amdgpu_kernel void @sub1(i32 addrspace(1)* nocapture %arg) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %x, %y
  %ext = sext i1 %cmp to i32
  %add = add i32 %v, %ext
  store i32 %add, i32 addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}add_adde:
; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC]]
; GCN-NOT: v_cndmask
; GCN-NOT: v_add
; add_adde: the carry-add of the zext'ed compare plus a second plain add
; should fold into a single v_addc_u32 (no separate v_add, no v_cndmask).
define amdgpu_kernel void @add_adde(i32 addrspace(1)* nocapture %arg, i32 %a) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %x, %y
  %ext = zext i1 %cmp to i32
  %adde = add i32 %v, %ext
  %add2 = add i32 %adde, %a
  store i32 %add2, i32 addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}adde_add:
; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC]]
; GCN-NOT: v_cndmask
; GCN-NOT: v_add
; adde_add: same folding as add_adde, but with the operand order reversed —
; the plain add comes first and the zext'ed compare is added second.
define amdgpu_kernel void @adde_add(i32 addrspace(1)* nocapture %arg, i32 %a) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %x, %y
  %ext = zext i1 %cmp to i32
  %add = add i32 %v, %a
  %adde = add i32 %add, %ext
  store i32 %adde, i32 addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}sub_sube:
; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_subb_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC]]
; GCN-NOT: v_cndmask
; GCN-NOT: v_sub
; sub_sube: add of the sext'ed compare followed by a subtraction should fold
; into a single v_subb_u32 (no separate v_sub, no v_cndmask).
define amdgpu_kernel void @sub_sube(i32 addrspace(1)* nocapture %arg, i32 %a) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %x, %y
  %ext = sext i1 %cmp to i32
  %adde = add i32 %v, %ext
  %sub = sub i32 %adde, %a
  store i32 %sub, i32 addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}sube_sub:
; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_subb_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC]]
; GCN-NOT: v_cndmask
; GCN-NOT: v_sub
; sube_sub: same folding as sub_sube with the order reversed — the plain
; subtraction happens first and the sext'ed compare is added afterwards.
define amdgpu_kernel void @sube_sub(i32 addrspace(1)* nocapture %arg, i32 %a) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %x, %y
  %ext = sext i1 %cmp to i32
  %sub = sub i32 %v, %a
  %adde = add i32 %sub, %ext
  store i32 %adde, i32 addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}zext_flclass:
; GCN: v_cmp_class_f32_e{{32|64}} [[CC:[^,]+]],
; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, 0, v{{[0-9]+}}, [[CC]]
; GCN-NOT: v_cndmask
; zext_flclass: the i1 produced by llvm.amdgcn.class.f32 (a v_cmp_class
; compare) should feed v_addc_u32 directly when zero-extended and added,
; rather than going through v_cndmask.
define amdgpu_kernel void @zext_flclass(i32 addrspace(1)* nocapture %arg, float %x) {
bb:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %id
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = tail call zeroext i1 @llvm.amdgcn.class.f32(float %x, i32 608)
  %ext = zext i1 %cmp to i32
  %add = add i32 %v, %ext
  store i32 %add, i32 addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}sext_flclass:
; GCN: v_cmp_class_f32_e{{32|64}} [[CC:[^,]+]],
; GCN: v_subb_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, 0, [[CC]]
; GCN-NOT: v_cndmask
; sext_flclass: sign-extending the class-compare result and adding it should
; select to v_subb_u32 with the compare's borrow, with no v_cndmask.
define amdgpu_kernel void @sext_flclass(i32 addrspace(1)* nocapture %arg, float %x) {
bb:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %id
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = tail call zeroext i1 @llvm.amdgcn.class.f32(float %x, i32 608)
  %ext = sext i1 %cmp to i32
  %add = add i32 %v, %ext
  store i32 %add, i32 addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}add_and:
; GCN: s_and_b64 [[CC:[^,]+]],
; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, 0, v{{[0-9]+}}, [[CC]]
; GCN-NOT: v_cndmask
; add_and: an i1 formed by AND-ing two compares (kept in an SGPR pair via
; s_and_b64) should still feed v_addc_u32 directly when zext'ed and added,
; with no v_cndmask materialization.
define amdgpu_kernel void @add_and(i32 addrspace(1)* nocapture %arg) {
bb:
  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
  %v = load i32, i32 addrspace(1)* %gep, align 4
  %cmp1 = icmp ugt i32 %x, %y
  %cmp2 = icmp ugt i32 %x, 1
  %cmp = and i1 %cmp1, %cmp2
  %ext = zext i1 %cmp to i32
  %add = add i32 %v, %ext
  store i32 %add, i32 addrspace(1)* %gep, align 4
  ret void
}

; Intrinsic declarations shared by the kernels above.
declare i1 @llvm.amdgcn.class.f32(float, i32) #0

declare i32 @llvm.amdgcn.workitem.id.x() #0

declare i32 @llvm.amdgcn.workitem.id.y() #0

attributes #0 = { nounwind readnone speculatable }